SlowGuess committed on
Commit 5ca34d8 · verified · 1 Parent(s): 86cd522

Add Batch dec56c7b-7ecc-4d27-9126-c8e9357ff253

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +21 -0
  2. 2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_content_list.json +0 -0
  3. 2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_model.json +0 -0
  4. 2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_origin.pdf +3 -0
  5. 2202.01xxx/2202.01017/full.md +567 -0
  6. 2202.01xxx/2202.01017/images.zip +3 -0
  7. 2202.01xxx/2202.01017/layout.json +0 -0
  8. 2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_content_list.json +1046 -0
  9. 2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_model.json +1334 -0
  10. 2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_origin.pdf +3 -0
  11. 2202.01xxx/2202.01020/full.md +219 -0
  12. 2202.01xxx/2202.01020/images.zip +3 -0
  13. 2202.01xxx/2202.01020/layout.json +0 -0
  14. 2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_content_list.json +0 -0
  15. 2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_model.json +0 -0
  16. 2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_origin.pdf +3 -0
  17. 2202.01xxx/2202.01032/full.md +0 -0
  18. 2202.01xxx/2202.01032/images.zip +3 -0
  19. 2202.01xxx/2202.01032/layout.json +0 -0
  20. 2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_content_list.json +0 -0
  21. 2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_model.json +0 -0
  22. 2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_origin.pdf +3 -0
  23. 2202.01xxx/2202.01034/full.md +0 -0
  24. 2202.01xxx/2202.01034/images.zip +3 -0
  25. 2202.01xxx/2202.01034/layout.json +0 -0
  26. 2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_content_list.json +0 -0
  27. 2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_model.json +0 -0
  28. 2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_origin.pdf +3 -0
  29. 2202.01xxx/2202.01054/full.md +0 -0
  30. 2202.01xxx/2202.01054/images.zip +3 -0
  31. 2202.01xxx/2202.01054/layout.json +0 -0
  32. 2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_content_list.json +1123 -0
  33. 2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_model.json +1299 -0
  34. 2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_origin.pdf +3 -0
  35. 2202.01xxx/2202.01094/full.md +229 -0
  36. 2202.01xxx/2202.01094/images.zip +3 -0
  37. 2202.01xxx/2202.01094/layout.json +0 -0
  38. 2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_content_list.json +1203 -0
  39. 2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_model.json +0 -0
  40. 2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_origin.pdf +3 -0
  41. 2202.01xxx/2202.01110/full.md +267 -0
  42. 2202.01xxx/2202.01110/images.zip +3 -0
  43. 2202.01xxx/2202.01110/layout.json +0 -0
  44. 2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_content_list.json +0 -0
  45. 2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_model.json +0 -0
  46. 2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_origin.pdf +3 -0
  47. 2202.01xxx/2202.01113/full.md +0 -0
  48. 2202.01xxx/2202.01113/images.zip +3 -0
  49. 2202.01xxx/2202.01113/layout.json +0 -0
  50. 2202.01xxx/2202.01136/64512bb3-257e-447f-9d41-4659d42602f0_content_list.json +0 -0
.gitattributes CHANGED
@@ -8727,3 +8727,24 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
8727
  2201.00xxx/2201.00759/7abeba98-7c25-430d-a28b-8b8cf788749e_origin.pdf filter=lfs diff=lfs merge=lfs -text
8728
  2201.01xxx/2201.01191/8fe7e520-b832-4c39-abf7-942e702b0397_origin.pdf filter=lfs diff=lfs merge=lfs -text
8729
  2201.01xxx/2201.01389/6c6f9712-a2a1-4d64-a6b7-60e815c0779c_origin.pdf filter=lfs diff=lfs merge=lfs -text
8730
+ 2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
8731
+ 2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_origin.pdf filter=lfs diff=lfs merge=lfs -text
8732
+ 2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8733
+ 2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8734
+ 2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_origin.pdf filter=lfs diff=lfs merge=lfs -text
8735
+ 2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8736
+ 2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8737
+ 2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_origin.pdf filter=lfs diff=lfs merge=lfs -text
8738
+ 2202.01xxx/2202.01136/64512bb3-257e-447f-9d41-4659d42602f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
8739
+ 2202.01xxx/2202.01159/0e50118f-a5de-4fd9-a4f1-bbaddb47f6be_origin.pdf filter=lfs diff=lfs merge=lfs -text
8740
+ 2202.01xxx/2202.01169/27b9bc83-7100-42af-9d4d-6b85bd88306a_origin.pdf filter=lfs diff=lfs merge=lfs -text
8741
+ 2202.01xxx/2202.01181/80f11718-d36a-4aec-9d25-c7ee65ef41e6_origin.pdf filter=lfs diff=lfs merge=lfs -text
8742
+ 2202.01xxx/2202.01197/f8590509-b52c-45d8-bf61-2d3e5797bb96_origin.pdf filter=lfs diff=lfs merge=lfs -text
8743
+ 2202.01xxx/2202.01252/d9c5cefe-3be6-4727-b59e-b701ae449821_origin.pdf filter=lfs diff=lfs merge=lfs -text
8744
+ 2202.01xxx/2202.01267/561f7fef-02d8-4c72-8bf7-321dbf098bef_origin.pdf filter=lfs diff=lfs merge=lfs -text
8745
+ 2202.01xxx/2202.01279/f9cbb344-3bee-49a9-b36f-b0f3e42aaf8a_origin.pdf filter=lfs diff=lfs merge=lfs -text
8746
+ 2202.01xxx/2202.01280/ddb6e068-5674-4df5-b057-8707ec2429fb_origin.pdf filter=lfs diff=lfs merge=lfs -text
8747
+ 2202.01xxx/2202.01284/efc3589a-7b9b-480b-84b4-96f2cb825b8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8748
+ 2202.01xxx/2202.01288/840fb690-8481-4aae-8a04-6bc06eb14007_origin.pdf filter=lfs diff=lfs merge=lfs -text
8749
+ 2202.03xxx/2202.03274/334b8934-ae0b-49eb-b15e-f2ac967651fc_origin.pdf filter=lfs diff=lfs merge=lfs -text
8750
+ 2203.03xxx/2203.03540/ad3557be-9ad0-4c54-9213-fcc03072376e_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01017/5e8d62de-11d3-43c3-b022-380f6e6453a6_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f6d971e580ec93e7074979bcee026fd04fa90ef97c2d1ff32b564655c73af26
3
+ size 6900141
2202.01xxx/2202.01017/full.md ADDED
@@ -0,0 +1,567 @@
1
+ # Multi-Task Learning as a Bargaining Game
+
+ Aviv Navon $^{1}$ Aviv Shamsian $^{1}$ Idan Achituve $^{1}$ Haggai Maron $^{2}$ Kenji Kawaguchi $^{3}$ Gal Chechik $^{1,2}$ Ethan Fetaya $^{1}$
2
+
3
+ # Abstract
4
+
5
+ In Multi-task learning (MTL), a joint model is trained to simultaneously make predictions for several tasks. Joint training reduces computation costs and improves data efficiency; however, since the gradients of these different tasks may conflict, training a joint model for MTL often yields lower performance than its corresponding single-task counterparts. A common method for alleviating this issue is to combine per-task gradients into a joint update direction using a particular heuristic. In this paper, we propose viewing the gradients combination step as a bargaining game, where tasks negotiate to reach an agreement on a joint direction of parameter update. Under certain assumptions, the bargaining problem has a unique solution, known as the Nash Bargaining Solution, which we propose to use as a principled approach to multi-task learning. We describe a new MTL optimization procedure, Nash-MTL, and derive theoretical guarantees for its convergence. Empirically, we show that Nash-MTL achieves state-of-the-art results on multiple MTL benchmarks in various domains.
6
+
7
+ # 1. Introduction
8
+
9
+ In many real-world applications, one needs to solve several tasks simultaneously using limited computational or data resources. For example, perception for autonomous vehicles requires lane detection, object detection, and free-space estimation, which must all run in parallel and in real-time. This is normally solved via multi-task learning (MTL), where one model is jointly trained on several learning tasks (Caruana, 1997; Ruder, 2017; Crawshaw, 2020). Multi-task learning was also shown to improve generalization in theory (Baxter,
10
+
11
+ *Equal contribution. $^{1}$Bar-Ilan University, Ramat Gan, Israel. $^{2}$NVIDIA, Tel-Aviv, Israel. $^{3}$National University of Singapore. Correspondence to: Aviv Navon <aviv.navon@biu.ac.il>, Aviv Shamsian <aviv.shamsian@live.biu.ac.il>.
12
+
13
+ Proceedings of the $39^{th}$ International Conference on Machine Learning, Baltimore, Maryland, USA, PMLR 162, 2022. Copyright 2022 by the author(s).
14
+
15
+ 2000) and in practice (e.g., auxiliary learning, Liu et al., 2019a; Achituve et al., 2021; Navon et al., 2021a).
16
+
17
+ Unfortunately, MTL often causes performance degradation compared to single-task models (Standley et al., 2020). A main reason for such degradation is gradients conflict (Yu et al., 2020a; Wang et al., 2020; Liu et al., 2021a). These per-task gradients may have conflicting directions or a large difference in magnitudes, with the largest gradient dominating the update direction. The degraded performance of MTL due to poor training, compared with its potential to improve performance due to better generalization, has a major impact on many real-world systems. Improving MTL optimization algorithms is therefore an important task with significant implications to many systems.
18
+
19
+ Currently, most MTL optimization algorithms (Sener & Koltun, 2018; Yu et al., 2020a; Liu et al., 2021a) follow a general scheme. First, compute the gradients for all tasks $g_{1}, \ldots, g_{K}$ . Next, combine those gradients into a joint direction, $\Delta = \mathcal{A}(g_{1}, \ldots, g_{K})$ using an aggregation algorithm $\mathcal{A}$ . Finally, update model parameters using a single-task optimization algorithm, replacing the gradients with $\Delta$ . Multiple heuristics were proposed for the aggregation algorithm $\mathcal{A}$ . However, to the best of our knowledge, a principled, axiomatic, approach to gradient aggregation is still missing.
20
+
21
+ Here we address the gradient combination step by viewing it as a cooperative bargaining game (Thomson, 1994). Each task represents a player, whose utility is derived from its gradient, and players negotiate to reach an agreed direction. This formulation allows us to use results from game theory literature that analyze this problem from an axiomatic perspective. In his seminal paper, Nash (1953) presented an axiomatic approach to the bargaining problem and showed that under certain axioms, the bargaining problem has a unique solution known as the Nash Bargaining Solution. This solution is known to be proportionally fair, where any alternative will have a negative average relative change. This proportionally fair update allows us to find a solution that works for all tasks without being dominated by a single large gradient.
22
+
23
+ Building on Nash's results, we propose a novel MTL optimization algorithm, named Nash-MTL, where the gradients are combined at each step using the Nash bargaining solution.
24
+
25
+ ![](images/f36e2be145953af1b8679939b044d75487879225f3ad774114aea17cd779aaea.jpg)
26
+ Figure 1. Illustrative example: Optimization trajectories in loss space. Shown are 5 different initializations (black dots $\bullet$ ), and their trajectories are colored from orange to purple. Losses have a large difference in scale. See Appendix B for details. For linear scalarization (LS), PCGrad, and CAGrad, the optimization process is controlled by the gradient of $\ell_2$ , since it has a larger magnitude, resulting in imbalanced solutions between tasks (mostly ending at the bottom right). These three methods also fail to converge to an optimal solution for the rightmost initialization points. In contrast, MGDA is inclined towards the task with the smallest gradient magnitude $(\ell_1)$ . Our method, Nash-MTL, is invariant to changes in loss scale and produces solutions that are well balanced across the Pareto front.
27
+
28
+ We first characterize the Nash bargaining solution for MTL and derive an efficient algorithm to approximate its value. Then, we analyze our approach theoretically and establish convergence guarantees in the convex and nonconvex cases. Finally, we show empirically that our Nash-MTL approach achieves state-of-the-art results on four MTL benchmarks on a variety of challenges ranging from computer vision and quantum chemistry to reinforcement learning. To support future research and the reproducibility of the results, we make our source code publicly available at: https://github.com/AvivNavon/nash-mtl.
29
+
30
+ # 2. Background
31
+
32
+ # 2.1. Pareto Optimality
33
+
34
+ Optimization for MTL is a specific case of multiple-objective optimization (MOO). Given objective functions $\ell_1, \ldots, \ell_K$ , the performance of solution $x$ is measured by the vector of objective values $(\ell_1(x), \ldots, \ell_K(x))$ . One main property of MOO is that since there is no natural linear ordering on vectors, it is not always possible to compare solutions, so there is no single clearly optimal value.
35
+
36
+ We say that a solution $x$ dominates $x'$ if it is better on one or more objectives and not worse on any other objectives. A solution that is not dominated by any other is called Pareto optimal, and the set of all such solutions is called the Pareto front. It is important to note that there is no clear way to select between different Pareto optimal solutions without additional assumptions or a prior on the user's preferences (Navon et al., 2021b). For non-convex problems, a point is defined as local Pareto optimal if it is Pareto optimal in some open set containing it. Further, a point is called Pareto stationary if there exists a convex combination of the gradients at this point that equals zero. Pareto stationarity is a necessary condition for Pareto optimality.
37
+
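To make the dominance relation concrete, here is a minimal Python sketch (the function name `dominates` is our own illustration, not from the paper); each solution is represented by its vector of objective values, with lower losses being better:

```python
import numpy as np

def dominates(losses_a, losses_b):
    """True if solution a Pareto-dominates b: a is no worse on every
    objective and strictly better on at least one (lower is better)."""
    a, b = np.asarray(losses_a), np.asarray(losses_b)
    return bool(np.all(a <= b) and np.any(a < b))

# Neither point dominates the other, so both may be Pareto optimal:
print(dominates([1.0, 2.0], [2.0, 1.0]))  # False
print(dominates([1.0, 1.0], [2.0, 1.0]))  # True
```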
38
+ # 2.2. Nash Bargaining Solution
39
+
40
+ We provide a brief background on cooperative bargaining games and the Nash bargaining solution, see Thomson (1994) for more details. In a bargaining problem, we have $K$ players, each with their own utility function $u_{i}:A\cup \{D\} \to \mathbb{R}$ , which they wish to maximize. $A$ is the set of possible agreements and $D$ is the disagreement point which the players default to if they fail to reach an agreement. We define the set of possible payoffs as $U = \{(u_{1}(x),\dots,u_{K}(x)):x\in A\} \subset \mathbb{R}^{K}$ and $d = (u_{1}(D),\dots,u_{K}(D))$ . We assume $U$ is convex, compact and that there exists a point in $U$ that strictly dominates $d$ , namely there exists a $u\in U$ such that $\forall i:u_i > d_i$ .
41
+
42
+ Nash (1953) showed that for such a payoff set $U$ , the two-player bargaining problem has a unique solution that satisfies the following properties, or axioms: Pareto optimality, symmetry, independence of irrelevant alternatives, and invariance to affine transformations. This was later extended to multiple players (Szép & Forgo, 1985).
43
+
44
+ Axiom 2.1. Pareto optimality: The agreed solution must not be dominated by another option, i.e. there cannot be any other agreement that is better for at least one player and not worse for any of the players.
45
+
46
+ As this is a cooperative game, it makes little sense for the players to curtail another player without any personal gain, so it is natural to assume the agreed solution will not be dominated by another.
47
+
48
+ Axiom 2.2. Symmetry: The solution should be invariant to permuting the order of the players.
49
+
50
+ Axiom 2.3. Independence of irrelevant alternatives (IIA): If we enlarge the set of possible payoffs to $\tilde{U} \supseteq U$ and the agreed point for $\tilde{U}$ lies in the original set, $u^{*} \in U$ , then the agreed point when the set of possible payoffs is $U$ remains $u^{*}$ .
51
+
52
+ Axiom 2.4. Invariance to affine transformations: If we transform each utility function $u_{i}(x)$ to $\tilde{u}_i(x) = c_i\cdot u_i(x) + b_i$ with $c_{i} > 0$ , then if the original agreement had utilities $(y_{1},\dots,y_{k})$ , the agreement after the transformation has utilities $(c_{1}y_{1} + b_{1},\ldots ,c_{k}y_{k} + b_{k})$ .
53
+
54
+ We argue that in the MTL setting, it is natural to require Axioms 2.1-2.3. Axiom 2.4 is, to our mind, the only non-natural assumption used by the Nash bargaining solution in the context of MTL. We argue that it is nevertheless a desirable property that is helpful for MTL. Axiom 2.4 means that the solution does not take the gradients' norms into account but rather treats all gradients the same, as if they were normalized. Without enforcing this assumption, the solution can easily be dominated by a single direction (see Figure 1). We further validate the importance of this assumption by investigating a scale-invariant baseline in Section 6.
55
+
56
+ The unique point satisfying all these axioms is called the Nash bargaining solution and is given as
57
+
58
+ $$
59
+ \begin{array}{l} u^{*} = \arg\max_{u \in U} \sum_{i} \log\left(u_{i} - d_{i}\right) \tag{1} \\ \text{s.t. } \forall i: u_{i} > d_{i} \end{array}
60
+ $$
61
+
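As a toy illustration of Eq. 1 (our own example, not from the paper), consider splitting a unit resource between two players with payoffs $u(x) = (x, 1 - x)$ and disagreement point $d = (0, 0)$ ; maximizing $\sum_i \log(u_i - d_i)$ recovers the symmetric split:

```python
import numpy as np
from scipy.optimize import minimize_scalar

# Nash objective for the toy problem: maximize log(x) + log(1 - x).
res = minimize_scalar(lambda x: -(np.log(x) + np.log(1.0 - x)),
                      bounds=(1e-6, 1.0 - 1e-6), method='bounded')
print(res.x)  # ~0.5, the symmetric Nash bargaining solution
```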
62
+ # 3. Method
63
+
64
+ We now describe our Nash-MTL method in detail. We first formalize the gradient combination step as a bargaining game and analyze the Nash bargaining solution for this game. We then describe our algorithm to approximate the solution efficiently; the computational cost of this approximation is critical because it is executed at every gradient update. To simplify the notation, we do not distinguish between shared and task-specific parameters. We note, however, that task-specific parameters do not contribute to the Nash bargaining solution calculation.
65
+
66
+ # 3.1. Nash Bargaining Multi-Task Learning
67
+
68
+ Given an MTL optimization problem and parameters $\theta$ , we search for an update vector $\Delta \theta$ in the ball of radius $\epsilon$ centered around zero, $B_{\epsilon}$ . We frame this as a bargaining problem with the agreement set $B_{\epsilon}$ and the disagreement point at 0, i.e., staying at the current parameters $\theta$ . We define the utility function for each player as $u_{i}(\Delta \theta) = g_{i}^{\top} \Delta \theta$ where $g_{i}$ is the gradient of the loss of task $i$ at $\theta$ . We note that since the agreement set is compact and convex and the utilities are linear, the set of possible payoffs is also compact and convex.
69
+
70
+ Our main assumption, besides the ones used by Nash, is that if $\theta$ is not Pareto stationary then the gradients are linearly independent (see further discussion on this assumption in Section 5). Under this assumption, we also have that the
71
+
72
+ disagreement point, $\Delta \theta = 0$ , is dominated by another point in $B_{\epsilon}$ . We now show that if $\theta$ is not on the Pareto front, the unique Nash bargaining solution has the following form:
73
+
74
+ Claim 3.1. Let $G$ be the $d \times K$ matrix whose columns are the gradients $g_i$ . The solution to $\arg \max_{\Delta \theta \in B_\epsilon} \sum_i \log (\Delta \theta^\top g_i)$ is (up to scaling) $\sum_i \alpha_i g_i$ where $\alpha \in \mathbb{R}_+^K$ is the solution to $G^\top G\alpha = 1 / \alpha$ where $1 / \alpha$ is the element-wise reciprocal.
75
+
76
+ Proof. The derivative of this objective is $\sum_{i=1}^{K} \frac{1}{\Delta \theta^T g_i} g_i$ . For all vectors $\Delta \theta$ such that $\forall i: \Delta \theta^T g_i > 0$ the utilities are monotonically increasing with the norm of $\Delta \theta$ . Thus, from the Pareto optimality assumption by Nash, the optimal solution has to be on the boundary of $B_\epsilon$ . From this we see that the gradient at the optimal point $\sum_{i=1}^{K} \frac{1}{\Delta \theta^T g_i} g_i$ must be in the radial direction, i.e., $\sum_{i=1}^{K} \frac{1}{\Delta \theta^T g_i} g_i \parallel \Delta \theta$ or $\sum_{i=1}^{K} \frac{1}{\Delta \theta^T g_i} g_i = \lambda \Delta \theta$ . Since the gradients are independent, we must have $\Delta \theta = \sum_{i} \alpha_i g_i$ and $\forall i: \frac{1}{\Delta \theta^T g_i} = \lambda \alpha_i$ or $\forall i: \Delta \theta^T g_i = \frac{1}{\lambda \alpha_i}$ . As the inner product must be positive for a descent direction, we can conclude $\lambda > 0$ ; we set $\lambda = 1$ to fix the direction of $\Delta \theta$ (the norm might be larger than $\epsilon$ ). Now finding the bargaining solution is reduced to finding $\alpha \in \mathbb{R}^K$ with $\alpha_i > 0$ such that $\forall i: \Delta \theta^T g_i = \sum_j \alpha_j g_j^T g_i = \frac{1}{\alpha_i}$ . This is equivalent to requiring that $G^T G\alpha = 1/\alpha$ where $1/\alpha$ is the element-wise reciprocal.
77
+
78
+ We now provide some intuition for this solution. First, if all $g_{i}$ are orthogonal, we get $\alpha_{i} = 1 / ||g_{i}||$ and $\Delta \theta = \sum \frac{g_i}{||g_i||}$ , which is the obvious scale-invariant solution. When they are not orthogonal, we get
79
+
80
+ $$
81
+ \alpha_{i} \| g_{i} \|^{2} + \sum_{j \neq i} \alpha_{j} g_{j}^{\top} g_{i} = 1 / \alpha_{i} \tag{2}
82
+ $$
83
+
84
+ We can consider $\sum_{j\neq i}\alpha_{j}g_{j}^{\top}g_{i} = \left(\sum_{j\neq i}\alpha_{j}g_{j}\right)^{\top}g_{i}$ as the interaction between task $i$ and the other tasks; if it is positive, the other gradients aid the $i$-th task, and if it is negative, they hamper it. When there is a negative interaction, the LHS of Eq. 2 decreases and, as a result, $\alpha_{i}$ increases to compensate for it. Conversely, when there is a positive interaction, $\alpha_{i}$ will decrease.
85
+
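A quick numerical sanity check of the orthogonal case above (our own snippet): with orthogonal columns, $\alpha_i = 1/\|g_i\|$ indeed satisfies $G^\top G \alpha = 1/\alpha$ :

```python
import numpy as np

# Three orthogonal gradients as the columns of G.
G = np.diag([1.0, 3.0, 0.5])
alpha = 1.0 / np.linalg.norm(G, axis=0)           # claimed solution 1/||g_i||
print(np.allclose(G.T @ G @ alpha, 1.0 / alpha))  # True
```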
86
+ # 3.2. Solving $\mathbf{G}^{\top}\mathbf{G}\alpha = 1 / \alpha$
87
+
88
+ Here we describe how to efficiently approximate the optimal solution for $G^{\top}G\alpha = 1 / \alpha$ through a sequence of convex optimization problems. We define $\beta_{i}(\alpha) = g_{i}^{\top}G\alpha$ , and wish to find $\alpha$ such that $\alpha_{i} = 1 / \beta_{i}$ for all $i$ , or equivalently $\log (\alpha_{i}) + \log (\beta_{i}(\alpha)) = 0$ . Denote $\varphi_{i}(\alpha) = \log (\alpha_{i}) + \log (\beta_{i}(\alpha))$ and $\varphi (\alpha) = \sum_{i}\varphi_{i}(\alpha)$ . With that, our goal is to find a non-negative $\alpha$ such that $\varphi_{i}(\alpha) = 0$ for all $i$ .
89
+
90
+ ![](images/1c2813c718535b784fecd5c49b563d2246d554704c3d543493aba83cd668caf7.jpg)
91
+ Figure 2. Visualization of the update direction: We show the update direction (blue) obtained by various methods on three gradients in $\mathbb{R}^3$ . We rescaled the returned vectors for better visibility, showing only the direction. We further show the size of the projection (red) of the update onto each gradient direction (black). Nash-MTL produces the update direction with the most balanced projections.
92
+
93
+ # Algorithm 1 Nash-MTL
94
+
95
+ Input: $\theta^{(0)}$ - initial parameter vector, $\{\ell_i\}_{i = 1}^K$ - differentiable loss functions, $\eta$ - learning rate
96
+
97
+ for $t = 1,\dots,T$ do
98
+
99
+ Compute task gradients $g_{i}^{(t)} = \nabla_{\theta^{(t - 1)}}\ell_{i}$
100
+
101
+ Set $G^{(t)}$ to be the matrix with columns $g_{i}^{(t)}$
102
+
103
+ Solve for $\alpha \colon (G^{(t)})^{\top}G^{(t)}\alpha = 1 / \alpha$ to obtain $\alpha^{(t)}$
104
+
105
+ Update the parameters $\theta^{(t)} = \theta^{(t-1)} - \eta G^{(t)}\alpha^{(t)}$
106
+
107
+ end for
108
+
109
+ Return: $\theta^{(T)}$
110
+
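The loop below is a minimal NumPy sketch of Algorithm 1 (our own illustration, not the authors' released code); `task_grads` is a hypothetical callback returning the $K$ per-task gradients at $\theta$ , and `nash_mtl_weights`, sketched after Eq. 5 below, approximates the solution of $(G^{(t)})^\top G^{(t)}\alpha = 1/\alpha$ :

```python
import numpy as np

def nash_mtl(theta0, task_grads, lr=1e-3, num_steps=1000):
    """Sketch of Algorithm 1 for a flat parameter vector theta."""
    theta = theta0.copy()
    for _ in range(num_steps):
        G = np.stack(task_grads(theta), axis=1)  # d x K, columns g_i
        alpha = nash_mtl_weights(G)              # solve G^T G alpha = 1/alpha
        theta = theta - lr * (G @ alpha)         # joint update direction G alpha
    return theta
```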
111
+ We can write this as the following optimization problem:
112
+
113
+ $$
114
+ \min_{\alpha} \sum_{i} \varphi_{i}(\alpha) \tag{3}
115
+ $$
116
+
117
+ $$
118
+ \text{s.t. } \forall i,\; -\varphi_{i}(\alpha) \leq 0,
119
+ $$
120
+
121
+ $$
122
+ \alpha_{i} > 0.
123
+ $$
124
+
125
+ The constraints in this problem are convex and linear and the objective is concave. We first try to solve the following convex surrogate objective
126
+
127
+ $$
128
+ \min_{\alpha} \sum_{i} \beta_{i}(\alpha) \tag{4}
129
+ $$
130
+
131
+ $$
132
+ \text{s.t. } \forall i,\; -\varphi_{i}(\alpha) \leq 0,
133
+ $$
134
+
135
+ $$
136
+ \alpha_{i} > 0.
137
+ $$
138
+
139
+ Here, we minimize $\sum_{i}\beta_{i}$ under the constraint $\beta_{i} = g_{i}^{\top}G\alpha \geq 1 / \alpha_{i}$ . While this objective is not equivalent to the original problem, we found it very useful. In many cases, it produces exact solutions with $\varphi (\alpha) = 0$ as required.
140
+
141
+ To further improve our approximation, we considered the following problem,
142
+
143
+ $$
144
+ \min_{\alpha} \sum_{i} \beta_{i}(\alpha) + \varphi(\alpha) \tag{5}
145
+ $$
146
+
147
+ $$
148
+ \text{s.t. } \forall i,\; -\varphi_{i}(\alpha) \leq 0,
149
+ $$
150
+
151
+ $$
152
+ \alpha_{i} > 0.
153
+ $$
154
+
155
+ Adding $\varphi (\alpha)$ to the objective may further reduce it, moving it closer to zero; however, it renders the problem non-convex. Despite that, our solution can be improved iteratively by replacing the concave term $\varphi (\alpha)$ with its first-order approximation $\tilde{\varphi}_{\tau}(\alpha) = \varphi (\alpha^{(\tau)}) + \nabla \varphi (\alpha^{(\tau)})^{\top}(\alpha -\alpha^{(\tau)})$ , where $\alpha^{(\tau)}$ is the solution at iteration $\tau$ . Note that we replace $\varphi$ with $\tilde{\varphi}$ only in the objective and keep $\varphi (\alpha)$ as is in the constraint: i.e., $\min_{\alpha}\sum_{i}\beta_{i}(\alpha) + \tilde{\varphi}_{\tau}(\alpha)$ s.t. $-\varphi_{i}(\alpha)\leq 0$ and $\alpha_{i} > 0$ for all $i$ . This sequential optimization approach is a variation of the concave-convex procedure (CCP) (Yuille & Rangarajan, 2003; Lipp & Boyd, 2016). Therefore, the sequence $\{\alpha^{(\tau)}\}_{\tau}$ converges to a critical point of the original non-convex problem in Eq. 5, based on prior theory of CCP by Sriperumbudur & Lanckriet (2009). Moreover, since we do not modify the constraint, $\alpha^{(\tau)}$ always satisfies the constraint of the original problem for any $\tau$ . Finally, the following proposition shows that the original objective decreases monotonically with $\tau$ .
156
+
157
+ Proposition 3.2. Denote the objective for the optimization problem in Eq. 5 by $\phi (\alpha) = \sum_{i}\beta_{i}(\alpha) + \varphi (\alpha)$ . Then, $\phi \left(\alpha^{(\tau +1)}\right)\leq \phi \left(\alpha^{(\tau)}\right)$ for all $\tau \geq 1$ .
158
+
159
+ We provide a proof and further discussion in Appendix A. In practice, we limit the CCP to 20 iterations in all experiments, with the exception of Section 6.3, for which we use a single step. We found the refined solution to have a limited effect on MTL performance (see Appendix D.2).
160
+
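A minimal sketch of this procedure using `cvxpy` (our own illustration under the assumptions above; the authors' released code is linked in Section 1): it first solves the convex surrogate of Eq. 4 and then runs CCP steps on Eq. 5, linearizing only the $\varphi(\alpha)$ term in the objective while keeping the constraints unchanged:

```python
import cvxpy as cp
import numpy as np

def nash_mtl_weights(G, n_ccp_steps=20):
    """Approximate alpha solving (G^T G) alpha = 1/alpha (Claim 3.1)."""
    K = G.shape[1]
    GtG = G.T @ G
    alpha = cp.Variable(K, pos=True)
    beta = GtG @ alpha                      # beta_i(alpha) = g_i^T G alpha
    phi = cp.log(alpha) + cp.log(beta)      # phi_i(alpha), concave in alpha
    constraints = [phi >= 0]                # -phi_i(alpha) <= 0
    # Convex surrogate (Eq. 4): minimize sum_i beta_i.
    cp.Problem(cp.Minimize(cp.sum(beta)), constraints).solve()
    a = alpha.value
    # CCP steps for Eq. 5: linearize the concave phi(alpha) in the objective
    # only; the constraints phi_i(alpha) >= 0 stay as they are.
    for _ in range(n_ccp_steps):
        grad_phi = 1.0 / a + GtG @ (1.0 / (GtG @ a))  # gradient of phi at a
        cp.Problem(cp.Minimize(cp.sum(beta) + grad_phi @ alpha),
                   constraints).solve(warm_start=True)
        a = alpha.value
    return a
```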
161
+ # 3.3. Practical Speedup
162
+
163
+ One shortcoming of many leading MTL methods is that all task gradients are required for obtaining the joint update direction. When the number of tasks $K$ becomes large, this may be too computationally expensive as it requires one to perform $K$ backward passes through the shared backbone to compute the $K$ gradients. Prior work suggested using a subset of tasks (Liu et al., 2021a) or replacing the task gradients with the feature level gradient (Sener & Koltun, 2018; Liu et al., 2021b; Javaloy & Valera, 2021) as potential practical speedups. We emphasize that this issue is not
164
+
165
+ ![](images/27c3f12b06e2e82cc91262b1848c4d6714e18b060b672b0a2fc629c4df4ab528.jpg)
166
+ Figure 3. QM9. Test $\Delta_{m}$ throughout the training process averaged over 3 random seeds.
167
+
168
+ unique to our method, but rather is shared by all methods that compute all gradients for all tasks.
169
+
170
+ In practice, we found that using feature-level gradients as a surrogate for the gradient of the shared parameters dramatically degrades the performance of our method. See Appendix C for empirical results and further discussion. As an alternative, we suggest updating the gradient weights $\alpha^{(t)}$ once every few iterations instead of every iteration. This simple yet effective solution greatly reduces the runtime (up to $\sim \times 10$ for QM9 and $\sim \times 5$ for MT10) while maintaining high performance. In Section 6.4 we provide experimental results while varying the frequency of task weight updates on the QM9 dataset and the MT10 benchmark. Our results show that Nash-MTL runtime can be reduced to about the same as linear scalarization (or STL) while maintaining competitive results compared to other baselines; however, in some cases, we do see a noticeable drop in performance compared with our standard approach.
171
+
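A PyTorch sketch of this infrequent-update variant (our own illustration; `shared_parameters()` is a hypothetical accessor for the shared backbone, `alpha` is a preallocated NumPy array caching the current task weights, and `nash_mtl_weights` is the sketch from Section 3.2): on most steps a single backward pass on the weighted loss suffices, and the $K$ per-task backward passes run only every `T` steps:

```python
import torch

def nash_mtl_t_step(step, model, task_losses, opt, alpha, T=50):
    """One optimization step of Nash-MTL-T (weights refreshed every T steps)."""
    if step % T == 0:
        # K backward passes: per-task gradients w.r.t. the shared parameters.
        shared = list(model.shared_parameters())
        grads = [torch.cat([g.flatten() for g in
                            torch.autograd.grad(l, shared, retain_graph=True)])
                 for l in task_losses]
        G = torch.stack(grads, dim=1).detach().cpu().numpy()
        alpha[:] = nash_mtl_weights(G)      # refresh the cached task weights
    opt.zero_grad()
    sum(a * l for a, l in zip(alpha, task_losses)).backward()
    opt.step()
```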
172
+ # 4. Related Work
173
+
174
+ In multitask learning (MTL), one simultaneously solves several learning problems while sharing information among tasks (Caruana, 1997; Ruder, 2017), commonly through a joint hidden representation (Zhang et al., 2014; Dai et al., 2016; Pinto & Gupta, 2017; Zhao et al., 2018; Liu et al., 2019b). Studies in the literature proposed several explanations for the difficulty in the optimization process of MTL, such as conflicting gradients (Wang et al., 2020; Yu et al., 2020a), or plateaus in the loss landscape (Schaul et al., 2019). Other studies aimed at improving multitask learning by proposing novel architectures (Misra et al., 2016; Hashimoto et al., 2017; Liu et al., 2019b; Chen et al., 2020). We focus on weighting the gradients of the tasks via an axiomatic approach that is agnostic to the architecture used. Studies in a similar vein proposed to weigh the task losses
175
+
176
+ Table 1. QM9. Test performance averaged over 3 random seeds.
177
+
178
+ <table><tr><td></td><td>MR ↓</td><td>Δm% ↓</td></tr><tr><td>LS</td><td>6.8</td><td>177.6 ± 3.4</td></tr><tr><td>SI</td><td>4.0</td><td>77.8 ± 9.2</td></tr><tr><td>RLW</td><td>8.2</td><td>203.8 ± 3.4</td></tr><tr><td>DWA</td><td>6.4</td><td>175.3 ± 6.3</td></tr><tr><td>UW</td><td>5.3</td><td>108.0 ± 22.5</td></tr><tr><td>MGDA</td><td>5.9</td><td>120.5 ± 2.0</td></tr><tr><td>PCGrad</td><td>5.0</td><td>125.7 ± 10.3</td></tr><tr><td>CAGrad</td><td>5.7</td><td>112.8 ± 4.0</td></tr><tr><td>IMTL-G</td><td>4.7</td><td>77.2 ± 9.3</td></tr><tr><td>Nash-MTL</td><td>2.5</td><td>62.0 ± 1.4</td></tr></table>
179
+
180
+ with various approaches, such as the uncertainty of the tasks (Kendall et al., 2018), the norm of the gradients (Chen et al., 2018), random weights (Lin et al., 2021), and similarity of the gradients (Du et al., 2018; Suteu & Guo, 2019). These methods are mostly heuristic and can have unstable performance (Liu et al., 2021a). Recently, several studies proposed MTL approaches based on the multiple-gradient descent algorithm (MGDA) for multi-objective optimization (Désideri, 2012). This is an appealing approach since, under mild conditions, convergence to a Pareto stationary point is guaranteed. Sener & Koltun (2018) cast the multi-task problem as a multi-objective problem and suggest task weighting based on the Frank-Wolfe algorithm (Jaggi, 2013). Liu et al. (2021a) search for an update direction in a neighborhood of the average gradient that maximizes the worst improvement of any task. Unlike these studies, we propose an MTL approach based on a bargaining game that can find solutions that are Pareto optimal and proportionally fair.
181
+
182
+ The closest work to our approach, to the best of our knowledge, is Liu et al. (2021b). There, the authors propose to look for a fair gradient direction where all the cosine similarities are equal. We note that this update direction satisfies all of the Nash axioms except for Pareto optimality. Thus, unlike our proportionally fair approach, it can settle for a sub-optimal solution for the sake of fairness.
183
+
184
+ Finally, we note that the Nash bargaining solution was effectively applied to problems in various fields such as communication (Zhang et al., 2008; Leshem & Zehavi, 2011; Shi et al., 2018), economics (Dagan & Volij, 1993), and computing (Grosu et al., 2002), and to several learning setups, such as reinforcement learning (Qiao et al., 2006), Bayesian optimization (Binois et al., 2020), clustering (Rezaee et al., 2021), federated learning (Kim, 2021), and multi-armed bandits (Baek & Farias, 2021).
185
+
186
+ Table 2. NYUv2. Test performance for three tasks: semantic segmentation, depth estimation, and surface normal. Values are averages over 3 random seeds.
187
+
188
+ <table><tr><td rowspan="3"></td><td colspan="2">Segmentation</td><td colspan="2">Depth</td><td colspan="5">Surface Normal</td><td rowspan="3">MR↓</td><td rowspan="3">Δm%↓</td></tr><tr><td rowspan="2">mIoU ↑</td><td rowspan="2">Pix Acc ↑</td><td rowspan="2">Abs Err ↓</td><td rowspan="2">Rel Err ↓</td><td colspan="2">Angle Distance ↓</td><td colspan="3">Within t° ↑</td></tr><tr><td>Mean</td><td>Median</td><td>11.25</td><td>22.5</td><td>30</td></tr><tr><td>STL</td><td>38.30</td><td>63.76</td><td>0.6754</td><td>0.2780</td><td>25.01</td><td>19.21</td><td>30.14</td><td>57.20</td><td>69.15</td><td></td><td></td></tr><tr><td>LS</td><td>39.29</td><td>65.33</td><td>0.5493</td><td>0.2263</td><td>28.15</td><td>23.96</td><td>22.09</td><td>47.50</td><td>61.08</td><td>8.11</td><td>5.59</td></tr><tr><td>SI</td><td>38.45</td><td>64.27</td><td>0.5354</td><td>0.2201</td><td>27.60</td><td>23.37</td><td>22.53</td><td>48.57</td><td>62.32</td><td>7.11</td><td>4.39</td></tr><tr><td>RLW</td><td>37.17</td><td>63.77</td><td>0.5759</td><td>0.2410</td><td>28.27</td><td>24.18</td><td>22.26</td><td>47.05</td><td>60.62</td><td>10.11</td><td>7.78</td></tr><tr><td>DWA</td><td>39.11</td><td>65.31</td><td>0.5510</td><td>0.2285</td><td>27.61</td><td>23.18</td><td>24.17</td><td>50.18</td><td>62.39</td><td>6.88</td><td>3.57</td></tr><tr><td>UW</td><td>36.87</td><td>63.17</td><td>0.5446</td><td>0.2260</td><td>27.04</td><td>22.61</td><td>23.54</td><td>49.05</td><td>63.65</td><td>6.44</td><td>4.05</td></tr><tr><td>MGDA</td><td>30.47</td><td>59.90</td><td>0.6070</td><td>0.2555</td><td>24.88</td><td>19.45</td><td>29.18</td><td>56.88</td><td>69.36</td><td>5.44</td><td>1.38</td></tr><tr><td>PCGrad</td><td>38.06</td><td>64.64</td><td>0.5550</td><td>0.2325</td><td>27.41</td><td>22.80</td><td>23.86</td><td>49.83</td><td>63.14</td><td>6.88</td><td>3.97</td></tr><tr><td>GradDrop</td><td>39.39</td><td>65.12</td><td>0.5455</td><td>0.2279</td><td>27.48</td><td>22.96</td><td>23.38</td><td>49.44</td><td>62.87</td><td>6.44</td><td>3.58</td></tr><tr><td>CAGrad</td><td>39.79</td><td>65.49</td><td>0.5486</td><td>0.2250</td><td>26.31</td><td>21.58</td><td>25.61</td><td>52.36</td><td>65.58</td><td>3.77</td><td>0.20</td></tr><tr><td>IMTL-G</td><td>39.35</td><td>65.60</td><td>0.5426</td><td>0.2256</td><td>26.02</td><td>21.19</td><td>26.2</td><td>53.13</td><td>66.24</td><td>3.11</td><td>-0.76</td></tr><tr><td>Nash-MTL</td><td>40.13</td><td>65.93</td><td>0.5261</td><td>0.2171</td><td>25.26</td><td>20.08</td><td>28.4</td><td>55.47</td><td>68.15</td><td>1.55</td><td>-4.04</td></tr></table>
189
+
190
+ # 5. Analysis
191
+
192
+ We now analyze the convergence of our method in the convex and non-convex cases. As even single-task non-convex optimization might only converge to a stationary point, we will prove convergence to a Pareto stationary point, i.e., a point where some convex combination of the gradients is zero. As stated, we also assume that the gradients are independent while not at a Pareto stationary point. Independence of the gradients is slightly stronger than merely not being Pareto stationary, but it is needed to exclude degenerate edge cases such as two identical tasks.
193
+
194
+ We note that by substituting local Pareto optimality for Pareto stationarity in Assumption 5.1 we can show convergence to a local Pareto optimal point. However, this assumption has strong implications, as it implies we avoid local maxima and saddle points of any specific task. Since our update rule is a descent direction for all tasks, we can reasonably assume that our algorithm avoids local maxima points. Furthermore, it was shown that first-order methods avoid saddle points (Panageas et al., 2019), giving credence to this stronger assumption. Nevertheless, we take a conservative approach and state our results with the weaker assumption.
195
+
196
+ We formally make the following assumptions:
197
+
198
+ Assumption 5.1. We assume that for a sequence $\{\theta^{(t)}\}_{t = 1}^{\infty}$ generated by our algorithm, the set of the gradient vectors $g_{1}^{(t)},\dots,g_{K}^{(t)}$ at any point on the sequence and at any partial limit are linearly independent unless that point is a Pareto stationary point.
199
+
200
+ Assumption 5.2. We assume that all loss functions are differentiable, bounded below and that all sub-level sets are
201
+
202
+ bounded. The input domain is open and convex.
203
+
204
+ Assumption 5.3. We assume that all the loss functions are L-smooth,
205
+
206
+ $$
207
+ \left\| \nabla \ell_{i}(x) - \nabla \ell_{i}(y) \right\| \leq L \| x - y \|. \tag{6}
208
+ $$
209
+
210
+ Theorem 5.4. Let $\{\theta^{(t)}\}_{t=1}^{\infty}$ be the sequence generated by the update rule $\theta^{(t+1)} = \theta^{(t)} - \mu^{(t)}\Delta\theta^{(t)}$ where $\Delta\theta^{(t)} = \sum_{i=1}^{K} \alpha_i^{(t)} g_i^{(t)}$ is the Nash bargaining solution $(G^{(t)})^\top G^{(t)}\alpha^{(t)} = 1/\alpha^{(t)}$ . Set $\mu^{(t)} = \min_{i \in [K]} \frac{1}{LK\alpha_i^{(t)}}$ . Then, the sequence $\{\theta^{(t)}\}_{t=1}^{\infty}$ has a subsequence that converges to a Pareto stationary point $\theta^*$ . Moreover, all the loss functions $(\ell_1(\theta^{(t)}), \dots, \ell_K(\theta^{(t)}))$ converge to $(\ell_1(\theta^*), \dots, \ell_K(\theta^*))$ .
211
+
212
+ Proof sketch. We can show that $\mu^{(t)} = \min_i\frac{1}{LK\alpha_i^{(t)}}\to 0$ , so $||\alpha^{(t)}||\rightarrow \infty$ . We also show that $||1 / \alpha^{(t)}||$ is bounded. As $(G^{(t)})^{\top}G^{(t)}\alpha^{(t)} = 1 / \alpha^{(t)}$ , this means that the smallest singular value of $(G^{(t)})^{\top}G^{(t)}$ must converge to zero. From compactness, $\{\theta^{(t)}\}_{t = 1}^{\infty}$ has a converging subsequence whose limit we denote $\theta^{*}$ . From continuity, the gradients' Gram matrix $G^{\top}G$ computed at $\theta^{*}$ must have a zero singular value, and therefore the gradients are linearly dependent. By our assumption, this means that $\theta^{*}$ is Pareto stationary. As the losses are monotonically decreasing and bounded below, they must converge, and their limit is the subsequence limit $(\ell_1(\theta^*),\dots,\ell_K(\theta^*))$ .
213
+
214
+ If we also assume convexity, we can strengthen our claim:
215
+
216
+ Theorem 5.5. Let $\{\theta^{(t)}\}_{t=1}^{\infty}$ be the sequence generated by the update rule $\theta^{(t+1)} = \theta^{(t)} - \mu^{(t)}\Delta\theta^{(t)}$ where $\Delta\theta^{(t)} = \sum_{i=1}^{K} \alpha_i^{(t)} g_i^{(t)}$ is the Nash bargaining solution $(G^{(t)})^\top G^{(t)}\alpha^{(t)} = 1/\alpha^{(t)}$ . Set $\mu^{(t)} = \min_{i \in [K]} \frac{1}{LK\alpha_i^{(t)}}$ . If
217
+
218
+ Table 3. CityScapes. Test performance for two tasks: semantic segmentation and depth estimation. Values are averages over 3 random seeds.
219
+
220
+ <table><tr><td></td><td colspan="2">Segmentation</td><td colspan="2">Depth</td><td rowspan="2">MR↓</td><td rowspan="2">Δm%↓</td></tr><tr><td></td><td>mIoU ↑</td><td>Pix Acc ↑</td><td>Abs Err ↓</td><td>Rel Err↓</td></tr><tr><td>STL</td><td>74.01</td><td>93.16</td><td>0.0125</td><td>27.77</td><td></td><td></td></tr><tr><td>LS</td><td>75.18</td><td>93.49</td><td>0.0155</td><td>46.77</td><td>6.12</td><td>22.60</td></tr><tr><td>SI</td><td>70.95</td><td>91.73</td><td>0.0161</td><td>33.83</td><td>8.00</td><td>14.11</td></tr><tr><td>RLW</td><td>74.57</td><td>93.41</td><td>0.0158</td><td>47.79</td><td>9.25</td><td>24.38</td></tr><tr><td>DWA</td><td>75.24</td><td>93.52</td><td>0.0160</td><td>44.37</td><td>6.00</td><td>21.45</td></tr><tr><td>UW</td><td>72.02</td><td>92.85</td><td>0.0140</td><td>30.13</td><td>5.25</td><td>5.89</td></tr><tr><td>MGDA</td><td>68.84</td><td>91.54</td><td>0.0309</td><td>33.50</td><td>8.75</td><td>44.14</td></tr><tr><td>PCGrad</td><td>75.13</td><td>93.48</td><td>0.0154</td><td>42.07</td><td>6.37</td><td>18.29</td></tr><tr><td>GradDrop</td><td>75.27</td><td>93.53</td><td>0.0157</td><td>47.54</td><td>5.50</td><td>23.73</td></tr><tr><td>CAGrad</td><td>75.16</td><td>93.48</td><td>0.0141</td><td>37.60</td><td>5.37</td><td>11.64</td></tr><tr><td>IMTL-G</td><td>75.33</td><td>93.49</td><td>0.0135</td><td>38.41</td><td>3.62</td><td>11.10</td></tr><tr><td>Nash-MTL</td><td>75.41</td><td>93.66</td><td>0.0129</td><td>35.02</td><td>1.75</td><td>6.82</td></tr></table>
221
+
222
+ we assume that all the loss functions are convex, then the sequence $\{\theta^{(t)}\}_{t = 1}^{\infty}$ converges to a Pareto optimal point $\theta^{*}$ .
223
+
224
+ See Appendix A for the full proofs.
225
+
226
+ # 6. Experiments
227
+
228
+ We evaluate Nash-MTL on diverse multi-task learning problems. The experiments show the superiority of Nash-MTL over previous MTL methods. To support future research and the reproducibility of the results, we will make our source code publicly available. Additional experimental results and details are provided in Appendix B.
229
+
230
+ Compared methods: We compare the following approaches: (1) Our proposed Nash-MTL algorithm described in Section 3; (2) Single task learning (STL), training an independent model for each task; (3) Linear scalarization (LS) baseline which minimizes $\sum_{k}\ell_{k}$ ; (4) Scale-invariant (SI) baseline which minimizes $\sum_{k}\log \ell_{k}$ . This baseline is invariant to rescaling each loss with a positive number; (5) Dynamic Weight Average (DWA) (Liu et al., 2019b) adjusts task weights based on the rates of loss changes over time; (6) Uncertainty weighting (UW) (Kendall et al., 2018) uses task uncertainty quantification to adjust task weights; (7) MGDA (Sener & Koltun, 2018) finds a convex combination of gradients with a minimal norm; (8) Random loss weighting (RLW) with normal distribution, scales the losses according to randomly sampled task weights (Lin et al., 2021); (9) PCGrad (Yu et al., 2020a) removes conflicting components of each gradient w.r.t the other gradients; (10) GradDrop (Chen et al., 2020) randomly drops components of the task gradients based on how much they conflict; (11) CAGrad (Liu et al., 2021a) optimizes for the average loss while explicitly controlling the minimum decrease rate across tasks; (12) IMTL-G (Liu et al., 2021b) uses an update
231
+
232
+ direction with equal projections on task gradients. IMTL-G is applied to the feature-level gradients, as was suggested by the authors. We also tried applying IMTL-G to the shared-parameters gradient for a fair comparison, but its performance was even worse.
233
+
234
+ Evaluation. For each experiment, we report the common evaluation metrics for each task. Since MTL naturally has no single objective and since the scale of per-task metrics often varies significantly, we report two metrics that capture the overall performance: (1) $\Delta_{\mathbf{m}}\%$ , the average per-task performance drop of method $m$ relative to the STL baseline denoted $b$ . Formally, $\Delta_{m}\% = \frac{1}{K}\sum_{k=1}^{K}(-1)^{\delta_k}(M_{m,k} - M_{b,k}) / M_{b,k}$ , where $M_{b,k}$ is the value of metric $M_{k}$ obtained by the baseline and $M_{m,k}$ by the compared method. $\delta_k = 1$ if a higher value is better for metric $M_{k}$ and 0 otherwise (Maninis et al., 2019; Liu et al., 2021a). (2) Mean Rank (MR): the average rank of each method across the different tasks (lower is better). A method receives the best value, $\mathrm{MR} = 1$ , if it ranks first in all tasks.
235
+
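For clarity, a minimal sketch of the $\Delta_m\%$ computation (our own code, directly following the formula above; values are reported in percent):

```python
import numpy as np

def delta_m(M_method, M_stl, higher_is_better):
    """Average per-task performance drop vs. the STL baseline, in percent."""
    m, b = np.asarray(M_method), np.asarray(M_stl)
    sign = np.where(higher_is_better, -1.0, 1.0)   # (-1)^{delta_k}
    return 100.0 * np.mean(sign * (m - b) / b)
```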
236
+ # 6.1. Multi-Task Regression for QM9
237
+
238
+ We evaluate Nash-MTL on predicting 11 properties of molecules from the QM9 dataset (Ramakrishnan et al., 2014), a widely used benchmark for graph neural networks. QM9 consists of $\sim 130K$ molecules represented as graphs annotated with both node and edge features. We used the QM9 example in PyTorch Geometric (Fey & Lenssen, 2019), using $110\mathrm{K}$ molecules for training, $10\mathrm{K}$ for validation, and $10\mathrm{K}$ as a test set. As each task target is on a different scale, this could be an issue for methods that, unlike ours, are not scale-invariant. For fairness, we normalized each task target to have zero mean and unit standard deviation. We use the popular GNN model from
239
+
240
+ Gilmer et al. (2017), a network comprised of several concatenated message passing layers, which update the node features based on both node and edge features, followed by the pooling operator from Vinyals et al. (2015). Specifically, we used the implementation from Fey & Lenssen (2019). We train each method for 300 epochs and search for the best learning-rate (lr) given by the $\Delta_{m}$ performance on the validation set. We use a learning-rate scheduler to reduce the lr once the validation $\Delta_{m}$ metric has stopped improving. The validation set is also used for early stopping.
241
+
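The target normalization and split described above can be sketched as follows (our own snippet, assuming a recent PyTorch Geometric; the exact split ordering and batch size here are illustrative):

```python
from torch_geometric.datasets import QM9
from torch_geometric.loader import DataLoader

dataset = QM9(root='data/QM9').shuffle()         # ~130K molecular graphs
mean = dataset.data.y.mean(dim=0, keepdim=True)
std = dataset.data.y.std(dim=0, keepdim=True)
dataset.data.y = (dataset.data.y - mean) / std   # zero mean, unit std targets
test_set, val_set = dataset[:10000], dataset[10000:20000]
train_loader = DataLoader(dataset[20000:], batch_size=128, shuffle=True)
```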
242
+ Predicting molecular properties in QM9 poses a significant challenge for MTL methods because the number of tasks is large and because the loss scales vary significantly. The scale issue is only partially resolved by normalization because some tasks are easier to learn than others. Prior work found that single-task learning significantly improves performance on all targets compared to MTL methods (Maron et al., 2019; Klicpera et al., 2020).
243
+
244
+ Results are shown in Figure 3 and Table 1. Nash-MTL achieves the best performance in terms of both MR and $\Delta_{m}$ . Interestingly, most MTL methods fall short compared to the simple scale-invariant baseline, which ignores gradient interaction, except for IMTL-G whose performance is on par with this baseline. This result shows that the scale-invariant property of our approach can be beneficial. See Appendix D.1 for the per-task evaluation results.
245
+
246
+ # 6.2. Scene Understanding
247
+
248
+ We follow the protocol of (Liu et al., 2019b) and evaluate Nash-MTL on the NYUv2 and Cityscapes datasets (Silberman et al., 2012; Cordts et al., 2016). NYUv2 is an indoor scene dataset that consists of 1449 RGBD images and dense per-pixel labeling with 13 classes. We use this dataset as a multitask learning benchmark for semantic segmentation, depth estimation, and surface normal prediction.
249
+
250
+ The CityScapes dataset (Cordts et al., 2016) contains 5000 high-resolution street-view images with dense per-pixel annotations. We use this dataset as a multitask learning benchmark for semantic segmentation and depth estimation. To speed up the training phase, all images were resized to $128 \times 256$ . The original dataset contains 19 categories for pixel-wise semantic segmentation, together with ground-truth depth maps. For segmentation, we used a coarser version of the labels with 7 classes.
251
+
252
+ For all MTL methods, we train a Multi-Task Attention Network (MTAN) (Liu et al., 2019b) which adds an attention mechanism on top of the SegNet architecture (Badrinarayanan et al., 2017). We follow the training procedure from Liu et al. (2019b); Yu et al. (2020a); Liu et al. (2021a). Each method is trained for 200 epochs with the Adam optimizer (Kingma & Ba, 2015) and an initial learning-rate
253
+
254
+ Table 4. MT10. Average success over 10 random seeds.
255
+
256
+ <table><tr><td></td><td>Success ± SEM</td></tr><tr><td>STL SAC</td><td>0.90 ± 0.032</td></tr><tr><td>MTL SAC</td><td>0.49 ± 0.073</td></tr><tr><td>MTL SAC + TE</td><td>0.54 ± 0.047</td></tr><tr><td>MH SAC</td><td>0.61 ± 0.036</td></tr><tr><td>SM</td><td>0.73 ± 0.043</td></tr><tr><td>CARE</td><td>0.84 ± 0.051</td></tr><tr><td>PCGrad</td><td>0.72 ± 0.022</td></tr><tr><td>CAGrad</td><td>0.83 ± 0.045</td></tr><tr><td>Nash-MTL</td><td>0.91 ± 0.031</td></tr></table>
257
+
258
+ of 1e-4. The learning-rate is halved to 5e-5 after 100 epochs. As in Liu et al. (2021a), the STL baseline refers to training task-specific SegNet models.
259
+
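The optimizer and learning-rate schedule just described can be set up as follows (a sketch; the placeholder module stands in for the MTAN network):

```python
import torch

model = torch.nn.Linear(8, 8)  # placeholder for the MTAN network
opt = torch.optim.Adam(model.parameters(), lr=1e-4)
# Halve the learning rate to 5e-5 after 100 of the 200 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=100, gamma=0.5)
for epoch in range(200):
    # ... one training epoch over the MTL batches ...
    scheduler.step()
```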
260
+ The results are presented in Table 2 and Table 3. Our method, Nash-MTL, achieves the best MR in both datasets, the best $\Delta_{m}$ on NYUv2, and the second-best $\Delta_{m}$ on CityScapes. Nash-MTL performance is well balanced across tasks. MGDA is primarily focused on the task of predicting surface normals and achieves poor performance on the other two tasks. The inherent bias of MGDA towards the task with the smallest gradient magnitude was previously discussed in Liu et al. (2021b). We note that the optimal solution under Nash-MTL for the two-task case is equivalent to independently normalizing each gradient and summing with equal weights. While this is a fairly simple approach to MTL, we show that it outperforms almost all the compared MTL methods on the two-task CityScapes benchmark.
261
+
262
+ # 6.3. Multi-Task Reinforcement Learning
263
+
264
+ We consider a multi-task RL problem and evaluate Nash-MTL on the MT10 environment from the Meta-World benchmark (Yu et al., 2020b). This benchmark involves a simulated robot trained to perform actions like pressing a button and opening a window, with each action treated as a task, for a total of 10 tasks. The goal is to learn a policy that can succeed across this diverse set of manipulation tasks. Following previous works on MTL-RL (Yu et al., 2020a; Liu et al., 2021a; Sodhani et al., 2021), we use Soft Actor-Critic (SAC) (Haarnoja et al., 2018) as the base RL algorithm. Along with the MTL methods (1) CAGrad (Liu et al., 2021a) and (2) PCGrad (Yu et al., 2020a) applied to a shared-model SAC, we evaluate the following methods: (3) STL, one SAC model per task; (4) MTL SAC with a shared model; (5) Multi-task SAC with task encoder (MTL SAC + TE; Yu et al., 2020b); (6) Multi-headed SAC (MH SAC) with task-specific heads (Yu et al., 2020b); (7) Soft Modularization (SM; Yang et al., 2020), which estimates per-task routes for different tasks in a shared model; and
265
+
266
+ Table 5. Training runtime per episode and average success for the MT10 benchmark, computed over 10 random seeds while varying the frequency of task weights updates in Nash-MTL.
267
+
268
+ <table><tr><td></td><td>Success ± SEM</td><td>Runtime[Sec.]</td></tr><tr><td>MTL-SAC</td><td>0.49 ± 0.073</td><td>7.3</td></tr><tr><td>PCGrad</td><td>0.72 ± 0.022</td><td>9.7</td></tr><tr><td>CAGrad</td><td>0.83 ± 0.045</td><td>20.9</td></tr><tr><td>Nash-MTL</td><td>0.91 ± 0.031</td><td>40.7</td></tr><tr><td>Nash-MTL-50</td><td>0.85 ± 0.022</td><td>8.6</td></tr><tr><td>Nash-MTL-100</td><td>0.87 ± 0.033</td><td>7.9</td></tr></table>
269
+
270
+ (8) CARE (Sodhani et al., 2021) which utilizes language metadata and employs a mixture of encoders. We follow the same experiment setup from Sodhani et al. (2021); Liu et al. (2021a) to train all methods over 2 million steps and report the mean success over 10 random seeds with fixed evaluation frequency. The results are presented in Table 4.
271
+
272
+ Nash-MTL achieves the best performance by a large margin. In addition, Nash-MTL is the only MTL method to reach the same performance as the per-task SAC STL baseline.
273
+
274
+ # 6.4. Scaling-up Nash-MTL
275
+
276
+ One of the major drawbacks of the SOTA MTL methods is that they require access to all task gradients to compute the optimal update direction (Sener & Koltun, 2018; Yu et al., 2020a; Liu et al., 2021b;a). This requires one to perform $K$ backward passes at each optimization step and thus scales poorly with the number of tasks. Previous works suggested using a subset of tasks (Liu et al., 2021a) or replacing the task gradients with the feature-level gradient (Sener & Koltun, 2018; Liu et al., 2021b; Javaloy & Valera, 2021) as potential speedups. In our experiments, we found that using the feature-level gradients can greatly reduce Nash-MTL performance (Appendix C). However, here we show that the simple solution of updating task weights less frequently maintains good performance while dramatically reducing the training time.
277
+
278
+ One approach to alleviate this issue is to update the task weights less frequently and reuse them in subsequent steps. We evaluate this approach using the QM9 dataset and the MT10 benchmark and present the results in Figure 4 and Table 5. We denote by Nash-MTL-$T$ the variant that updates the task weights every $T$ optimization steps.
279
+
280
+ The results show that Nash-MTL is fairly robust to varying intervals between weight updates. While this simple approach results in a small degradation in performance, it can dramatically decrease the training time of our method. For example, on QM9, updating the weights every 5/50 steps results in a $\times 3.7 / 9.8$ speedup w.r.t. updating the weights at
281
+
282
+ ![](images/ebee34900e10268308525d307fd6d7f372f02caafd376cd5aea955ee5a6c6179.jpg)
283
+ Figure 4. Test $\Delta_{m}$ for the QM9 dataset, averaged over 3 random seeds, for different intervals of task weights update.
284
+
285
+ each step. On the MT10 environment, updating the weights every 100 steps results in a $\sim \times 10$ speedup (only $\sim \times 1.1$ slower than the fastest baseline) while outperforming all other MTL baseline methods (Table 5).
286
+
287
+ # 7. Conclusion
288
+
289
+ In this work, we present Nash-MTL, a novel and principled approach for multitask learning. We frame the gradient combination step in MTL as a bargaining game and use the Nash bargaining solution to find the optimal update direction. We highlight the importance of scale invariance for multitask learning, specifically for setups with varying loss scales and gradient magnitudes. We provide a theoretical convergence analysis for Nash-MTL, showing that it converges to Pareto optimal and Pareto stationary points in the convex and non-convex settings, respectively. Finally, our experiments show that Nash-MTL achieves state-of-the-art results on various benchmarks across multiple domains.
290
+
291
+ # 8. Acknowledgements
292
+
293
+ This work was funded by the Israel Innovation Authority through the AVATAR consortium; by the Israel Science Foundation (ISF grant 737/2018); and by an equipment grant to GC and Bar-Ilan University (ISF grant 2332/18).
294
+
295
+ # References
296
+
297
+ Achituve, I., Maron, H., and Chechik, G. Self-supervised learning for domain adaptation on point clouds. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 123-133, 2021.
298
+ Badrinarayanan, V., Kendall, A., and Cipolla, R. Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE transactions on pattern analysis and machine intelligence, 39(12):2481-2495, 2017.
299
+
300
+ Baek, J. and Farias, V. F. Fair exploration via axiomatic bargaining. arXiv preprint arXiv:2106.02553, 2021.
301
+ Baxter, J. A model of inductive bias learning. J. Artif. Intell. Res., 2000.
302
+ Binois, M., Picheny, V., Taillandier, P., and Habbal, A. The Kalai-Smorodinsky solution for many-objective Bayesian optimization. J. Mach. Learn. Res., 21(150):1-42, 2020.
303
+ Caruana, R. Multitask learning. Machine learning, 28(1): 41-75, 1997.
304
+ Chen, Z., Badrinarayanan, V., Lee, C.-Y., and Rabinovich, A. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In International Conference on Machine Learning, pp. 794-803. PMLR, 2018.
305
+ Chen, Z., Ngiam, J., Huang, Y., Luong, T., Kretzschmar, H., Chai, Y., and Anguelov, D. Just pick a sign: Optimizing deep multitask models with gradient sign dropout. arXiv preprint arXiv:2010.06808, 2020.
306
+ Cordts, M., Omran, M., Ramos, S., Rehfeld, T., Enzweiler, M., Benenson, R., Franke, U., Roth, S., and Schiele, B. The cityscapes dataset for semantic urban scene understanding. In Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
307
+ Crawshaw, M. Multi-task learning with deep neural networks: A survey. arXiv preprint arXiv:2009.09796, 2020.
308
+ Dagan, N. and Volij, O. The bankruptcy problem: a cooperative bargaining approach. Mathematical Social Sciences, 26(3):287-297, 1993.
309
+ Dai, J., He, K., and Sun, J. Instance-aware semantic segmentation via multi-task network cascades. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3150-3158, 2016.
310
+ Désideri, J.-A. Multiple-gradient descent algorithm (MGDA) for multiobjective optimization. Comptes Rendus Mathematique, 350(5-6):313-318, 2012.
311
+ Du, Y., Czarnecki, W. M., Jayakumar, S. M., Farajtabar, M., Pascanu, R., and Lakshminarayanan, B. Adapting auxiliary losses using gradient similarity. arXiv preprint arXiv:1812.02224, 2018.
312
+ Fey, M. and Lenssen, J. E. Fast graph representation learning with PyTorch Geometric. In ICLR Workshop on Representation Learning on Graphs and Manifolds, 2019.
313
+ Gilmer, J., Schoenholz, S. S., Riley, P. F., Vinyals, O., and Dahl, G. E. Neural message passing for quantum chemistry. In International conference on machine learning, pp. 1263-1272. PMLR, 2017.
314
+
315
+ Grosu, D., Chronopoulos, A. T., and Leung, M.-Y. Load balancing in distributed systems: An approach using cooperative games. In Proceedings 16th International Parallel and Distributed Processing Symposium, pp. 10-pp. IEEE, 2002.
316
+ Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International Conference on Machine Learning. PMLR, 2018.
317
+ Hashimoto, K., Xiong, C., Tsuruoka, Y., and Socher, R. A joint many-task model: Growing a neural network for multiple nlp tasks. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 1923-1933, 2017.
318
+ Jaggi, M. Revisiting Frank-Wolfe: Projection-free sparse convex optimization. In International Conference on Machine Learning, pp. 427-435. PMLR, 2013.
319
+ Javaloy, A. and Valera, I. Rotograd: Dynamic gradient homogenization for multi-task learning. arXiv preprint arXiv:2103.02631, 2021.
320
+ Kendall, A., Gal, Y., and Cipolla, R. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 7482-7491, 2018.
321
+ Kim, S. Cooperative federated learning-based task offloading scheme for tactical edge networks. IEEE Access, 9: 145739-145747, 2021.
322
+ Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. CoRR, abs/1412.6980, 2015.
323
+ Klicpera, J., Groß, J., and Günnemann, S. Directional message passing for molecular graphs. arXiv preprint arXiv:2003.03123, 2020.
324
+ Leshem, A. and Zehavi, E. Smart carrier sensing for distributed computation of the generalized nash bargaining solution. In 2011 17th International Conference on Digital Signal Processing (DSP), pp. 1-5. IEEE, 2011.
325
+ Lin, B., Ye, F., and Zhang, Y. A closer look at loss weighting in multi-task learning. arXiv preprint arXiv:2111.10603, 2021.
326
+ Lipp, T. and Boyd, S. Variations and extension of the convex-concave procedure. Optimization and Engineering, 17(2):263-287, 2016.
327
+ Liu, B., Liu, X., Jin, X., Stone, P., and Liu, Q. Conflict-averse gradient descent for multi-task learning. Advances in Neural Information Processing Systems, 34, 2021a.
328
+
329
+ Liu, L., Li, Y., Kuang, Z., Xue, J.-H., Chen, Y., Yang, W., Liao, Q., and Zhang, W. Towards impartial multitask learning. In International Conference on Learning Representations, 2021b.
330
+ Liu, S., Davison, A., and Johns, E. Self-supervised generalisation with meta auxiliary learning. Advances in Neural Information Processing Systems, 32, 2019a.
331
+ Liu, S., Johns, E., and Davison, A. J. End-to-end multi-task learning with attention. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1871-1880, 2019b.
332
+ Maninis, K.-K., Radosavovic, I., and Kokkinos, I. Attentive single-tasking of multiple tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1851-1860, 2019.
333
+ Maron, H., Ben-Hamu, H., Serviansky, H., and Lipman, Y. Provably powerful graph networks. arXiv preprint arXiv:1905.11136, 2019.
334
+ Misra, I., Shrivastava, A., Gupta, A., and Hebert, M. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3994-4003, 2016.
335
+ Nash, J. Two-person cooperative games. Econometrica, 21(1):128-140, 1953. ISSN 00129682, 14680262. URL http://www.jstor.org/stable/1906951.
336
+ Navon, A., Achituve, I., Maron, H., Chechik, G., and Fetaya, E. Auxiliary learning by implicit differentiation. In International Conference on Learning Representations (ICLR), 2021a.
337
+ Navon, A., Shamsian, A., Chechik, G., and Fetaya, E. Learning the pareto front with hypernetworks. In International Conference on Learning Representations, 2021b. URL https://openreview.net/forum?id=NjF772F4ZZR.
338
+ Panageas, I., Piliouras, G., and Wang, X. First-order methods almost always avoid saddle points: The case of vanishing step-sizes. In Neural Information Processing Systems (NeurIPS), 2019.
339
+ Pinto, L. and Gupta, A. Learning to push by grasping: Using multiple tasks for effective learning. In 2017 IEEE international conference on robotics and automation (ICRA), pp. 2161-2168. IEEE, 2017.
340
+ Qiao, H., Rozenblit, J., Szidarovszky, F., and Yang, L. Multiagent learning model with bargaining. In Proceedings of the 2006 winter simulation conference, pp. 934-940. IEEE, 2006.
341
+
342
+ Ramakrishnan, R., Dral, P. O., Rupp, M., and Von Lilienfeld, O. A. Quantum chemistry structures and properties of 134 kilo molecules. Scientific data, 1(1):1-7, 2014.
343
+ Rezaee, M. J., Eshkevari, M., Saberi, M., and Hussain, O. GBK-means clustering algorithm: An improvement to the K-means algorithm based on the bargaining game. Knowledge-Based Systems, 213:106672, 2021.
344
+ Ruder, S. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017.
345
+ Schaul, T., Borsa, D., Modayil, J., and Pascanu, R. Ray interference: a source of plateaus in deep reinforcement learning. arXiv preprint arXiv:1904.11455, 2019.
346
+ Sener, O. and Koltun, V. Multi-task learning as multi-objective optimization. In Advances in Neural Information Processing Systems, pp. 527-538, 2018.
347
+ Shi, C., Wang, F., Salous, S., Zhou, J., and Hu, Z. Nash bargaining game-theoretic framework for power control in distributed multiple-radar architecture underlying wireless communication system. Entropy, 20(4):267, 2018.
348
+ Silberman, N., Hoiem, D., Kohli, P., and Fergus, R. Indoor segmentation and support inference from rgbd images. In European conference on computer vision, pp. 746-760. Springer, 2012.
349
+ Sodhani, S., Zhang, A., and Pineau, J. Multi-task reinforcement learning with context-based representations. arXiv preprint arXiv:2102.06177, 2021.
350
+ Sriperumbudur, B. K. and Lanckriet, G. R. On the convergence of the concave-convex procedure. In NIPS, volume 9, pp. 1759-1767. Citeseer, 2009.
351
+ Standley, T., Zamir, A. R., Chen, D., Guibas, L. J., Malik, J., and Savarese, S. Which tasks should be learned together in multi-task learning? In International Conference on Machine Learning ICML, 2020.
352
+ Suteu, M. and Guo, Y. Regularizing deep multi-task networks using orthogonal gradients. arXiv preprint arXiv:1912.06844, 2019.
353
+ Szép, J. and Forgó, F. Introduction to the Theory of Games. Springer, 1985.
354
+ Thomson, W. Cooperative models of bargaining. In Handbook of Game Theory with Economic Applications, volume 2, chapter 35, pp. 1237-1284. Elsevier, 1994.
355
+ Vinyals, O., Bengio, S., and Kudlur, M. Order matters: Sequence to sequence for sets. arXiv preprint arXiv:1511.06391, 2015.
356
+
357
+ Wang, Z., Tsvetkov, Y., Firat, O., and Cao, Y. Gradient vaccine: Investigating and improving multi-task optimization in massively multilingual models. In International Conference on Learning Representations, 2020.
358
+ Yang, R., Xu, H., Wu, Y., and Wang, X. Multi-task reinforcement learning with soft modularization. arXiv preprint arXiv:2003.13661, 2020.
359
+ Yu, T., Kumar, S., Gupta, A., Levine, S., Hausman, K., and Finn, C. Gradient surgery for multi-task learning. In Advances in Neural Information Processing Systems, 2020a.
360
+ Yu, T., Quillen, D., He, Z., Julian, R., Hausman, K., Finn, C., and Levine, S. Meta-world: A benchmark and evaluation for multi-task and meta reinforcement learning. In Conference on Robot Learning, pp. 1094-1100. PMLR, 2020b.
361
+ Yuille, A. L. and Rangarajan, A. The concave-convex procedure. Neural Computation, 15(4):915-936, 2003.
362
+ Zhang, Z., Shi, J., Chen, H.-H., Guizani, M., and Qiu, P. A cooperation strategy based on nash bargaining solution in cooperative relay networks. IEEE Transactions on Vehicular Technology, 57(4):2570-2577, 2008.
363
+ Zhang, Z., Luo, P., Loy, C. C., and Tang, X. Facial landmark detection by deep multi-task learning. In European conference on computer vision, pp. 94-108. Springer, 2014.
364
+ Zhao, X., Li, H., Shen, X., Liang, X., and Wu, Y. A modulation module for multi-task learning with applications in image retrieval. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 401-416, 2018.
365
+
366
+ # A. Proofs
367
+
368
+ Lemma A.1. If $\mathcal{L}$ is differentiable and $L$-smooth (Assumption 5.3), then $\mathcal{L}(\theta') \leq \mathcal{L}(\theta) + \nabla \mathcal{L}(\theta)^{\top} (\theta' - \theta) + \frac{L}{2} \| \theta' - \theta \|^{2}$ .
369
+
370
+ Proof. Fix $\theta, \theta' \in \mathrm{dom}(\mathcal{L}) \subseteq \mathbb{R}^d$ . Since $\mathrm{dom}(\mathcal{L})$ is a convex and open set, we can fix $\epsilon > 0$ such that $\theta + t(\theta' - \theta) \in \mathrm{dom}(\mathcal{L})$ for all $t \in [-\epsilon, 1 + \epsilon]$ . Thus, we can define a function $\bar{\mathcal{L}}: [-\epsilon, 1 + \epsilon] \to \mathbb{R}$ by $\bar{\mathcal{L}}(t) = \mathcal{L}(\theta + t(\theta' - \theta))$ . With this, $\bar{\mathcal{L}}(1) = \mathcal{L}(\theta')$ , $\bar{\mathcal{L}}(0) = \mathcal{L}(\theta)$ , and $\nabla \bar{\mathcal{L}}(t) = \nabla \mathcal{L}(\theta + t(\theta' - \theta))^{\top}(\theta' - \theta)$ for $t \in [0,1] \subset (-\epsilon, 1 + \epsilon)$ . From Assumption 5.3, $\| \nabla \mathcal{L}(\theta') - \nabla \mathcal{L}(\theta) \| \leq L \| \theta' - \theta \|$ , and therefore
371
+
372
+ $$
373
+ \begin{array}{l} \left\| \nabla \bar {\mathcal {L}} \left(t ^ {\prime}\right) - \nabla \bar {\mathcal {L}} (t) \right\| = \left\| \left(\nabla \mathcal {L} \left(\theta + t ^ {\prime} \left(\theta^ {\prime} - \theta\right)\right) - \nabla \mathcal {L} \left(\theta + t \left(\theta^ {\prime} - \theta\right)\right)\right) ^ {\top} \left(\theta^ {\prime} - \theta\right) \right\| \\ \leq \| \theta^ {\prime} - \theta \| \left\| \nabla \mathcal {L} \left(\theta + t ^ {\prime} \left(\theta^ {\prime} - \theta\right)\right) - \nabla \mathcal {L} \left(\theta + t \left(\theta^ {\prime} - \theta\right)\right) \right\| \\ \leq L \| \theta^ {\prime} - \theta \| \left\| \left(t ^ {\prime} - t\right) \left(\theta^ {\prime} - \theta\right) \right\| \\ \leq L \| \theta^ {\prime} - \theta \| ^ {2} \left| t ^ {\prime} - t \right|. \\ \end{array}
374
+ $$
375
+
376
+ Hence, $\nabla \bar{\mathcal{L}}:[0,1]\to \mathbb{R}$ is Lipschitz continuous and, in particular, continuous. Applying the fundamental theorem of calculus to $\bar{\mathcal{L}}$ , we obtain
377
+
378
+ $$
379
+ \begin{array}{l} \mathcal {L} \left(\theta^ {\prime}\right) = \mathcal {L} (\theta) + \int_ {0} ^ {1} \nabla \mathcal {L} \left(\theta + t \left(\theta^ {\prime} - \theta\right)\right) ^ {\top} \left(\theta^ {\prime} - \theta\right) d t \\ = \mathcal {L} (\theta) + \nabla \mathcal {L} (\theta) ^ {\top} (\theta^ {\prime} - \theta) + \int_ {0} ^ {1} \left(\nabla \mathcal {L} (\theta + t (\theta^ {\prime} - \theta)) - \nabla \mathcal {L} (\theta)\right) ^ {\top} (\theta^ {\prime} - \theta) d t \\ \leq \mathcal {L} (\theta) + \nabla \mathcal {L} (\theta) ^ {\top} (\theta^ {\prime} - \theta) + \int_ {0} ^ {1} \| \nabla \mathcal {L} (\theta + t (\theta^ {\prime} - \theta)) - \nabla \mathcal {L} (\theta) \| \| \theta^ {\prime} - \theta \| d t \\ \leq \mathcal {L} (\theta) + \nabla \mathcal {L} (\theta) ^ {\top} \left(\theta^ {\prime} - \theta\right) + \int_ {0} ^ {1} t L \| \theta^ {\prime} - \theta \| ^ {2} d t \\ = \mathcal {L} (\theta) + \nabla \mathcal {L} (\theta) ^ {\top} \left(\theta^ {\prime} - \theta\right) + \frac {L}{2} \| \theta^ {\prime} - \theta \| ^ {2}. \tag {7} \\ \end{array}
380
+ $$
381
+
382
+ Theorem (5.4). Let $\{\theta^{(t)}\}_{t=1}^{\infty}$ be the sequence generated by the update rule $\theta^{(t+1)} = \theta^{(t)} - \mu^{(t)}\Delta\theta^{(t)}$ where $\Delta\theta^{(t)} = \sum_{i=1}^{K}\alpha_{i}^{(t)}g_{i}^{(t)}$ is the Nash bargaining solution $(G^{(t)})^\top G^{(t)}\alpha^{(t)} = 1/\alpha^{(t)}$ . Set $\mu^{(t)} = \min_{i\in [K]}\frac{1}{LK\alpha_{i}^{(t)}}$ . The sequence $\{\theta^{(t)}\}_{t=1}^{\infty}$ has a subsequence that converges to a Pareto stationary point $\theta^*$ . Moreover all the loss functions $(\ell_1(\theta^{(t)}),\dots,\ell_K(\theta^{(t)}))$ converge to $(\ell_1(\theta^*),\dots,\ell_K(\theta^*))$ .
383
+
384
+ Proof. We first note that if at some step we reach a Pareto stationary point, the algorithm halts, and the sequence stays fixed at that point and therefore converges. In what follows, we assume that we never reach an exact Pareto stationary point at any finite step.
385
+
386
+ We note that the norm of $\Delta \theta^{(t)}$ is $\sqrt{K}$ , as $||\Delta \theta^{(t)}||^2 = \sum_{i=1}^{K} \alpha_i g_i^\top \Delta \theta^{(t)} = \sum_{i=1}^{K} \alpha_i \cdot 1 / \alpha_i = K$ . For each loss $\ell_i$ , using Lemma A.1 we have
387
+
388
+ $$
389
+ \begin{array}{ll} \ell_ {i} \left(\theta^ {(t + 1)}\right) \leq \ell_ {i} \left(\theta^ {(t)}\right) - \mu^ {(t)} \nabla \ell_ {i} \left(\theta^ {(t)}\right) ^ {\top} \Delta \theta^ {(t)} + \frac {L}{2} \left\| \mu^ {(t)} \Delta \theta^ {(t)} \right\| ^ {2} & (8) \\ = \ell_ {i} \left(\theta^ {(t)}\right) - \frac {\mu^ {(t)}}{\alpha_ {i} ^ {(t)}} + \frac {\left(\mu^ {(t)}\right) ^ {2} L K}{2} & (9) \\ = \ell_ {i} \left(\theta^ {(t)}\right) - \frac {\mu^ {(t)}}{\alpha_ {i} ^ {(t)}} + \frac {\mu^ {(t)}}{2} \min _ {j} \frac {1}{\alpha_ {j} ^ {(t)}} \leq \ell_ {i} \left(\theta^ {(t)}\right) - \frac {\mu^ {(t)}}{2 \alpha_ {i} ^ {(t)}} < \ell_ {i} \left(\theta^ {(t)}\right) & (10) \\ \end{array}
390
+ $$
391
+
392
+ This shows that our update decreases all the loss functions. Averaging inequality (9) over all losses, we get for $\mathcal{L}(\theta) = \frac{1}{K}\sum_{i=1}^{K}\ell_i(\theta)$ :
393
+
394
+ $$
395
+ \mathcal {L} \left(\theta^ {(t + 1)}\right) \leq \mathcal {L} \left(\theta^ {(t)}\right) - \mu^ {(t)} \frac {1}{K} \sum_ {i = 1} ^ {K} \frac {1}{\alpha_ {i} ^ {(t)}} + \frac {\left(\mu^ {(t)}\right) ^ {2} L K}{2} \leq \mathcal {L} \left(\theta^ {(t)}\right) - L K \left(\mu^ {(t)}\right) ^ {2} + \frac {\left(\mu^ {(t)}\right) ^ {2} L K}{2} = \mathcal {L} \left(\theta^ {(t)}\right) - \frac {L K \left(\mu^ {(t)}\right) ^ {2}}{2}. \tag {11}
396
+ $$
397
+
398
+ From this we can conclude that $\sum_{\tau = 1}^{t}\frac{LK(\mu^{(\tau)})^2}{2}\leq \mathcal{L}(\theta^{(1)}) - \mathcal{L}(\theta^{(t + 1)})$ . As $\mathcal{L}(\theta^{(t)})$ is bounded from below, the infinite series $\sum_{t = 1}^{\infty}\frac{LK(\mu^{(t)})^2}{2}$ must be finite, and hence $\mu^{(t)}\to 0$ . It follows that $\min_{i\in [K]}1 / \alpha_i^{(t)}\to 0$ and therefore $||\alpha^{(t)}||\to \infty$ .
399
+
400
+ We will now show that $||1 / \alpha^{(t)}||$ remains bounded as $t\to \infty$ . As the sequence $\mathcal{L}(\theta^{(t)})$ is decreasing, the sequence $\theta^{(t)}$ lies in the sublevel set $\{\theta :\mathcal{L}(\theta)\leq \mathcal{L}(\theta^{(1)})\}$ , which is closed and bounded and therefore compact. It follows that there exists $M < \infty$ such that $||g_i^{(t)}||\leq M$ for all $t$ and $i\in [K]$ . We have, for all $i$ and $t$ , $|1 / \alpha_{i}^{(t)}| = |(g_{i}^{(t)})^{\top}\Delta\theta^{(t)}|\leq \sqrt{K} ||g_{i}^{(t)}||\leq \sqrt{K} M < \infty$ , and so $||1 / \alpha^{(t)}||$ is bounded. In addition, from $(G^{(t)})^\top G^{(t)}\alpha^{(t)} = 1/\alpha^{(t)}$ we have $||1 / \alpha^{(t)}||\geq \sigma_K((G^{(t)})^\top G^{(t)})||\alpha^{(t)}||$ , where $\sigma_K((G^{(t)})^\top G^{(t)})$ is the smallest singular value of $(G^{(t)})^\top G^{(t)}$ . Since the norm of $\alpha^{(t)}$ goes to infinity while the norm of $1 / \alpha^{(t)}$ is bounded, it follows that $\sigma_K((G^{(t)})^\top G^{(t)})\to 0$ .
401
+
402
+ Now, since $\{\theta : \mathcal{L}(\theta) \leq \mathcal{L}(\theta^{(1)})\}$ is compact, there exists a subsequence $\theta^{(t_j)}$ that converges to some point $\theta^*$ . As $\sigma_K((G^{(t)})^\top G^{(t)}) \to 0$ , we have by continuity that $\sigma_K(G_*^\top G_*) = 0$ , where $G_*$ is the matrix of gradients at $\theta^*$ . This means that the gradients at $\theta^*$ are linearly dependent, and therefore $\theta^*$ is Pareto stationary by Assumption 5.1. As for all $i$ the sequences $\{\ell_i(\theta^{(t)})\}_{t=1}^{\infty}$ are monotonically decreasing and bounded from below, they all converge. Since $\ell_i(\theta^*)$ is the limit of a subsequence, we get that $\ell_i(\theta^{(t)}) \xrightarrow{t \to \infty} \ell_i(\theta^*)$ .
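+
+ As a quick numeric sanity check of the identities used in this proof, one can solve the condition $(G^\top G)\alpha = 1/\alpha$ for a random gradient matrix with a general-purpose solver and verify that $\|\Delta\theta\|^2 = K$ . This is a sketch using scipy, not the CCP solver of Section 3.2:
+
+ ```python
+ # Solve alpha * (G^T G alpha) - 1 = 0 for random task gradients and check
+ # that the resulting update direction has squared norm K (a numeric sketch).
+ import numpy as np
+ from scipy.optimize import least_squares
+
+ rng = np.random.default_rng(0)
+ d, K = 50, 3
+ G = rng.normal(size=(d, K))        # columns are the task gradients g_i
+ GtG = G.T @ G
+
+ res = lambda a: a * (GtG @ a) - 1.0           # Nash optimality residual
+ alpha = least_squares(res, x0=np.ones(K), bounds=(1e-8, np.inf)).x
+
+ delta = G @ alpha                             # update direction Delta theta
+ print(np.allclose(GtG @ alpha, 1.0 / alpha))  # True when the solver converges
+ print(delta @ delta, K)                       # ||Delta theta||^2 equals K
+ ```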
403
+
404
+ We now show that if we add a convexity assumption, we can prove convergence to the Pareto front.
405
+
406
+ Theorem (5.5). Let $\{\theta^{(t)}\}_{t=1}^{\infty}$ be the sequence generated by the update rule $\theta^{(t+1)} = \theta^{(t)} - \mu^{(t)}\Delta\theta^{(t)}$ where $\Delta\theta^{(t)} = \sum_{i=1}^{K}\alpha_{i}^{(t)}g_{i}^{(t)}$ is the Nash bargaining solution $(G^{(t)})^{\top}G^{(t)}\alpha^{(t)} = 1/\alpha^{(t)}$ . Set $\mu^{(t)} = \min_{i\in [K]}\frac{1}{LK\alpha_{i}^{(t)}}$ . If we also assume that all the loss functions are convex then the sequence $\{\theta^{(t)}\}_{t=1}^{\infty}$ converges to a Pareto optimal point $\theta^{*}$ .
407
+
408
+ Proof. We note that this proof uses intermediate results from the proof of Theorem 5.4. Given Theorem 5.4, it suffices to prove that the sequence $\{\theta^{(t)}\}_{t=1}^{\infty}$ converges: this would mean it converges to the partial limit $\theta^{*}$ , which is Pareto stationary, and by convexity it would be Pareto optimal (as the minimizer of a convex combination of the losses). For a convex and differentiable loss function, we have
409
+
410
+ $$
411
+ \ell \left(\theta^ {\prime}\right) \geq \ell (\theta) + \nabla \ell \left(\theta\right) ^ {\top} \left(\theta^ {\prime} - \theta\right) \tag {12}
412
+ $$
413
+
414
+ We can bound
415
+
416
+ $$
417
+ \begin{array}{l} \left\| \theta^ {(t + 1)} - \theta^ {*} \right\| ^ {2} = \left\| \theta^ {(t)} - \mu^ {(t)} \Delta \theta^ {(t)} - \theta^ {*} \right\| ^ {2} (13) \\ = \left\| \theta^ {(t)} - \theta^ {*} \right\| ^ {2} + \left(\mu^ {(t)}\right) ^ {2} \left\| \Delta \theta^ {(t)} \right\| ^ {2} - 2 \mu^ {(t)} \left(\Delta \theta^ {(t)}\right) ^ {\top} \left(\theta^ {(t)} - \theta^ {*}\right) (14) \\ = \left\| \theta^ {(t)} - \theta^ {*} \right\| ^ {2} + \left(\mu^ {(t)}\right) ^ {2} K - 2 \mu^ {(t)} \sum_ {i} \alpha_ {i} ^ {(t)} \left(g _ {i} ^ {(t)}\right) ^ {\top} \left(\theta^ {(t)} - \theta^ {*}\right) (15) \\ \leq \left| \left| \theta^ {(t)} - \theta^ {*} \right| \right| ^ {2} + \left(\mu^ {(t)}\right) ^ {2} K + 2 \mu^ {(t)} \sum_ {i} \alpha_ {i} ^ {(t)} \left(\ell_ {i} \left(\theta^ {*}\right) - \ell_ {i} \left(\theta^ {(t)}\right)\right) (16) \\ \leq \left| \left| \theta^ {(t)} - \theta^ {*} \right| \right| ^ {2} + \left(\mu^ {(t)}\right) ^ {2} K + 2 \mu^ {(t)} \sum_ {i} \alpha_ {i} ^ {(t)} \left(\ell_ {i} \left(\theta^ {(t + 1)}\right) - \ell_ {i} \left(\theta^ {(t)}\right)\right) (17) \\ \leq \left\| \theta^ {(t)} - \theta^ {*} \right\| ^ {2} + \left(\mu^ {(t)}\right) ^ {2} K - 2 \mu^ {(t)} \sum_ {i} \alpha_ {i} ^ {(t)} \frac {\mu^ {(t)}}{2 \alpha_ {i} ^ {(t)}} (18) \\ = \left\| \theta^ {(t)} - \theta^ {*} \right\| ^ {2} (19) \\ \end{array}
418
+ $$
419
+
420
+ In Eq. 15 we use the definition of $\Delta \theta^{(t)}$ and the fact that its norm equals $\sqrt{K}$ . In Eq. 16 we use convexity and Eq. 12. Eq. 17 uses the fact, shown above, that the losses are monotonically decreasing and converge to $\ell_i(\theta^*)$ . In Eq. 18 we use Eq. 10.
421
+
422
+ We have that the sequence $||\theta^{(t)} - \theta^{*}||$ is monotonically decreasing and bounded from below by zero. Moreover, it has a subsequence that converges to zero, so the sequence $||\theta^{(t)} - \theta^{*}||$ itself must converge to zero, or equivalently $\theta^{(t)}\to \theta^{*}$ .
423
+
424
+ ![](images/e238a06deb18031b2796c8696e337f400756b6e60185086abd7f47d4323f8863.jpg)
425
+
426
+ Proposition (3.1). Denote the objective for the optimization problem in Eq. 5 by $\phi (\alpha) = \sum_{i}\beta_{i}(\alpha) + \varphi (\alpha)$ . Then, $\phi \left(\alpha^{(\tau +1)}\right)\leq \phi \left(\alpha^{(\tau)}\right)$ for all $\tau \geq 1$ .
427
+
428
+ Proof. In our concave-convex procedure, we use the following linearization at the $\tau$ -th iteration:
429
+
430
+ $$
431
+ \tilde {\varphi} _ {\tau} (\alpha) = \varphi (\alpha^ {(\tau)}) + \nabla \varphi (\alpha^ {(\tau)}) ^ {\top} (\alpha - \alpha^ {(\tau)}).
432
+ $$
433
+
434
+ Then,
435
+
436
+ $$
437
+ \tilde {\varphi} _ {\tau} \left(\alpha^ {(\tau)}\right) = \varphi \left(\alpha^ {(\tau)}\right). \tag {20}
438
+ $$
439
+
440
+ Moreover, since $\varphi$ is concave and differentiable, we have that
441
+
442
+ $$
443
+ \varphi \left(\alpha^ {(\tau + 1)}\right) \leq \varphi \left(\alpha^ {(\tau)}\right) + \nabla \varphi \left(\alpha^ {(\tau)}\right) ^ {\top} \left(\alpha^ {(\tau + 1)} - \alpha^ {(\tau)}\right) = \tilde {\varphi} _ {\tau} \left(\alpha^ {(\tau + 1)}\right). \tag {21}
444
+ $$
445
+
446
+ Furthermore, since we minimize the convex objective $\sum_{i}\beta_{i}(\alpha) + \tilde{\varphi}_{\tau}(\alpha)$ at each iteration of our concave-convex procedure (over the convex feasible set),
447
+
448
+ $$
449
+ \sum_ {i} \beta_ {i} \left(\alpha^ {(\tau)}\right) + \tilde {\varphi} _ {\tau} \left(\alpha^ {(\tau)}\right) \geq \sum_ {i} \beta_ {i} \left(\alpha^ {(\tau + 1)}\right) + \tilde {\varphi} _ {\tau} \left(\alpha^ {(\tau + 1)}\right). \tag {22}
450
+ $$
451
+
452
+ Using Eq. 20-Eq. 22, we have that
453
+
454
+ $$
455
+ \begin{array}{l} \phi (\alpha^ {(\tau)}) = \sum_ {i} \beta_ {i} (\alpha^ {(\tau)}) + \varphi (\alpha^ {(\tau)}) = \sum_ {i} \beta_ {i} (\alpha^ {(\tau)}) + \tilde {\varphi} _ {\tau} (\alpha^ {(\tau)}) \geq \sum_ {i} \beta_ {i} (\alpha^ {(\tau + 1)}) + \tilde {\varphi} _ {\tau} (\alpha^ {(\tau + 1)}) \\ \geq \sum_ {i} \beta_ {i} (\alpha^ {(\tau + 1)}) + \varphi (\alpha^ {(\tau + 1)}) = \phi (\alpha^ {(\tau + 1)}). \\ \end{array}
456
+ $$
457
+
458
+ This proves the statement.
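+
+ The monotone-descent argument can be seen on a one-dimensional toy problem. The following sketch uses illustrative choices of $\beta$ and $\varphi$ (not the terms of Eq. 5) and checks that each linearize-and-minimize step decreases $\phi$ :
+
+ ```python
+ # CCP on phi(x) = beta(x) + varphi(x) with beta(x) = x^2 (convex) and
+ # varphi(x) = -(x - 3)^2 / 2 (concave). Minimizing beta plus the linearization
+ # of varphi has the closed form x' = -varphi'(x) / 2, and Proposition 3.1
+ # guarantees phi never increases along the iterates.
+ beta = lambda x: x ** 2
+ varphi = lambda x: -0.5 * (x - 3.0) ** 2
+ dvarphi = lambda x: -(x - 3.0)
+
+ x = 10.0
+ for _ in range(30):
+     x_next = -dvarphi(x) / 2.0                 # argmin of x'^2 + dvarphi(x) * x'
+     assert beta(x_next) + varphi(x_next) <= beta(x) + varphi(x) + 1e-12
+     x = x_next
+ print(x)  # converges to -3, the global minimizer of phi
+ ```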
459
+
460
+ # B. Experimental Details
461
+
462
+ We provide here full experimental details for all experiments described in the main text.
463
+
464
+ Implementation Details. We apply all gradient manipulation methods to the gradients of the shared weights, with the exception of IMTL-G, which is applied to the feature-level gradients, as originally proposed by its authors. We also tried applying IMTL-G to the shared-parameter gradients for a fair comparison, but it did not perform as well. We set CAGrad's $c$ hyperparameter to 0.4, which was reported to yield the best performance on NYUv2 and Cityscapes (Liu et al., 2021a). For DWA (Liu et al., 2019b) we set the temperature hyperparameter to 2, which was found empirically to be optimal across all architectures. For RLW (Lin et al., 2021) we sample the weights from a normal distribution.
465
+
466
+ QM9. We adapt the QM9 example in PyTorch Geometric (Fey & Lenssen, 2019) and train the popular GNN model from Gilmer et al. (2017), using the publicly available implementation<sup>1</sup> provided by Fey & Lenssen (2019). We use 110K molecules for training, 10K for validation, and 10K as a test set. Each task's targets are normalized to have zero mean and unit standard deviation. We train each method for 300 epochs with a batch size of 120 and search for the learning rate (lr) in $\{1e - 3,5e - 4,1e - 4\}$ . We use a ReduceLROnPlateau scheduler to decrease the lr when the validation $\Delta_{m}$ metric stops improving. Additionally, we use the validation $\Delta_{m}$ for early stopping.
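+
+ A minimal sketch of this schedule and stopping logic, assuming PyTorch's built-in ReduceLROnPlateau; `train_one_epoch`, `evaluate_delta_m`, and the patience value are placeholders/assumptions rather than the paper's exact settings:
+
+ ```python
+ import torch
+
+ def fit(model, train_one_epoch, evaluate_delta_m, epochs=300, patience=30):
+     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+     scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min")
+     best_dm, bad_epochs = float("inf"), 0
+     for _ in range(epochs):
+         train_one_epoch(model, optimizer)     # placeholder training loop
+         val_dm = evaluate_delta_m(model)      # validation Delta_m (lower is better)
+         scheduler.step(val_dm)                # decrease lr when Delta_m plateaus
+         if val_dm < best_dm:
+             best_dm, bad_epochs = val_dm, 0
+             torch.save(model.state_dict(), "best.pt")   # keep the best model
+         else:
+             bad_epochs += 1
+             if bad_epochs >= patience:        # early stopping on validation Delta_m
+                 break
+ ```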
467
+
468
+ Scene Understanding. We follow the training and evaluation procedure used in previous work on MTL (Liu et al., 2019b; Yu et al., 2020a; Liu et al., 2021a). However, unlike (Liu et al., 2019b), we add data augmentations (DA) during training for
469
+
470
+ ![](images/0c4c9b230248a60b8bfa3b346e1be2fec9f81ee9919068a44c68299f01c48c14.jpg)
471
+ (a) Average loss
472
+
473
+ ![](images/3e986b0e04938f68df04e8650803de83f83971272558423f2376be0aa6e7ae4a.jpg)
474
+ (b) $\ell_1$
475
+ Figure 5. Illustrative example. Visualization of the loss surfaces in our illustrative example of Figure 1
476
+
477
+ ![](images/67edc42ad38f85a74d6a8d188c5a13435917a8da2c3c4e5931bd201d664df963.jpg)
478
+ (c) $\ell_2$
479
+
480
+ all the compared methods, similar to (Liu et al., 2021a;b). We train each method for 200 epochs with an initial learning rate of $1e - 4$ . The learning rate is reduced to $5e - 5$ after 100 epochs. For the MTL methods, we train a Multi-Task Attention Network (MTAN) (Liu et al., 2019b) built upon SegNet (Badrinarayanan et al., 2017). Similar to previous works (Liu et al., 2021a), the STL baseline refers to training task-specific SegNet models. We use a batch size of 2 and 8 for NYUv2 and CityScapes, respectively. To align with previous work on MTL (Liu et al., 2019b; Yu et al., 2020a; Liu et al., 2021a), we report the test performance averaged over the last 10 epochs.
481
+
482
+ MT10. Following previous works (Yu et al., 2020a; Liu et al., 2021a; Sodhani et al., 2021), we use multitask Soft Actor-Critic (SAC) (Haarnoja et al., 2018) as the base RL algorithm for PCGrad, CAGrad, and Nash-MTL. We follow the same experimental setup and evaluation protocol as in Sodhani et al. (2021); Liu et al. (2021a). Each method is trained over 2 million steps with a batch size of 1280. The agent is evaluated once every 10K environment steps to obtain the average success rate over tasks. The reported success rate for the agent is the best average performance over all evaluation steps. We repeat this procedure over 10 random seeds, and the performance of each method is obtained by averaging the mean success rate over all random seeds. For all Nash-MTL experiments, we use a single CCP step in order to speed up computation.
483
+
484
+ Illustrative Example. We provide here the details for the illustrative example of Figure 1, which is a slightly modified version of the illustrative example in (Liu et al., 2021a). We first present the learning problem from (Liu et al., 2021a): let $\theta = (\theta_{1},\theta_{2})\in \mathbb{R}^{2}$ and consider the following objectives:
485
+
486
+ $$
487
+ \tilde{\ell}_{1}(\theta) = c_{1}(\theta) f_{1}(\theta) + c_{2}(\theta) g_{1}(\theta) \quad \text{and} \quad \tilde{\ell}_{2}(\theta) = c_{1}(\theta) f_{2}(\theta) + c_{2}(\theta) g_{2}(\theta), \quad \text{where}
488
+ $$
489
+
490
+ $$
491
+ f_{1}(\theta) = \log \left( \max \left( |0.5(-\theta_{1} - 7) - \tanh(-\theta_{2})|, 5e{-}6 \right) \right) + 6,
492
+ $$
493
+
494
+ $$
495
+ f_{2}(\theta) = \log \left( \max \left( |0.5(-\theta_{1} + 3) - \tanh(-\theta_{2}) + 2|, 5e{-}6 \right) \right) + 6,
496
+ $$
497
+
498
+ $$
499
+ g_{1}(\theta) = \left( (-\theta_{1} + 7)^{2} + 0.1 \cdot (-\theta_{2} - 8)^{2} \right) / 10 - 20,
500
+ $$
501
+
502
+ $$
503
+ g_{2}(\theta) = \left( (-\theta_{1} - 7)^{2} + 0.1 \cdot (-\theta_{2} - 8)^{2} \right) / 10 - 20,
504
+ $$
505
+
506
+ $$
507
+ c_{1}(\theta) = \max(\tanh(0.5\theta_{2}), 0) \quad \text{and} \quad c_{2}(\theta) = \max(\tanh(-0.5\theta_{2}), 0)
508
+ $$
509
+
510
+ We now set $\ell_1 = 0.1\cdot \tilde{\ell}_1$ and $\ell_{2} = \tilde{\ell}_{2}$ as our objectives; see Figure 5. We use five different initialization points $\{(-8.5,7.5),(0.0,0.0),(9.0,9.0),(-7.5,-0.5),(9.0,-1.0)\}$ . We use the Adam optimizer and train each method for 35K iterations with a learning rate of $1e - 3$ .
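+
+ For reproducibility, the objectives above transcribe directly into code; a numpy sketch is given below (in practice the gradients are taken with an autodiff framework):
+
+ ```python
+ import numpy as np
+
+ def toy_losses(theta1, theta2):
+     # f, g, and c follow the definitions above verbatim
+     f1 = np.log(np.maximum(np.abs(0.5 * (-theta1 - 7) - np.tanh(-theta2)), 5e-6)) + 6
+     f2 = np.log(np.maximum(np.abs(0.5 * (-theta1 + 3) - np.tanh(-theta2) + 2), 5e-6)) + 6
+     g1 = ((-theta1 + 7) ** 2 + 0.1 * (-theta2 - 8) ** 2) / 10 - 20
+     g2 = ((-theta1 - 7) ** 2 + 0.1 * (-theta2 - 8) ** 2) / 10 - 20
+     c1 = np.maximum(np.tanh(0.5 * theta2), 0)
+     c2 = np.maximum(np.tanh(-0.5 * theta2), 0)
+     l1_tilde = c1 * f1 + c2 * g1
+     l2_tilde = c1 * f2 + c2 * g2
+     return 0.1 * l1_tilde, l2_tilde   # ell_1 = 0.1 * l~_1, ell_2 = l~_2
+ ```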
511
+
512
+ # C. Computing Task Gradient at the Features-Level
513
+
514
+ One common approach for speeding up and scaling MTL methods is to use feature-level gradients (from the representation layer) as a surrogate for the task gradients computed over the entire shared backbone (Sener & Koltun, 2018; Liu et al., 2021b; Javaloy & Valera, 2021). In this section, we evaluate Nash-MTL when using the feature-level gradients to compute the Nash bargaining solution. On the QM9 dataset, we found this approach to accelerate training by $\sim \times 6$ . However, it greatly hurts the performance of Nash-MTL, yielding a test $\Delta_{m}$ of 179.2 (compared to 62.0 when using the full gradients). This result is not surprising, since we are mainly interested in the inner products of the gradients. Consider
515
+
516
+ ![](images/77ae7f8aea677b5d7f2d924e00e353889f9ae0979305b71bee4bcee35f653ea7.jpg)
517
+ Figure 6. NYUv2. The mean and standard deviation of the test $\Delta_{m}$ throughout the training process, for Nash-MTL with 1, 20, and 40 CCP steps.
518
+
519
+ $g_{i}^{\top}g_{j} = (\nabla_{\theta}z\nabla_{z}\ell_{i})^{\top}\nabla_{\theta}z\nabla_{z}\ell_{j}$ , where $z$ is the feature representation and $\theta$ is the shared parameters vector. We see that for $\nabla_{z}\ell_{i}^{\top}\nabla_{z}\ell_{j}$ to accurately approximate $g_{i}^{\top}g_{j}$ we need $\nabla_{\theta}z^{\top}\nabla_{\theta}z\approx I$ , which is a strong and restrictive requirement.
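+
+ A small numeric illustration of this point: writing $J = \nabla_\theta z$ , the feature-level inner products match the full ones only when the Jacobian has (near-)orthonormal rows. A sketch with random matrices:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+ d, m = 200, 16                       # shared-parameter / feature dimensions
+ J = rng.normal(size=(m, d))          # generic Jacobian of features w.r.t. theta
+ u_i, u_j = rng.normal(size=m), rng.normal(size=m)   # feature-level gradients
+
+ g_i, g_j = J.T @ u_i, J.T @ u_j      # full task gradients, g = J^T u
+ print(u_i @ u_j, g_i @ g_j)          # generally far apart
+
+ J_orth = np.linalg.qr(J.T)[0].T      # orthonormal rows: J_orth @ J_orth.T = I
+ print(u_i @ u_j, (J_orth.T @ u_i) @ (J_orth.T @ u_j))  # now they agree
+ ```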
520
+
521
+ # D. Additional Experiments
522
+
523
+ # D.1. Full Results for Multi-task Regression
524
+
525
+ We provide here the full results for the QM9 experiment of Section 6.1. The results for all methods over all 11 tasks are presented in Table 6. Nash-MTL achieves the best $\Delta_{m}$ and MR performance. Despite being a simple approach, SI performs well compared to more sophisticated baselines, achieving the third and second best $\Delta_{m}$ and MR, respectively. The other scale-invariant method, IMTL-G, also performs well in this learning setup.
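+
+ For reference, the two summary metrics can be computed as in the sketch below, which follows the standard definitions used in this line of work: $\Delta_{m}$ is the mean relative performance drop with respect to the STL baseline, and MR is a method's mean rank over tasks (all QM9 metrics are MAE, so lower is better):
+
+ ```python
+ import numpy as np
+
+ def delta_m(method_maes, stl_maes):
+     # percentage drop relative to STL, averaged over tasks (lower-is-better metrics)
+     method_maes, stl_maes = np.asarray(method_maes), np.asarray(stl_maes)
+     return 100.0 * np.mean((method_maes - stl_maes) / stl_maes)
+
+ def mean_rank(all_methods_maes):
+     # all_methods_maes: (num_methods, num_tasks); rank 1 = best (lowest MAE)
+     ranks = np.argsort(np.argsort(all_methods_maes, axis=0), axis=0) + 1
+     return ranks.mean(axis=1)
+ ```
+
+ For metrics where higher is better, the sign of the relative difference must be flipped before averaging.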
526
+
527
+ Table 6. QM9. Test performance averaged over 3 random seeds.
528
+
529
+ <table><tr><td></td><td>μ</td><td>α</td><td>εHOMO</td><td>εLUMO</td><td>(R2)</td><td>ZPVE</td><td>U0</td><td>U</td><td>H</td><td>G</td><td>cv</td><td>MR↓</td><td>Δm%↓</td></tr><tr><td></td><td colspan="11" align="center">MAE↓</td><td></td><td></td></tr><tr><td>STL</td><td>0.067</td><td>0.181</td><td>60.57</td><td>53.91</td><td>0.502</td><td>4.53</td><td>58.8</td><td>64.2</td><td>63.8</td><td>66.2</td><td>0.072</td><td></td><td></td></tr><tr><td>LS</td><td>0.106</td><td>0.325</td><td>73.57</td><td>89.67</td><td>5.19</td><td>14.06</td><td>143.4</td><td>144.2</td><td>144.6</td><td>140.3</td><td>0.128</td><td>6.8</td><td>177.6</td></tr><tr><td>SI</td><td>0.309</td><td>0.345</td><td>149.8</td><td>135.7</td><td>1.00</td><td>4.50</td><td>55.3</td><td>55.75</td><td>55.82</td><td>55.27</td><td>0.112</td><td>4.0</td><td>77.8</td></tr><tr><td>RLW</td><td>0.113</td><td>0.340</td><td>76.95</td><td>92.76</td><td>5.86</td><td>15.46</td><td>156.3</td><td>157.1</td><td>157.6</td><td>153.0</td><td>0.137</td><td>8.2</td><td>203.8</td></tr><tr><td>DWA</td><td>0.107</td><td>0.325</td><td>74.06</td><td>90.61</td><td>5.09</td><td>13.99</td><td>142.3</td><td>143.0</td><td>143.4</td><td>139.3</td><td>0.125</td><td>6.4</td><td>175.3</td></tr><tr><td>UW</td><td>0.386</td><td>0.425</td><td>166.2</td><td>155.8</td><td>1.06</td><td>4.99</td><td>66.4</td><td>66.78</td><td>66.80</td><td>66.24</td><td>0.122</td><td>5.3</td><td>108.0</td></tr><tr><td>MGDA</td><td>0.217</td><td>0.368</td><td>126.8</td><td>104.6</td><td>3.22</td><td>5.69</td><td>88.37</td><td>89.4</td><td>89.32</td><td>88.01</td><td>0.120</td><td>5.9</td><td>120.5</td></tr><tr><td>PCGrad</td><td>0.106</td><td>0.293</td><td>75.85</td><td>88.33</td><td>3.94</td><td>9.15</td><td>116.36</td><td>116.8</td><td>117.2</td><td>114.5</td><td>0.110</td><td>5.0</td><td>125.7</td></tr><tr><td>CAGrad</td><td>0.118</td><td>0.321</td><td>83.51</td><td>94.81</td><td>3.21</td><td>6.93</td><td>113.99</td><td>114.3</td><td>114.5</td><td>112.3</td><td>0.116</td><td>5.7</td><td>112.8</td></tr><tr><td>IMTL-G</td><td>0.136</td><td>0.287</td><td>98.31</td><td>93.96</td><td>1.75</td><td>5.69</td><td>101.4</td><td>102.4</td><td>102.0</td><td>100.1</td><td>0.096</td><td>4.7</td><td>77.2</td></tr><tr><td>Nash-MTL</td><td>0.102</td><td>0.248</td><td>82.95</td><td>81.89</td><td>2.42</td><td>5.38</td><td>74.5</td><td>75.02</td><td>75.10</td><td>74.16</td><td>0.093</td><td>2.5</td><td>62.0</td></tr></table>
530
+
531
+ # D.2. Effect of the Number of CCP steps
532
+
533
+ In this section, we investigate the effect of varying the number of CCP steps in our efficient approximation to $G^{\top}G\alpha = 1 / \alpha$ (presented in Section 3.2). We use the NYUv2 dataset and train Nash-MTL with CCP sequences of 1, 20, and 40 steps at each (parameter) optimization step.
534
+
535
+ We found that increasing the length of the CCP sequence improves the approximation to the optimal $\alpha$ . Using a single CCP iteration yields $G^{\top} G \alpha \approx 1 / \alpha$ in $91.5\%$ of the optimization steps, whereas increasing the number of iterations to 20 increases the proportion of optimal solutions to $93.5\%$ . However, we found that the improved solution yields no significant improvement
536
+
537
+ ![](images/10721265a04f2fc2dcb3ef0980fc1dc3c150595e1d7dad3b87e4b063ff5531ae.jpg)
538
+ (a) NYUv2
539
+
540
+ ![](images/b6442a8beeb2585c7d6aaa2c6177ab176d5f403abc5544588654320b8b3d440e.jpg)
541
+ (b) MT10
542
+ Figure 7. Task Weights. Task weights obtained from Nash-MTL throughout the optimization process, for (a) NYUv2 and (b) MT10 with a weight update frequency of 100. For better visualization, each point corresponds to a moving average with window size 200.
543
+
544
+ Table 7. QM9. Runtime per epoch in minutes.
545
+
546
+ <table><tr><td></td><td>Runtime [Min.]</td></tr><tr><td>LS</td><td>0.54</td></tr><tr><td>MGDA</td><td>7.25</td></tr><tr><td>PCGrad</td><td>7.47</td></tr><tr><td>CAGrad</td><td>6.85</td></tr><tr><td>Nash-MTL</td><td>6.76</td></tr><tr><td>Nash-MTL-5</td><td>1.81</td></tr><tr><td>Nash-MTL-50</td><td>0.69</td></tr></table>
547
+
548
+ in MTL performance. Figure 6 presents the test $\Delta_{m}$ throughout the training process.
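+
+ A sketch of the optimality check implied here: a step is counted as optimal when the Nash condition holds up to a tolerance (the tolerance value is our assumption, not taken from the paper):
+
+ ```python
+ import numpy as np
+
+ def fraction_optimal(history, rtol=1e-2):
+     # history: iterable of (G, alpha) pairs logged during training
+     hits = [np.allclose((G.T @ G) @ a, 1.0 / a, rtol=rtol) for G, a in history]
+     return float(np.mean(hits))
+ ```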
549
+
550
+ # D.3. Modifying the CCP Objective
551
+
552
+ In this section, we examine the effect of changing the objective of the CCP procedure described in Section 3.2 (Eq. 5). Here, we first solve the convex optimization problem of Eq. 4 to obtain $\alpha_0$ . If $G^\top G\alpha_0 \approx 1 / \alpha_0$ , we stop. Otherwise, we use the CCP procedure with objective $\varphi(\alpha)$ , starting at $\alpha_0$ (dropping the additional $\sum_{i} \beta_i$ term from Eq. 5). While this objective is more natural, in practice we observe a degradation in MTL performance: we obtain $\Delta_m = 64.4$ for the QM9 dataset (vs. 62 reported in the paper), $\Delta_m = -3.5$ (vs. -4) for NYUv2, and $\Delta_m = 8.8$ (vs. 6.8) for Cityscapes.
553
+
554
+ # D.4. Visualizing Task Weights
555
+
556
+ Our method, Nash-MTL, can essentially be viewed as a principled approach for producing dynamic task weights. Here, we visualize these task weights throughout the training process for the NYUv2 dataset (Figure 7(a)) and the MT10 dataset (Figure 7(b)).
557
+
558
+ # D.5. Verifying the Task Independence Assumption
559
+
560
+ Here we provide an empirical justification for our assumption in Section 3, which we restate: we assume that the task gradients are linearly independent at each point $\theta$ that is not Pareto stationary. To investigate whether this assumption holds in our experiments, we track the smallest singular value of the gradients' Gram matrix, $\sigma_{K}(G^{\top}G)$ . The results are presented in Figure 8. We see that for both datasets $\sigma_{K}$ decreases as training progresses. In the NYUv2 experiment, the smallest singular value remains fairly large throughout the entire training process. On the QM9 dataset, $\sigma_{K}$ decreases more significantly, to around $1e - 8$ .
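+
+ One way to monitor this assumption during training is to log the smallest singular value of the Gram matrix at each step; a minimal sketch:
+
+ ```python
+ import numpy as np
+
+ def smallest_singular_value(G):
+     # G: d x K matrix whose columns are the task gradients
+     return np.linalg.svd(G.T @ G, compute_uv=False)[-1]
+ ```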
561
+
562
+ ![](images/8838f2247b0c306a2f9882bce855b6989ce6387af77048d6c9dfd63cd49c67c1.jpg)
563
+ (a) NYUv2
564
+ Figure 8. Smallest singular value of $G^\top G$ throughout the training process.
565
+
566
+ ![](images/7ecda676888c6e8d8fe2d04aa242d33287f03d85346850897274d90308a4526b.jpg)
567
+ (b) QM9
2202.01xxx/2202.01017/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f7c4b6a61a7f946d18b9391828d403a436511c7da8324326dcef5e68bbcf437
3
+ size 905761
2202.01xxx/2202.01017/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_content_list.json ADDED
@@ -0,0 +1,1046 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "MedNeRF: Medical Neural Radiance Fields for Reconstructing 3D-aware CT-Projections from a Single X-ray",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 143,
8
+ 88,
9
+ 851,
10
+ 138
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Abril Corona-Figueroa<sup>1</sup>, Jonathan Frawley<sup>1</sup>, Sam Bond-Taylor<sup>1</sup>, Sarath Bethapudi<sup>2</sup>, Hubert P. H. Shum<sup>1</sup>, Chris G. Willcocks<sup>1</sup>",
17
+ "bbox": [
18
+ 259,
19
+ 157,
20
+ 736,
21
+ 191
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—Computed tomography (CT) is an effective medical imaging modality, widely used in the field of clinical medicine for the diagnosis of various pathologies. Advances in Multidetector CT imaging technology have enabled additional functionalities, including generation of thin slice multiplanar cross-sectional body imaging and 3D reconstructions. However, this involves patients being exposed to a considerable dose of ionising radiation. Excessive ionising radiation can lead to deterministic and harmful effects on the body. This paper proposes a Deep Learning model that learns to reconstruct CT projections from a few or even a single-view X-ray. This is based on a novel architecture that builds from neural radiance fields, which learns a continuous representation of CT scans by disentangling the shape and volumetric depth of surface and internal anatomical structures from 2D images. Our model is trained on chest and knee datasets, and we demonstrate qualitative and quantitative high-fidelity renderings and compare our approach to other recent radiance field-based methods. Our code and link to our datasets are available at https://github.com/abrilcf/mednerf",
28
+ "bbox": [
29
+ 81,
30
+ 217,
31
+ 488,
32
+ 469
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Clinical relevance—Our model is able to infer the anatomical 3D structure from a few or a single-view X-ray, showing future potential for reduced ionising radiation exposure during the imaging process.",
39
+ "bbox": [
40
+ 81,
41
+ 469,
42
+ 488,
43
+ 522
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "I. INTRODUCTION",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 207,
53
+ 532,
54
+ 364,
55
+ 545
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "3D medical imaging often involves joining multiple 2D slices from CT or Magnetic Resonance Imaging (MRI), and part of their workflow consists of specifying values for the position of the patient, the imaging source, and the detector. The quality and accuracy of a CT 3D representation require hundreds of X-ray projections with a thin slice thickness [1]. Moreover, this process exposes patients to more ionising radiation than typical X-rays and requires the patient to remain immobile for up to more than 1 hour, depending on the type of test [2]. Continuous 3D representations would give radiologists optics of every point in the internal anatomy captured. While such representations are useful, there are practical challenges in CT due to the increased radiation exposure, angle-dependent structures, and time consumption [3].",
62
+ "bbox": [
63
+ 81,
64
+ 551,
65
+ 488,
66
+ 777
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Earlier approaches in medical image reconstruction used analytic and iterative methods [4], [5] on given input data. However, they often encounter mismatches between the mathematical model and physical properties of the imaging system. Instead, several recent approaches leverage deep",
73
+ "bbox": [
74
+ 81,
75
+ 779,
76
+ 488,
77
+ 854
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "learning [6] for sparse view reconstruction [7], [8], [9], 3D CT reconstruction from 2D images [10], and anomaly detection [11]. These deep learning approaches solved the mismatches between the mathematical model and imaging system and reported improved reconstructions by fine-tuning state-of-the-art architectures. However, they require a large amount of training data, which may be difficult to meet in the medical domain where acquiring expert annotations is both cost and time prohibitive.",
84
+ "bbox": [
85
+ 504,
86
+ 215,
87
+ 913,
88
+ 351
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "The Neural Radiance Fields (NeRF) [12] model is a recent reformulation for estimating a 3D volumetric representation from images. Such representations encode the radiance field and density of the scene in the parameters of a neural network. The neural network learns to synthesize new views via volume rendering from point samples along cast rays. However, these representations are often captured in controlled settings [13]. First, the scene is taken by a set of fixed cameras within a short time frame. Second, all content in the scene is static and real images often need masking. These constraints prohibit the direct application of NeRF to the medical domain, where the imaging system greatly differs from conventional cameras, and the images are captured over a long time frame hampering the patient's stillness. Moreover, the overlapping of anatomical structures in medical images hinders the definition of edges which cannot be easily solved with masking. These aspects explain why the NeRF approach especially shows successes for \"natural images\".",
95
+ "bbox": [
96
+ 504,
97
+ 352,
98
+ 911,
99
+ 638
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "To address these challenges, we propose MedNeRF, a model that adapts Generative Radiance Fields (GRAF) [14] in the medical domain to render CT projections given a few or even a single-view X-ray. Our approach not only synthesizes realistic images, but also captures the data manifold and provides a continuous representation of how the attenuation and volumetric depth of anatomical structures vary with the viewpoint without 3D supervision. This is achieved via a new discriminator architecture that provides a stronger and more comprehensive signal to GRAF when dealing with CT scans.",
106
+ "bbox": [
107
+ 504,
108
+ 638,
109
+ 913,
110
+ 803
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "Closest to our goal are [8], [9], which both train a coordinate-based network in sinograms of low-dose CT of phantom objects and apply it to the sparse-view tomography reconstruction problem. In contrast to [8], we learn multiple representations in a single model by randomly feeding data of different medical instances instead of separately optimizing for each collection of images. For testing [9] reconstruction ability, they integrate it into reconstruction methods and use",
117
+ "bbox": [
118
+ 504,
119
+ 805,
120
+ 913,
121
+ 926
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "aside_text",
127
+ "text": "arXiv:2202.01020v3 [eess.IV] 8 Apr 2022",
128
+ "bbox": [
129
+ 22,
130
+ 267,
131
+ 60,
132
+ 715
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "page_footnote",
138
+ "text": "<sup>1</sup>Corona-Figueroa, Frawley, Bond-Taylor, Shum and Willcocks are with the Computer Science Department, Durham University, Durham, DH1 3LE, UK abril.corona-figueroa@durham.ac.uk",
139
+ "bbox": [
140
+ 81,
141
+ 866,
142
+ 488,
143
+ 901
144
+ ],
145
+ "page_idx": 0
146
+ },
147
+ {
148
+ "type": "page_footnote",
149
+ "text": "$^{2}$ Bethapudi is with the County Durham and Darlington NHS Foundation Trust, Durham, DL3 6HX, UK",
150
+ "bbox": [
151
+ 81,
152
+ 901,
153
+ 488,
154
+ 924
155
+ ],
156
+ "page_idx": 0
157
+ },
158
+ {
159
+ "type": "text",
160
+ "text": "at least 60 views. Different from their methods, we do not rely on additional reconstruction algorithms, and we only require multiple views during training.",
161
+ "bbox": [
162
+ 81,
163
+ 65,
164
+ 486,
165
+ 109
166
+ ],
167
+ "page_idx": 1
168
+ },
169
+ {
170
+ "type": "text",
171
+ "text": "We render CT projections of our two datasets of digitally reconstructed radiographs (DRR) from chest and knee. We qualitative and quantitative demonstrate high-fidelity renderings and compare our approach to other recent radiance field-based methods. Furthermore, we render CT projections of a medical instance given a single-view X-ray and show the effectiveness of our model to cover surface and internal structures.",
172
+ "bbox": [
173
+ 81,
174
+ 111,
175
+ 486,
176
+ 231
177
+ ],
178
+ "page_idx": 1
179
+ },
180
+ {
181
+ "type": "text",
182
+ "text": "II. METHODS",
183
+ "text_level": 1,
184
+ "bbox": [
185
+ 228,
186
+ 239,
187
+ 343,
188
+ 253
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "A. Dataset Preparation",
195
+ "text_level": 1,
196
+ "bbox": [
197
+ 81,
198
+ 260,
199
+ 246,
200
+ 275
201
+ ],
202
+ "page_idx": 1
203
+ },
204
+ {
205
+ "type": "text",
206
+ "text": "To train our models, we generate DRRs instead of collecting paired X-rays and corresponding CT reconstructions, which would expose patients to more radiation. Furthermore, DRR generation removes patient data and enables control in capture ranges and resolutions. We generated DRRs by using 20 CT chest scans from [15], [16] and five CT knee scans from [17], [18]. These scans cover a diverse group of patients at different contrast types showing both normal and abnormal anatomy. The radiation source and imaging panel are assumed to rotate around the vertical-axis, generating a DRR of $128 \\times 128$ resolution at every five degrees, resulting in 72 DRRs for each object. During training we use the whole set of 72 DRRs (a fifth of all views within a full 360-degree vertical rotation) per patient and let the model render the rest. Our work did not involve experimental procedures on human subjects or animals and thus did not require Institutional Review Board approval.",
207
+ "bbox": [
208
+ 81,
209
+ 279,
210
+ 488,
211
+ 536
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "B. GRAF Overview",
218
+ "text_level": 1,
219
+ "bbox": [
220
+ 83,
221
+ 545,
222
+ 220,
223
+ 558
224
+ ],
225
+ "page_idx": 1
226
+ },
227
+ {
228
+ "type": "text",
229
+ "text": "GRAF [14] is a model that builds from NeRF and defines it within an Generative Adversarial Network (GAN). It consists of a generator $G_{\\theta}$ that predicts an image patch $P_{\\mathrm{pred}}$ and a discriminator $D_{\\phi}$ that compares the predicted patch to a patch $P_{\\mathrm{real}}$ extracted from a real image. GRAF has shown an effective capacity to disentangle 3D shape and viewpoint of objects from 2D images alone, in contrast to the original NeRF [12] and similar approaches such as [19]. Therefore, we aim to translate GRAF's methods to our task, and in subsection II-C we describe our new discriminator architecture, which allows us to disentangle 3D properties from DRRs.",
230
+ "bbox": [
231
+ 81,
232
+ 563,
233
+ 488,
234
+ 743
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "text",
240
+ "text": "We consider the experimental setting to obtain the radiation attenuation response instead of the color used in natural images. To obtain the attenuation response at a pixel location for an arbitrary projection $\\pmb{K}$ with pose $\\pmb{\\xi}$ , first, we consider a pattern $\\pmb{\\nu} = (\\pmb{u}, s)$ to sample $R$ X-ray beams within a $K \\times K$ image-patch $\\pmb{P}$ . Then, we sample $N$ 3D points $\\pmb{x}_r^i$ along the X-ray beam $r$ originating from the pixel location and ordered between the near and far planes of the projection (Fig. 1a).",
241
+ "bbox": [
242
+ 81,
243
+ 744,
244
+ 486,
245
+ 880
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "text",
251
+ "text": "The object representation is encoded in a multi-layer perceptron (MLP) that takes as input a 3D position $\\pmb{x} = (x, y, z)$ and a viewing direction $\\pmb{d} = (\\theta, \\phi)$ , and produces",
252
+ "bbox": [
253
+ 83,
254
+ 881,
255
+ 488,
256
+ 926
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "image",
262
+ "img_path": "images/a1a71685023e7c68424f07b1784e12fd42527cc9ae3e108f61665d9b36a529bf.jpg",
263
+ "image_caption": [
264
+ "Fig. 1. An overview of GRAF's generator."
265
+ ],
266
+ "image_footnote": [],
267
+ "bbox": [
268
+ 513,
269
+ 61,
270
+ 911,
271
+ 220
272
+ ],
273
+ "page_idx": 1
274
+ },
275
+ {
276
+ "type": "text",
277
+ "text": "as output a density scalar $\\sigma$ and a pixel value $c$ . To learn high-frequency features, the input is mapped into a $2L$ -dimensional representation (Fig. 1b):",
278
+ "bbox": [
279
+ 504,
280
+ 272,
281
+ 911,
282
+ 316
283
+ ],
284
+ "page_idx": 1
285
+ },
286
+ {
287
+ "type": "equation",
288
+ "text": "\n$$\n\\gamma (p) = \\dots , \\cos \\left(2 ^ {j} \\pi p\\right), \\sin \\left(2 ^ {j} \\pi p\\right), \\dots \\tag {1}\n$$\n",
289
+ "text_format": "latex",
290
+ "bbox": [
291
+ 589,
292
+ 325,
293
+ 911,
294
+ 342
295
+ ],
296
+ "page_idx": 1
297
+ },
298
+ {
299
+ "type": "text",
300
+ "text": "where $p$ represents the 3D position or viewing direction, for $j = 0,\\dots,m - 1$ .",
301
+ "bbox": [
302
+ 504,
303
+ 349,
304
+ 911,
305
+ 378
306
+ ],
307
+ "page_idx": 1
308
+ },
309
+ {
310
+ "type": "text",
311
+ "text": "For modeling the shape and appearance of anatomical structures, let $z_{s} \\sim p_{s}$ and $z_{a} \\sim p_{a}$ be the latent codes sampled from a standard Gaussian distribution, respectively (Fig. 1c). To obtain the density prediction $\\sigma$ , the shape encoding $q$ is transformed to volume density through a density head $\\sigma_{\\theta}$ . Then, the network $g_{\\theta}(\\cdot)$ operates on a shape encoding $q = (\\gamma(x), z_{s})$ that is later concatenated with the positional encoding of $d$ and appearance code $z_{a}$ (Fig. 1c):",
312
+ "bbox": [
313
+ 504,
314
+ 380,
315
+ 911,
316
+ 501
317
+ ],
318
+ "page_idx": 1
319
+ },
320
+ {
321
+ "type": "equation",
322
+ "text": "\n$$\n(\\gamma (\\boldsymbol {x}), \\boldsymbol {z} _ {s}) \\mapsto \\boldsymbol {q} \\tag {2}\n$$\n",
323
+ "text_format": "latex",
324
+ "bbox": [
325
+ 686,
326
+ 508,
327
+ 911,
328
+ 525
329
+ ],
330
+ "page_idx": 1
331
+ },
332
+ {
333
+ "type": "equation",
334
+ "text": "\n$$\n\\left(\\boldsymbol {q} \\left(\\boldsymbol {x}, \\boldsymbol {z} _ {s}\\right), \\gamma (\\boldsymbol {d}), \\boldsymbol {z} _ {a}\\right) \\mapsto c \\tag {3}\n$$\n",
335
+ "text_format": "latex",
336
+ "bbox": [
337
+ 624,
338
+ 527,
339
+ 911,
340
+ 545
341
+ ],
342
+ "page_idx": 1
343
+ },
344
+ {
345
+ "type": "equation",
346
+ "text": "\n$$\n\\boldsymbol {q} \\left(\\boldsymbol {x}, \\boldsymbol {z} _ {s}\\right) \\mapsto \\sigma \\tag {4}\n$$\n",
347
+ "text_format": "latex",
348
+ "bbox": [
349
+ 700,
350
+ 547,
351
+ 911,
352
+ 561
353
+ ],
354
+ "page_idx": 1
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "The final pixel response $c_{r}$ is computed by the compositing operation (Fig. 1c):",
359
+ "bbox": [
360
+ 506,
361
+ 571,
362
+ 911,
363
+ 599
364
+ ],
365
+ "page_idx": 1
366
+ },
367
+ {
368
+ "type": "equation",
369
+ "text": "\n$$\nc _ {r} = \\sum_ {i = 1} ^ {N} c _ {r} ^ {i} \\alpha_ {r} ^ {i} \\exp \\left(- \\sum_ {j = 1} ^ {i - 1} \\sigma_ {r} ^ {j} \\delta_ {r} ^ {j}\\right) \\tag {5}\n$$\n",
370
+ "text_format": "latex",
371
+ "bbox": [
372
+ 601,
373
+ 607,
374
+ 911,
375
+ 648
376
+ ],
377
+ "page_idx": 1
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "where $\\alpha_r^i = 1 - \\exp \\left(-\\sigma_r^i\\delta_r^i\\right)$ is the alpha compositing value of sampled point $i$ and $\\delta_r^i = \\| \\pmb{x}_r^{i + 1} - \\pmb{x}_r^i\\| _2$ is the distance between the adjacent sampled points.",
382
+ "bbox": [
383
+ 504,
384
+ 656,
385
+ 911,
386
+ 700
387
+ ],
388
+ "page_idx": 1
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "In this way, both the density and pixel values are computed at each sampled point along the beam $r$ with network $g_{\\theta}$ . Finally, combining the results of all $R$ beams, the generator $G_{\\theta}$ predicts an image patch $P_{\\mathrm{pred}}$ , as illustrated in Fig. 1d.",
393
+ "bbox": [
394
+ 504,
395
+ 702,
396
+ 911,
397
+ 763
398
+ ],
399
+ "page_idx": 1
400
+ },
401
+ {
402
+ "type": "text",
403
+ "text": "C. MedNeRF",
404
+ "text_level": 1,
405
+ "bbox": [
406
+ 506,
407
+ 771,
408
+ 602,
409
+ 784
410
+ ],
411
+ "page_idx": 1
412
+ },
413
+ {
414
+ "type": "text",
415
+ "text": "We investigate how we can adapt GRAF to the medical domain and apply it to render a volumetric representation from DRRs. Leveraging a large dataset, GRAF's discriminator $D_{\\phi}$ is able to continuously provide useful signals to train the generator $G_{\\theta}$ . However, medical datasets like those considered in our problem are generally small, which causes two sequential issues:",
416
+ "bbox": [
417
+ 504,
418
+ 790,
419
+ 911,
420
+ 895
421
+ ],
422
+ "page_idx": 1
423
+ },
424
+ {
425
+ "type": "text",
426
+ "text": "The lack of real information to the generator: In GRAF (and in GAN in general), the only source of features of",
427
+ "bbox": [
428
+ 504,
429
+ 896,
430
+ 911,
431
+ 926
432
+ ],
433
+ "page_idx": 1
434
+ },
435
+ {
436
+ "type": "text",
437
+ "text": "the training data contributing to the generator is the indirect gradient transferred from the discriminator. We find that the single convolutional feedback from GRAF's discriminator poorly conveys refined features from DRRs resulting in inaccurate volumetric estimation.",
438
+ "bbox": [
439
+ 81,
440
+ 65,
441
+ 488,
442
+ 141
443
+ ],
444
+ "page_idx": 2
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "Brittle adversarial training: With a limited training dataset, the generator or discriminator may fall into ill-posed settings such as mode collapse, which would lead to generating a limited number of instances and consequently, a suboptimal data distribution estimation. While some works have applied data augmentation techniques to leverage more data in the medical domain, some transformations could mislead the generator to learn the infrequent or even non-existent augmented data distribution [20]. We find that naively applying classic data augmentation works less favorably than our adopted framework.",
449
+ "bbox": [
450
+ 81,
451
+ 141,
452
+ 490,
453
+ 308
454
+ ],
455
+ "page_idx": 2
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "1) Self-supervised Learning for High-Fidelity Synthesis: To allow richer feature-maps covering from the DRRs such that it produces more comprehensive signals to train $G_{\\theta}$ , we replace GRAF's discriminator architecture with recent advancements in self-supervised approaches. We allow $D_{\\phi}$ to learn useful global and local features training it on a pretext task, in particular, the self-supervision method based on auto-encoding [21]. Different from [21], we only use two decoders for the feature-maps on scales: $f_{1}$ on $32^{2}$ and $f_{2}$ on $8^{2}$ (Fig. 2a). We find that this choice allows better performance and enables a correct volumetric depth estimation. $D_{\\phi}$ must therefore not only discriminate $P_{\\mathrm{pred}}$ predicted from $G_{\\theta}$ but also extract comprehensive features from real image patches $P_{\\mathrm{real}}$ that enable the decoders to resemble the data distribution.",
460
+ "bbox": [
461
+ 81,
462
+ 327,
463
+ 488,
464
+ 551
465
+ ],
466
+ "page_idx": 2
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "To assess global structure in decoded patches from $D_{\\phi}$ , we use the Learned Perceptual Image Patch Similarity (LPIPS) metric [22]. We compute the weighted pairwise image distance between two VGG16 feature spaces, where the pretrained weights are fit to better match human perceptual judgments. The additional discriminator loss is therefore:",
471
+ "bbox": [
472
+ 81,
473
+ 554,
474
+ 488,
475
+ 645
476
+ ],
477
+ "page_idx": 2
478
+ },
479
+ {
480
+ "type": "equation",
481
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {r}} = \\mathbb {E} _ {\\boldsymbol {f} \\sim D (\\boldsymbol {p}), \\boldsymbol {p} \\sim P} \\left[ \\frac {1}{w h d} \\| \\phi_ {i} (\\mathcal {G} (\\boldsymbol {f})) - \\phi_ {i} (\\mathcal {T} (\\boldsymbol {p})) \\| _ {2} \\right] \\tag {6}\n$$\n",
482
+ "text_format": "latex",
483
+ "bbox": [
484
+ 91,
485
+ 652,
486
+ 488,
487
+ 686
488
+ ],
489
+ "page_idx": 2
490
+ },
491
+ {
492
+ "type": "text",
493
+ "text": "where $\\phi_i(\\cdot)$ denotes the $i$ th layer output of a pretrained VGG16 network, and $w$ , $h$ , and $d$ stand for the width, height and depth of a feature space, respectively. Let $\\mathcal{G}$ be the processing on the intermediate feature-maps $\\pmb{f}$ from $D_{\\phi}$ , and $\\mathcal{T}$ the processing on real image patches. When coupled with this additional reconstruction loss, the network learns representations that transfer across tasks.",
494
+ "bbox": [
495
+ 81,
496
+ 695,
497
+ 488,
498
+ 800
499
+ ],
500
+ "page_idx": 2
501
+ },
502
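A sketch of the feature distance behind Eq. (6), assuming a recent torchvision. The single layer cut-off is an assumption: LPIPS [22] combines several VGG16 layers with learned weights, which are omitted here for brevity.

```python
import torch
from torchvision.models import vgg16, VGG16_Weights

# Frozen VGG16 features up to an intermediate conv block (assumed cut-off).
extractor = vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features[:16].eval()
for p in extractor.parameters():
    p.requires_grad_(False)

def perceptual_distance(decoded, target):
    """Feature-space L2 distance normalized by w*h*d, cf. Eq. (6).

    decoded, target : (B, 3, H, W) images; grayscale patches would need
    their channel replicated three times first.
    """
    f1, f2 = extractor(decoded), extractor(target)     # (B, d, h, w)
    _, d, h, w = f1.shape
    return ((f1 - f2) ** 2).sum(dim=(1, 2, 3)).sqrt().div(w * h * d).mean()

print(perceptual_distance(torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)))
```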
+ {
503
+ "type": "text",
504
+ "text": "2) Improving Learning via Data Augmentation:",
505
+ "text_level": 1,
506
+ "bbox": [
507
+ 98,
508
+ 820,
509
+ 424,
510
+ 834
511
+ ],
512
+ "page_idx": 2
513
+ },
514
+ {
515
+ "type": "text",
516
+ "text": "We improve learning of $G_{\\theta}$ and $D_{\\phi}$ by adopting the Data Augmentation Optimized for GAN (DAG) framework [20] in which a data augmentation transformation $\\mathcal{T}_k$ (Fig. 2b) is applied using multiple discriminator heads $\\{D_k\\}$ . To further reduce memory usage, we share all layers of $D_{\\phi}$ except the last layers corresponding to each head (Fig.",
517
+ "bbox": [
518
+ 81,
519
+ 835,
520
+ 490,
521
+ 928
522
+ ],
523
+ "page_idx": 2
524
+ },
525
+ {
526
+ "type": "image",
527
+ "img_path": "images/f10282d655e5831e9fbb817a741ca909fbe4c13c66eb839fc02d059db8787566.jpg",
528
+ "image_caption": [
529
+ "Fig. 2. An overview of our discriminator with self-supervised learning and DAG."
530
+ ],
531
+ "image_footnote": [],
532
+ "bbox": [
533
+ 511,
534
+ 61,
535
+ 911,
536
+ 250
537
+ ],
538
+ "page_idx": 2
539
+ },
540
+ {
541
+ "type": "text",
542
+ "text": "2c). Because applying differentiable and invertible data augmentation transformations $\\mathcal{T}_k$ has the Jenssen-Shannon (JS) preserving property [20]:",
543
+ "bbox": [
544
+ 504,
545
+ 311,
546
+ 911,
547
+ 357
548
+ ],
549
+ "page_idx": 2
550
+ },
551
+ {
552
+ "type": "equation",
553
+ "text": "\n$$\n\\mathrm {J S} \\left(p _ {d} ^ {\\mathcal {T} _ {k}} \\| p _ {g} ^ {\\mathcal {T} _ {k}}\\right) = \\mathrm {J S} \\left(p _ {d} \\| p _ {g}\\right) \\tag {7}\n$$\n",
554
+ "text_format": "latex",
555
+ "bbox": [
556
+ 609,
557
+ 364,
558
+ 911,
559
+ 386
560
+ ],
561
+ "page_idx": 2
562
+ },
563
+ {
564
+ "type": "text",
565
+ "text": "where $p_d^{\\mathcal{T}_k}$ is the transformed training data distribution and $p_g^{\\mathcal{T}_k}$ the transformed distribution captured by $G_{\\theta}$ . By using a total of four transformations combining flipping and rotation, we encourage optimization to the original data distribution, which also brings the most performance boost. These choices allow our model to benefit from not only $\\mathrm{JS}(p_d \\parallel p_g)$ but also $\\mathrm{JS}(p_d^{\\mathcal{T}_k} \\parallel p_g^{\\mathcal{T}_k})$ , thereby improving the learning of $G_{\\theta}$ and generalization of $D_{\\phi}$ . Furthermore, using multiple discriminators with weight-sharing provides learning regularization of $D_{\\phi}$ .",
566
+ "bbox": [
567
+ 504,
568
+ 393,
569
+ 913,
570
+ 546
571
+ ],
572
+ "page_idx": 2
573
+ },
574
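The flip/rotation transforms used by DAG are differentiable and invertible, which is what makes Eq. (7) hold. The paper states four transformations combining flipping and rotation; the exact set below (identity plus three variants) is an assumption for illustration.

```python
import torch

TRANSFORMS = [
    lambda x: x,                                  # T_0: identity
    lambda x: torch.flip(x, dims=[-1]),           # horizontal flip
    lambda x: torch.flip(x, dims=[-2]),           # vertical flip
    lambda x: torch.rot90(x, 2, dims=[-2, -1]),   # 180-degree rotation
]

patches = torch.rand(8, 1, 32, 32)                # a batch of image patches
views = [t(patches) for t in TRANSFORMS]          # one view per discriminator head
```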
+ {
575
+ "type": "text",
576
+ "text": "Replacing GRAF's logistic objective with a hinge loss, we then define our overall loss as below:",
577
+ "bbox": [
578
+ 504,
579
+ 547,
580
+ 911,
581
+ 575
582
+ ],
583
+ "page_idx": 2
584
+ },
585
+ {
586
+ "type": "equation",
587
+ "text": "\n$$\n\\mathcal {L} (\\theta , \\left\\{\\phi_ {k} \\right\\}) = \\mathcal {L} (\\theta , \\phi_ {0}) + \\frac {\\lambda}{n - 1} \\sum_ {k = 1} ^ {n} \\mathcal {L} (\\theta , \\phi_ {k}) \\tag {8}\n$$\n",
588
+ "text_format": "latex",
589
+ "bbox": [
590
+ 557,
591
+ 582,
592
+ 911,
593
+ 622
594
+ ],
595
+ "page_idx": 2
596
+ },
597
+ {
598
+ "type": "equation",
599
+ "text": "\n$$\n\\begin{array}{l} \\mathcal {L} (\\theta , \\phi_ {k}) = \\\\ \\mathbb {E} _ {\\boldsymbol {z} _ {s} \\sim p _ {s}, \\boldsymbol {z} _ {a} \\sim p _ {a}, \\boldsymbol {\\xi} \\sim p _ {\\xi}, \\boldsymbol {\\nu} \\sim p _ {\\nu}} [ f (D _ {\\phi} (G _ {\\theta} (\\boldsymbol {z} _ {s}, \\boldsymbol {z} _ {a}, \\boldsymbol {\\xi}, \\boldsymbol {\\nu}))) ] \\tag {9} \\\\ + \\mathbb {E} _ {\\boldsymbol {I} \\sim p _ {D}, \\boldsymbol {\\nu} \\sim p _ {\\nu}} [ f (- D _ {\\phi} (\\boldsymbol {I}, \\boldsymbol {\\nu})) ] + \\mathcal {L} _ {\\mathrm {r}} \\\\ \\end{array}\n$$\n",
600
+ "text_format": "latex",
601
+ "bbox": [
602
+ 521,
603
+ 633,
604
+ 911,
605
+ 688
606
+ ],
607
+ "page_idx": 2
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "where $f(u) = \\max (0,1 + u)$ . We optimize this loss with $n = 4$ , where $k = 0$ corresponds to the identity transformation and $\\lambda = 0.2$ (as in [20]).",
612
+ "bbox": [
613
+ 504,
614
+ 695,
615
+ 911,
616
+ 742
617
+ ],
618
+ "page_idx": 2
619
+ },
620
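A sketch of how the hinge terms of Eq. (9) and the head weighting of Eq. (8) combine. Following DAG [20], the identity head (k = 0) is kept at full weight and the augmented heads are averaged with weight lambda; the indexing convention here (divisor over the number of augmented heads) follows [20], which Eq. (8) as printed abbreviates.

```python
import torch

def f(u):
    # Hinge function of Eq. (9): f(u) = max(0, 1 + u).
    return torch.clamp(1.0 + u, min=0.0)

def dag_loss(head_losses, lam=0.2):
    """Combine per-head losses, cf. Eq. (8); head_losses[0] is the identity head."""
    n = len(head_losses)
    return head_losses[0] + lam / (n - 1) * sum(head_losses[1:])

# Example with dummy logits for n = 4 heads, as used in the paper:
losses = []
for _ in range(4):
    d_fake = torch.randn(8)     # D_phi on generated patches
    d_real = torch.randn(8)     # D_phi on real patches
    losses.append((f(d_fake) + f(-d_real)).mean())
print(dag_loss(losses))
```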
+ {
621
+ "type": "text",
622
+ "text": "3) Volumetric Rendering from a Single View $X$ -ray:",
623
+ "text_level": 1,
624
+ "bbox": [
625
+ 522,
626
+ 760,
627
+ 874,
628
+ 773
629
+ ],
630
+ "page_idx": 2
631
+ },
632
+ {
633
+ "type": "text",
634
+ "text": "After training a model, we reconstruct the complete X-ray projections within a full vertical rotation of a medical instance given a single view X-ray. We follow the relaxed reconstruction formulation in [23], which fits the generator to a single image. Then, we allow the parameters of the generator $G_{\\theta}$ to be slightly fine-tuned along with the shape and appearance latent vectors $\\mathbf{z}_s$ and $\\mathbf{z}_a$ . The distortion and perception tradeoff is well known in GAN methods [24] and therefore we modify our generation objective by adding the distortion Mean Square Error (MSE) loss, which incentivises",
635
+ "bbox": [
636
+ 504,
637
+ 775,
638
+ 913,
639
+ 926
640
+ ],
641
+ "page_idx": 2
642
+ },
643
+ {
644
+ "type": "image",
645
+ "img_path": "images/8b5cb166172786a1654207b8a8cd716372a53cb7b3c2205dcd4806d14edbb52f.jpg",
646
+ "image_caption": [
647
+ "Fig. 3. Knee renderings from continuous viewpoint rotations showing tissue and bone. Given a single-view X-ray from a CT, we can generate the complete set of CT-projections within a full vertical rotation by slightly fine-tuning a pretrained model along with the shape and appearance latent codes."
648
+ ],
649
+ "image_footnote": [],
650
+ "bbox": [
651
+ 84,
652
+ 60,
653
+ 915,
654
+ 292
655
+ ],
656
+ "page_idx": 3
657
+ },
658
+ {
659
+ "type": "table",
660
+ "img_path": "images/124cb157c9a46cc14bf06eb9550b312af3dc95cccb5ab21372da436e6f3d9d73.jpg",
661
+ "table_caption": [
662
+ "TABLE I. Quantitative results based on PSNR and SSIM of rendered X-ray projections with single-view X-ray input."
663
+ ],
664
+ "table_footnote": [],
665
+ "table_body": "<table><tr><td>Dataset</td><td>↑ PSNR (dB) (μ ± σ)</td><td>↑ SSIM (μ ± σ)</td></tr><tr><td>Knee</td><td>30.17 ± 1.93</td><td>0.670 ± 0.040</td></tr><tr><td>Chest</td><td>28.54 ± 0.79</td><td>0.462 ± 0.082</td></tr></table>",
666
+ "bbox": [
667
+ 130,
668
+ 390,
669
+ 442,
670
+ 429
671
+ ],
672
+ "page_idx": 3
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "a balance between blurriness and accuracy:",
677
+ "bbox": [
678
+ 83,
679
+ 455,
680
+ 377,
681
+ 470
682
+ ],
683
+ "page_idx": 3
684
+ },
685
+ {
686
+ "type": "equation",
687
+ "text": "\n$$\n\\mathcal {L} _ {\\text {g e n}} = \\lambda_ {1} \\mathcal {L} _ {\\mathrm {r}} (V G G 1 6) + \\lambda_ {2} \\mathcal {L} _ {\\mathrm {M S E}} (G) + \\lambda_ {3} \\mathcal {L} _ {\\mathrm {N L L L}} \\left(z _ {s}, z _ {a}\\right) \\tag {10}\n$$\n",
688
+ "text_format": "latex",
689
+ "bbox": [
690
+ 94,
691
+ 479,
692
+ 486,
693
+ 508
694
+ ],
695
+ "page_idx": 3
696
+ },
697
+ {
698
+ "type": "text",
699
+ "text": "where NLLL corresponds to the negative log-likelihood loss and the tuned hyperparameters $lr = 0.0005$ , $\\beta_{1} = 0$ , $\\beta_{2} = 0.999$ , $\\lambda_{1} = 0.3$ , $\\lambda_{2} = 0.1$ and $\\lambda_{3} = 0.3$ .",
700
+ "bbox": [
701
+ 83,
702
+ 511,
703
+ 488,
704
+ 555
705
+ ],
706
+ "page_idx": 3
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "Once the model locates an optimal combination of $\\mathbf{z}_s$ and $\\mathbf{z}_a$ , we replicate them and use them to render the rest of the X-ray projections by continuously controlling the angle viewpoint.",
711
+ "bbox": [
712
+ 83,
713
+ 555,
714
+ 488,
715
+ 616
716
+ ],
717
+ "page_idx": 3
718
+ },
719
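One plausible reading of Eq. (10) in code form. The perceptual_loss argument stands in for the VGG16 term, and the NLLL term is interpreted as the negative log-likelihood of the latents under their standard Gaussian prior (up to constants); both the function names and that interpretation are assumptions.

```python
import torch

def single_view_objective(pred, target, z_s, z_a, perceptual_loss,
                          lam1=0.3, lam2=0.1, lam3=0.3):
    """Single-image fitting loss, cf. Eq. (10), with the tuned lambdas as defaults."""
    l_r = perceptual_loss(pred, target)            # VGG16 perceptual term
    l_mse = torch.mean((pred - target) ** 2)       # distortion (MSE) term
    l_nll = 0.5 * (z_s.pow(2).sum() + z_a.pow(2).sum())   # Gaussian prior NLL
    return lam1 * l_r + lam2 * l_mse + lam3 * l_nll

# Example with a stand-in perceptual loss:
loss = single_view_objective(torch.rand(1, 1, 128, 128), torch.rand(1, 1, 128, 128),
                             torch.randn(128), torch.randn(128),
                             perceptual_loss=lambda a, b: torch.mean((a - b) ** 2))
```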
+ {
720
+ "type": "text",
721
+ "text": "III. RESULTS",
722
+ "text_level": 1,
723
+ "bbox": [
724
+ 230,
725
+ 625,
726
+ 341,
727
+ 638
728
+ ],
729
+ "page_idx": 3
730
+ },
731
+ {
732
+ "type": "text",
733
+ "text": "Here we provide an evaluation of MedNeRF on our datasets. We compare our model's results to the ground truth, two baselines, perform an ablation study, and show qualitative and quantitative evaluations. We train all models for 100,000 iterations with a batch size of 8. Projection parameters $(u,v)$ are chosen to evenly sample points on the surface of a sphere, specifically a slight horizontal elevation of 70-85 degrees and $u_{\\mathrm{min}} = 0$ , $u_{\\mathrm{max}} = 1$ for a full 360-degree vertical rotation. However, we only provide a fifth of the views (72-views each at five degrees) during training and let the model render the rest.",
734
+ "bbox": [
735
+ 81,
736
+ 642,
737
+ 488,
738
+ 808
739
+ ],
740
+ "page_idx": 3
741
+ },
742
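The view sampling described above (full 360-degree azimuth at a five-degree step, with a slight 70-85 degree elevation band) can be sketched as follows; the angle conversion and the uniform elevation draw are assumptions.

```python
import numpy as np

def sample_training_views(step_deg=5.0, elev_range=(70.0, 85.0)):
    """Return (azimuth, elevation) pairs in degrees: 360/step_deg views,
    e.g. 72 views for a five-degree step, matching the training setup."""
    azimuths = np.arange(0.0, 360.0, step_deg)   # u in [0, 1) scaled to degrees
    elevations = np.random.uniform(*elev_range, size=azimuths.shape)
    return np.stack([azimuths, elevations], axis=-1)

views = sample_training_views()
print(views.shape)                               # (72, 2)
```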
+ {
743
+ "type": "text",
744
+ "text": "A. Reconstruction from Single View X-ray",
745
+ "text_level": 1,
746
+ "bbox": [
747
+ 81,
748
+ 816,
749
+ 372,
750
+ 832
751
+ ],
752
+ "page_idx": 3
753
+ },
754
+ {
755
+ "type": "text",
756
+ "text": "We evaluate our model's representation for 3D-aware DRR synthesis given a single-view X-ray as input. We find that despite the implicit linear network's limited capacity, our model can disentangle 3D anatomy identity and attenuation response of different medical instances, which are retrieved through the described reconstruction reformulation in II-C.3.",
757
+ "bbox": [
758
+ 81,
759
+ 835,
760
+ 488,
761
+ 926
762
+ ],
763
+ "page_idx": 3
764
+ },
765
+ {
766
+ "type": "image",
767
+ "img_path": "images/55ddd5b41f7a73cb740f008bc96f1ade6fc9bdae224187130a82cd7e2ea564d3.jpg",
768
+ "image_caption": [
769
+ "Fig. 4. Volumetric maps and attenuation renderings on our dataset."
770
+ ],
771
+ "image_footnote": [],
772
+ "bbox": [
773
+ 509,
774
+ 349,
775
+ 910,
776
+ 623
777
+ ],
778
+ "page_idx": 3
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "Our model can also facilitate distinguishing bone from tissue via a contrast transformation, as it renders a brighter pixel value for denser structures (e.g. bone) (Fig. 3).",
783
+ "bbox": [
784
+ 504,
785
+ 672,
786
+ 911,
787
+ 718
788
+ ],
789
+ "page_idx": 3
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "Table I summarises our results based on the peak signal-to-noise ratio (PSNR) and structural similarity (SSIM), which measure the quality of reconstructed signals and human subjective similarity, respectively. We find that our generative loss can achieve a reasonable perception-distortion curve in renderings and show consistency with the location and volumetric depth of anatomical structures at continuous viewpoints compared to the ground truth.",
794
+ "bbox": [
795
+ 504,
796
+ 719,
797
+ 913,
798
+ 839
799
+ ],
800
+ "page_idx": 3
801
+ },
802
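Both metrics are standard. PSNR follows directly from its definition, and SSIM is available in scikit-image; the data_range of 1.0 assumes images normalized to [0, 1].

```python
import numpy as np
from skimage.metrics import structural_similarity

def psnr(pred, target, data_range=1.0):
    """Peak signal-to-noise ratio in dB."""
    mse = np.mean((pred - target) ** 2)
    return 10.0 * np.log10(data_range ** 2 / mse)

pred, target = np.random.rand(128, 128), np.random.rand(128, 128)
print(psnr(pred, target), structural_similarity(pred, target, data_range=1.0))
```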
+ {
803
+ "type": "text",
804
+ "text": "B. 2D DRR Rendering",
805
+ "text_level": 1,
806
+ "bbox": [
807
+ 506,
808
+ 847,
809
+ 666,
810
+ 863
811
+ ],
812
+ "page_idx": 3
813
+ },
814
+ {
815
+ "type": "text",
816
+ "text": "We evaluate our model on the task of 2D rendering and compare it to pixelNeRF [19], and GRAF [14] baseline, wherein the original architecture is used. Our model can more accurately estimate volumetric depth compared to",
817
+ "bbox": [
818
+ 504,
819
+ 866,
820
+ 911,
821
+ 926
822
+ ],
823
+ "page_idx": 3
824
+ },
825
+ {
826
+ "type": "table",
827
+ "img_path": "images/98c9784d4f5fe1eb9f486d940837a6272539806f849616761781f228d1458076.jpg",
828
+ "table_caption": [
829
+ "TABLE II. FID and KID analysis comparing other methods."
830
+ ],
831
+ "table_footnote": [],
832
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">Chest dataset</td><td colspan=\"2\">Knee dataset</td></tr><tr><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td></tr><tr><td>GRAF [14]</td><td>68.25 ± 0.954</td><td>0.053 ± 0.0008</td><td>76.70 ± 0.302</td><td>0.058 ± 0.0001</td></tr><tr><td>pixelNeRF [19]</td><td>112.96 ± 2.356</td><td>0.084 ± 0.0012</td><td>166.40 ± 2.153</td><td>0.158 ± 0.0010</td></tr><tr><td>Ours</td><td>60.26 ± 0.322</td><td>0.041 ± 0.0005</td><td>76.12 ± 0.193</td><td>0.052 ± 0.0004</td></tr></table>",
833
+ "bbox": [
834
+ 84,
835
+ 84,
836
+ 488,
837
+ 130
838
+ ],
839
+ "page_idx": 4
840
+ },
841
+ {
842
+ "type": "table",
843
+ "img_path": "images/3a93c83cb9fa223408637572f4628e1126c818a54a5043c902887d34bc6413e6.jpg",
844
+ "table_caption": [
845
+ "TABLE III. FID and KID analysis of ablations of our model."
846
+ ],
847
+ "table_footnote": [],
848
+ "table_body": "<table><tr><td rowspan=\"2\">Ablation</td><td colspan=\"2\">Chest dataset</td></tr><tr><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td></tr><tr><td>MedNeRF - 3 SD, logistic loss, classic DA</td><td>84.85 ± 1.025</td><td>0.069 ± 0.0031</td></tr><tr><td>MedNeRF - 2 SD, logistic loss, classic DA</td><td>67.73 ± 0.712</td><td>0.051 ± 0.0006</td></tr><tr><td>MedNeRF - 2 SD, hinge loss, classic DA</td><td>65.34 ± 0.353</td><td>0.045 ± 0.0004</td></tr><tr><td>MedNeRF - 2 SD, hinge loss, DAG</td><td>60.26 ± 0.322</td><td>0.041 ± 0.0005</td></tr></table>",
849
+ "bbox": [
850
+ 84,
851
+ 167,
852
+ 488,
853
+ 229
854
+ ],
855
+ "page_idx": 4
856
+ },
857
+ {
858
+ "type": "text",
859
+ "text": "GRAF and pixelNeRF (Fig. 4). For each category, we find an unseen target instance with a similar view direction and shape. Volumetric depth estimation is given by bright colors (far) and dark colors (near). Lacking a perceptual loss, GRAF is not incentivized to produce high-frequency textures. In contrast, we find our model renders a more detailed internal structure with varied attenuation. GRAF produces a consistent attenuation response, but seems to be unable to distinguish the anatomical shape from the background. Our self-supervised discriminator enables the generator to disentangle shape and background by rendering a brighter color for the background and a darker color for the shape, while GRAF renders a bright or dark color for both.",
860
+ "bbox": [
861
+ 81,
862
+ 258,
863
+ 488,
864
+ 467
865
+ ],
866
+ "page_idx": 4
867
+ },
868
+ {
869
+ "type": "text",
870
+ "text": "We find pixelNeRF produces blurred attenuation renderings for all datasets, and volumetric maps tend to exhibit strong color shifts (Fig. 4). We believe these artifacts are due to the see-through nature of the dataset, compared to solid-like natural objects on which NeRFs are trained. This data characteristic impairs not only volumetric maps but also fine anatomical structures. In contrast, our model is better able to render both volumetric depth and attenuation response. We also find pixelNeRF is sensitive to slight changes in projection parameters, hampering optimization for the knee category. Our model produces a consistent 3D geometry and does not rely on explicit projection matrices.",
871
+ "bbox": [
872
+ 81,
873
+ 470,
874
+ 488,
875
+ 650
876
+ ],
877
+ "page_idx": 4
878
+ },
879
+ {
880
+ "type": "text",
881
+ "text": "Table II compares image quality based on Frechet Inception Distance (FID) and Kernel Inception Distance (KID) metrics, in which lower values mean better. Optimizing pixelNeRF on our datasets leads to particularly poor results that are unable to compete with the GRAF baseline and our model. In contrast, our model outperforms the baselines on FID and KID metrics for all datasets.",
882
+ "bbox": [
883
+ 81,
884
+ 651,
885
+ 488,
886
+ 755
887
+ ],
888
+ "page_idx": 4
889
+ },
890
+ {
891
+ "type": "text",
892
+ "text": "C. Ablation Study",
893
+ "text_level": 1,
894
+ "bbox": [
895
+ 83,
896
+ 768,
897
+ 210,
898
+ 784
899
+ ],
900
+ "page_idx": 4
901
+ },
902
+ {
903
+ "type": "text",
904
+ "text": "We evaluate our model with three ablations (Table III): wherein an additional simple decoder (SD) is included; the adversarial logistic loss is replaced by its hinge version; and wherein the non-classical DAG approach is adopted. We find that the DAG approach brings the most performance boost compared to naively applying classical DA, while the use of a hinge loss performs slightly better than its logistic version. However, an additional decoder in our self-supervised discriminator can lead to a significant drop in",
905
+ "bbox": [
906
+ 81,
907
+ 790,
908
+ 490,
909
+ 926
910
+ ],
911
+ "page_idx": 4
912
+ },
913
+ {
914
+ "type": "text",
915
+ "text": "performance.",
916
+ "bbox": [
917
+ 506,
918
+ 66,
919
+ 598,
920
+ 80
921
+ ],
922
+ "page_idx": 4
923
+ },
924
+ {
925
+ "type": "text",
926
+ "text": "IV. CONCLUSION",
927
+ "text_level": 1,
928
+ "bbox": [
929
+ 635,
930
+ 92,
931
+ 784,
932
+ 106
933
+ ],
934
+ "page_idx": 4
935
+ },
936
+ {
937
+ "type": "text",
938
+ "text": "We have presented a novel Deep Learning architecture based on Neural Radiance Fields for learning a continuous representation of CT scans. We learn a medical category encoding of the attenuation response of a set of 2D DRRs in the weights of a generator. Furthermore, we have found that a stronger and more comprehensive signal from our discriminator allows generative radiance fields to model 3D-aware CT-projections. Experimental evaluation demonstrates significant qualitative and quantitative reconstructions and improvements over other Neural Radiance Field approaches. Whilst the proposed model may not replace CT entirely, the functionality of generating 3D-aware CT-projections from X-rays has great potential for clinical use in osseous trauma, skeletal evaluation in dysplasia and for orthopaedic presurgical planning. This could cut down on the radiation dose given to patients, with significant economic implications such as bringing down the cost of investigations.",
939
+ "bbox": [
940
+ 504,
941
+ 114,
942
+ 913,
943
+ 372
944
+ ],
945
+ "page_idx": 4
946
+ },
947
+ {
948
+ "type": "text",
949
+ "text": "ACKNOWLEDGMENT",
950
+ "text_level": 1,
951
+ "bbox": [
952
+ 620,
953
+ 383,
954
+ 799,
955
+ 396
956
+ ],
957
+ "page_idx": 4
958
+ },
959
+ {
960
+ "type": "text",
961
+ "text": "This work is partially supported by the Mexican Council of Science and Technology (CONACyT).",
962
+ "bbox": [
963
+ 504,
964
+ 404,
965
+ 911,
966
+ 434
967
+ ],
968
+ "page_idx": 4
969
+ },
970
+ {
971
+ "type": "text",
972
+ "text": "REFERENCES",
973
+ "text_level": 1,
974
+ "bbox": [
975
+ 661,
976
+ 446,
977
+ 758,
978
+ 459
979
+ ],
980
+ "page_idx": 4
981
+ },
982
+ {
983
+ "type": "list",
984
+ "sub_type": "ref_text",
985
+ "list_items": [
986
+ "[1] Paul Suetens, Visualization for diagnosis and therapy, p. 190-218, Cambridge University Press, 2 edition, 2009.",
987
+ "[2] Pechin Lo, Bram van Ginneken, Joseph M. Reinhardt, Tarunashree Yavarna, Pim A. de Jong, Benjamin Irving, Catalin Fetita, Margarete Ortner, Rómulo Pinho, Jan Sijbers, Marco Feuerstein, Anna Fabijanska, Christian Bauer, Reinhard Beichel, Carlos S. Mendoza, Rafael Wiemker, Jaesung Lee, Anthony P. Reeves, Silvia Born, Oliver Weinheimer, Eva M. van Rikxooft, Juerg Tschirren, Ken Mori, Benjamin Odry, David P. Naidich, Ieneke Hartmann, Eric A. Hoffman, Mathias Prokop, Jesper H. Pedersen, and Marleen de Bruijne, \"Extraction of airways from ct (exact'09),\" IEEE Transactions on Medical Imaging, vol. 31, no. 11, pp. 2093-2107, 2012.",
988
+ "[3] Mary Coffey and Aude Vaandering, \"Patient setup for pet/ct acquisition in radiotherapy planning,\" Radiotherapy and Oncology, vol. 96, no. 3, pp. 298-301, 2010, PET in Radiotherapy Planning.",
989
+ "[4] Tri Huynh, Yaozong Gao, Jiayin Kang, Li Wang, Pei Zhang, Jun Lian, and Dinggang Shen, \"Estimating ct image from mri data using structured random forest and auto-context model,\" IEEE Transactions on Medical Imaging, vol. 35, no. 1, pp. 174-183, 2016.",
990
+ "[5] Shoulie Xie, Weimin Huang, Tao Yang, Dajun Wu, and Huiying Liu, \"Compressed sensing based image reconstruction with projection recovery for limited angle cone-beam ct imaging,\" in 2020 42nd Annual International Conference of the IEEE Engineering in Medicine Biology Society (EMBC), 2020, pp. 1307-1310.",
991
+ "[6] Ge Wang, Jong Chu Ye, Klaus Mueller, and Jeffrey A. Fessler, \"Image reconstruction is a new frontier of machine learning,\" IEEE Transactions on Medical Imaging, vol. 37, no. 6, pp. 1289-1296, 2018.",
992
+ "[7] Yinsheng Li, Ke Li, Chengzhu Zhang, Juan Montoya, and Guang-Hong Chen, \"Learning to reconstruct computed tomography images directly from sinogram data under a variety of data acquisition conditions,\" IEEE Transactions on Medical Imaging, vol. 38, no. 10, pp. 2469-2481, 2019.",
993
+ "[8] David B. Lindell, Julien N. P. Martel, and Gordon Wetzstein, \"Autoint: Automatic integration for fast neural volume rendering,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2021, pp. 14556-14565.",
994
+ "[9] Yu Sun, Jiaming Liu, Mingyang Xie, Brendt Wohlberg, and Ulugbek S. Kamilov, \"Coil: Coordinate-based internal learning for tomographic imaging,\" IEEE Transactions on Computational Imaging, vol. 7, pp. 1400-1412, 2021."
995
+ ],
996
+ "bbox": [
997
+ 514,
998
+ 469,
999
+ 913,
1000
+ 925
1001
+ ],
1002
+ "page_idx": 4
1003
+ },
1004
+ {
1005
+ "type": "list",
1006
+ "sub_type": "ref_text",
1007
+ "list_items": [
1008
+ "[10] Xingde Ying, Heng Guo, Kai Ma, Jian Wu, Zhengxin Weng, and Yefeng Zheng, “X2ct-gan: Reconstructing ct from biplanar x-rays with generative adversarial networks,” in 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 10611-10620.",
1009
+ "[11] Bao Nguyen, Adam Feldman, Sarath Bethapudi, Andrew Jennings, and Chris G. Willcocks, \"Unsupervised region-based anomaly detection in brain mri with adversarial image inpainting,\" in 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), 2021, pp. 1127-1131.",
1010
+ "[12] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng, “Nerf: Representing scenes as neural radiance fields for view synthesis,” in Computer Vision – ECCV 2020, Andreae Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, Eds., Cham, 2020, pp. 405–421, Springer International Publishing.",
1011
+ "[13] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth, \"NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections,\" in CVPR, 2021.",
1012
+ "[14] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger, \"Graf: Generative radiance fields for 3d-aware image synthesis,\" in Advances in Neural Information Processing Systems (NeurIPS), 2020.",
1013
+ "[15] E B Tsai, S Lungren, M P Hershman, M Roshkovan, L Colak, E Erickson, B J Shih, G Stein, A Kalpathy-Cramer, J Shen, J Hafez, M A F John, S Rajiah, P Pogatchnik, B P Mongan, J T Altinmakas, E Ranschaert, E R Kitamura, and C C. Wu, “Medical imaging data resource center (midrc) - rsna international pandemic open research database (ricord) release 1b - chest ct pandemic-[dataset],” The Cancer Imaging Archive, 2021.",
1014
+ "[16] K Clark, B Vendt, K Smith, J Freymann, J Kirby, P Koppel, S Moore, S Phillips, D Maffitt, M Pringle, L Tarbox, and F Prior, \"The cancer imaging archive (tcia): Maintaining and operating a public information repository,\" Journal of Digital Imaging, vol. 26, no. 6, pp. 1045-1057, December 2013.",
1015
+ "[17] Michael D Harris, Adam J Cyr, Azhar A Ali, Clare K Fitzpatrick, Paul J Rullkoetter, Lorin P Maletsky, and Kevin B Shelburne, “A"
1016
+ ],
1017
+ "bbox": [
1018
+ 84,
1019
+ 66,
1020
+ 488,
1021
+ 496
1022
+ ],
1023
+ "page_idx": 5
1024
+ },
1025
+ {
1026
+ "type": "list",
1027
+ "sub_type": "ref_text",
1028
+ "list_items": [
1029
+ "combined experimental and computational approach to subject-specific analysis of knee joint laxity,\" Journal of biomechanical engineering, vol. 138, no. 8, August 2016.",
1030
+ "[18] Azhar A Ali, Sami S Shalhoub, Adam J Cyr, Clare K Fitzpatrick, Lorin P Maletsky, Paul J Rullkoetter, and Kevin B Shelburne, \"Validation of predicted patellofemoral mechanics in a finite element model of the healthy and cruciate-deficient knee,\" Journal of biomechanics, vol. 49, no. 2, pp. 302-309, January 2016.",
1031
+ "[19] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa, “pixelNeRF: Neural radiance fields from one or few images,” in CVPR, 2021.",
1032
+ "[20] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung, “On data augmentation for gan training,” IEEE Transactions on Image Processing, vol. 30, pp. 1882–1897, 2021.",
1033
+ "[21] Bingchen Liu, Yizhe Zhu, Kunpeng Song, and Ahmed Elgammal, \"Towards faster and stabilized $\\{\\mathrm{gan}\\}$ training for high-fidelity few-shot image synthesis,\" in International Conference on Learning Representations, 2021.",
1034
+ "[22] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang, \"The unreasonable effectiveness of deep features as a perceptual metric,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018.",
1035
+ "[23] Xingang Pan, Xiaohang Zhan, Bo Dai, Dahua Lin, Chen Change Loy, and Ping Luo, \"Exploiting deep generative prior for versatile image restoration and manipulation,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1-1, 2021.",
1036
+ "[24] Yochai Blau and Tomer Michaeli, \"The perception-distortion tradeoff,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018."
1037
+ ],
1038
+ "bbox": [
1039
+ 509,
1040
+ 66,
1041
+ 911,
1042
+ 409
1043
+ ],
1044
+ "page_idx": 5
1045
+ }
1046
+ ]
2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_model.json ADDED
@@ -0,0 +1,1334 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.268,
8
+ 0.061,
9
+ 0.717
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2202.01020v3 [eess.IV] 8 Apr 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.145,
18
+ 0.089,
19
+ 0.852,
20
+ 0.139
21
+ ],
22
+ "angle": 0,
23
+ "content": "MedNeRF: Medical Neural Radiance Fields for Reconstructing 3D-aware CT-Projections from a Single X-ray"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.26,
29
+ 0.158,
30
+ 0.737,
31
+ 0.193
32
+ ],
33
+ "angle": 0,
34
+ "content": "Abril Corona-Figueroa<sup>1</sup>, Jonathan Frawley<sup>1</sup>, Sam Bond-Taylor<sup>1</sup>, Sarath Bethapudi<sup>2</sup>, Hubert P. H. Shum<sup>1</sup>, Chris G. Willcocks<sup>1</sup>"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.082,
40
+ 0.218,
41
+ 0.49,
42
+ 0.47
43
+ ],
44
+ "angle": 0,
45
+ "content": "Abstract—Computed tomography (CT) is an effective medical imaging modality, widely used in the field of clinical medicine for the diagnosis of various pathologies. Advances in Multidetector CT imaging technology have enabled additional functionalities, including generation of thin slice multiplanar cross-sectional body imaging and 3D reconstructions. However, this involves patients being exposed to a considerable dose of ionising radiation. Excessive ionising radiation can lead to deterministic and harmful effects on the body. This paper proposes a Deep Learning model that learns to reconstruct CT projections from a few or even a single-view X-ray. This is based on a novel architecture that builds from neural radiance fields, which learns a continuous representation of CT scans by disentangling the shape and volumetric depth of surface and internal anatomical structures from 2D images. Our model is trained on chest and knee datasets, and we demonstrate qualitative and quantitative high-fidelity renderings and compare our approach to other recent radiance field-based methods. Our code and link to our datasets are available at https://github.com/abrilcf/mednerf"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.082,
51
+ 0.47,
52
+ 0.49,
53
+ 0.523
54
+ ],
55
+ "angle": 0,
56
+ "content": "Clinical relevance—Our model is able to infer the anatomical 3D structure from a few or a single-view X-ray, showing future potential for reduced ionising radiation exposure during the imaging process."
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.208,
62
+ 0.533,
63
+ 0.365,
64
+ 0.546
65
+ ],
66
+ "angle": 0,
67
+ "content": "I. INTRODUCTION"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.082,
73
+ 0.553,
74
+ 0.49,
75
+ 0.779
76
+ ],
77
+ "angle": 0,
78
+ "content": "3D medical imaging often involves joining multiple 2D slices from CT or Magnetic Resonance Imaging (MRI), and part of their workflow consists of specifying values for the position of the patient, the imaging source, and the detector. The quality and accuracy of a CT 3D representation require hundreds of X-ray projections with a thin slice thickness [1]. Moreover, this process exposes patients to more ionising radiation than typical X-rays and requires the patient to remain immobile for up to more than 1 hour, depending on the type of test [2]. Continuous 3D representations would give radiologists optics of every point in the internal anatomy captured. While such representations are useful, there are practical challenges in CT due to the increased radiation exposure, angle-dependent structures, and time consumption [3]."
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.082,
84
+ 0.78,
85
+ 0.49,
86
+ 0.856
87
+ ],
88
+ "angle": 0,
89
+ "content": "Earlier approaches in medical image reconstruction used analytic and iterative methods [4], [5] on given input data. However, they often encounter mismatches between the mathematical model and physical properties of the imaging system. Instead, several recent approaches leverage deep"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.505,
95
+ 0.217,
96
+ 0.914,
97
+ 0.352
98
+ ],
99
+ "angle": 0,
100
+ "content": "learning [6] for sparse view reconstruction [7], [8], [9], 3D CT reconstruction from 2D images [10], and anomaly detection [11]. These deep learning approaches solved the mismatches between the mathematical model and imaging system and reported improved reconstructions by fine-tuning state-of-the-art architectures. However, they require a large amount of training data, which may be difficult to meet in the medical domain where acquiring expert annotations is both cost and time prohibitive."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.505,
106
+ 0.353,
107
+ 0.913,
108
+ 0.639
109
+ ],
110
+ "angle": 0,
111
+ "content": "The Neural Radiance Fields (NeRF) [12] model is a recent reformulation for estimating a 3D volumetric representation from images. Such representations encode the radiance field and density of the scene in the parameters of a neural network. The neural network learns to synthesize new views via volume rendering from point samples along cast rays. However, these representations are often captured in controlled settings [13]. First, the scene is taken by a set of fixed cameras within a short time frame. Second, all content in the scene is static and real images often need masking. These constraints prohibit the direct application of NeRF to the medical domain, where the imaging system greatly differs from conventional cameras, and the images are captured over a long time frame hampering the patient's stillness. Moreover, the overlapping of anatomical structures in medical images hinders the definition of edges which cannot be easily solved with masking. These aspects explain why the NeRF approach especially shows successes for \"natural images\"."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.505,
117
+ 0.64,
118
+ 0.914,
119
+ 0.804
120
+ ],
121
+ "angle": 0,
122
+ "content": "To address these challenges, we propose MedNeRF, a model that adapts Generative Radiance Fields (GRAF) [14] in the medical domain to render CT projections given a few or even a single-view X-ray. Our approach not only synthesizes realistic images, but also captures the data manifold and provides a continuous representation of how the attenuation and volumetric depth of anatomical structures vary with the viewpoint without 3D supervision. This is achieved via a new discriminator architecture that provides a stronger and more comprehensive signal to GRAF when dealing with CT scans."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.505,
128
+ 0.806,
129
+ 0.914,
130
+ 0.927
131
+ ],
132
+ "angle": 0,
133
+ "content": "Closest to our goal are [8], [9], which both train a coordinate-based network in sinograms of low-dose CT of phantom objects and apply it to the sparse-view tomography reconstruction problem. In contrast to [8], we learn multiple representations in a single model by randomly feeding data of different medical instances instead of separately optimizing for each collection of images. For testing [9] reconstruction ability, they integrate it into reconstruction methods and use"
134
+ },
135
+ {
136
+ "type": "page_footnote",
137
+ "bbox": [
138
+ 0.082,
139
+ 0.867,
140
+ 0.49,
141
+ 0.902
142
+ ],
143
+ "angle": 0,
144
+ "content": "<sup>1</sup>Corona-Figueroa, Frawley, Bond-Taylor, Shum and Willcocks are with the Computer Science Department, Durham University, Durham, DH1 3LE, UK abril.corona-figueroa@durham.ac.uk"
145
+ },
146
+ {
147
+ "type": "page_footnote",
148
+ "bbox": [
149
+ 0.082,
150
+ 0.902,
151
+ 0.49,
152
+ 0.925
153
+ ],
154
+ "angle": 0,
155
+ "content": "\\(^{2}\\)Bethapudi is with the County Durham and Darlington NHS Foundation Trust, Durham, DL3 6HX, UK"
156
+ },
157
+ {
158
+ "type": "list",
159
+ "bbox": [
160
+ 0.082,
161
+ 0.867,
162
+ 0.49,
163
+ 0.925
164
+ ],
165
+ "angle": 0,
166
+ "content": null
167
+ }
168
+ ],
169
+ [
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.082,
174
+ 0.066,
175
+ 0.488,
176
+ 0.111
177
+ ],
178
+ "angle": 0,
179
+ "content": "at least 60 views. Different from their methods, we do not rely on additional reconstruction algorithms, and we only require multiple views during training."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.082,
185
+ 0.112,
186
+ 0.488,
187
+ 0.232
188
+ ],
189
+ "angle": 0,
190
+ "content": "We render CT projections of our two datasets of digitally reconstructed radiographs (DRR) from chest and knee. We qualitative and quantitative demonstrate high-fidelity renderings and compare our approach to other recent radiance field-based methods. Furthermore, we render CT projections of a medical instance given a single-view X-ray and show the effectiveness of our model to cover surface and internal structures."
191
+ },
192
+ {
193
+ "type": "title",
194
+ "bbox": [
195
+ 0.229,
196
+ 0.241,
197
+ 0.344,
198
+ 0.255
199
+ ],
200
+ "angle": 0,
201
+ "content": "II. METHODS"
202
+ },
203
+ {
204
+ "type": "title",
205
+ "bbox": [
206
+ 0.082,
207
+ 0.261,
208
+ 0.248,
209
+ 0.276
210
+ ],
211
+ "angle": 0,
212
+ "content": "A. Dataset Preparation"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.082,
218
+ 0.28,
219
+ 0.49,
220
+ 0.537
221
+ ],
222
+ "angle": 0,
223
+ "content": "To train our models, we generate DRRs instead of collecting paired X-rays and corresponding CT reconstructions, which would expose patients to more radiation. Furthermore, DRR generation removes patient data and enables control in capture ranges and resolutions. We generated DRRs by using 20 CT chest scans from [15], [16] and five CT knee scans from [17], [18]. These scans cover a diverse group of patients at different contrast types showing both normal and abnormal anatomy. The radiation source and imaging panel are assumed to rotate around the vertical-axis, generating a DRR of \\(128 \\times 128\\) resolution at every five degrees, resulting in 72 DRRs for each object. During training we use the whole set of 72 DRRs (a fifth of all views within a full 360-degree vertical rotation) per patient and let the model render the rest. Our work did not involve experimental procedures on human subjects or animals and thus did not require Institutional Review Board approval."
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.084,
229
+ 0.546,
230
+ 0.221,
231
+ 0.559
232
+ ],
233
+ "angle": 0,
234
+ "content": "B. GRAF Overview"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.082,
240
+ 0.564,
241
+ 0.49,
242
+ 0.744
243
+ ],
244
+ "angle": 0,
245
+ "content": "GRAF [14] is a model that builds from NeRF and defines it within an Generative Adversarial Network (GAN). It consists of a generator \\( G_{\\theta} \\) that predicts an image patch \\( P_{\\mathrm{pred}} \\) and a discriminator \\( D_{\\phi} \\) that compares the predicted patch to a patch \\( P_{\\mathrm{real}} \\) extracted from a real image. GRAF has shown an effective capacity to disentangle 3D shape and viewpoint of objects from 2D images alone, in contrast to the original NeRF [12] and similar approaches such as [19]. Therefore, we aim to translate GRAF's methods to our task, and in subsection II-C we describe our new discriminator architecture, which allows us to disentangle 3D properties from DRRs."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.082,
251
+ 0.746,
252
+ 0.488,
253
+ 0.881
254
+ ],
255
+ "angle": 0,
256
+ "content": "We consider the experimental setting to obtain the radiation attenuation response instead of the color used in natural images. To obtain the attenuation response at a pixel location for an arbitrary projection \\( \\pmb{K} \\) with pose \\( \\pmb{\\xi} \\), first, we consider a pattern \\( \\pmb{\\nu} = (\\pmb{u}, s) \\) to sample \\( R \\) X-ray beams within a \\( K \\times K \\) image-patch \\( \\pmb{P} \\). Then, we sample \\( N \\) 3D points \\( \\pmb{x}_r^i \\) along the X-ray beam \\( r \\) originating from the pixel location and ordered between the near and far planes of the projection (Fig. 1a)."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.084,
262
+ 0.882,
263
+ 0.49,
264
+ 0.927
265
+ ],
266
+ "angle": 0,
267
+ "content": "The object representation is encoded in a multi-layer perceptron (MLP) that takes as input a 3D position \\( \\pmb{x} = (x, y, z) \\) and a viewing direction \\( \\pmb{d} = (\\theta, \\phi) \\), and produces"
268
+ },
269
+ {
270
+ "type": "image",
271
+ "bbox": [
272
+ 0.514,
273
+ 0.062,
274
+ 0.912,
275
+ 0.222
276
+ ],
277
+ "angle": 0,
278
+ "content": null
279
+ },
280
+ {
281
+ "type": "image_caption",
282
+ "bbox": [
283
+ 0.588,
284
+ 0.232,
285
+ 0.831,
286
+ 0.245
287
+ ],
288
+ "angle": 0,
289
+ "content": "Fig. 1. An overview of GRAF's generator."
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.505,
295
+ 0.273,
296
+ 0.912,
297
+ 0.318
298
+ ],
299
+ "angle": 0,
300
+ "content": "as output a density scalar \\(\\sigma\\) and a pixel value \\(c\\). To learn high-frequency features, the input is mapped into a \\(2L\\)-dimensional representation (Fig. 1b):"
301
+ },
302
+ {
303
+ "type": "equation",
304
+ "bbox": [
305
+ 0.59,
306
+ 0.327,
307
+ 0.912,
308
+ 0.343
309
+ ],
310
+ "angle": 0,
311
+ "content": "\\[\n\\gamma (p) = \\dots , \\cos \\left(2 ^ {j} \\pi p\\right), \\sin \\left(2 ^ {j} \\pi p\\right), \\dots \\tag {1}\n\\]"
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.506,
317
+ 0.351,
318
+ 0.912,
319
+ 0.38
320
+ ],
321
+ "angle": 0,
322
+ "content": "where \\( p \\) represents the 3D position or viewing direction, for \\( j = 0,\\dots,m - 1 \\)."
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.506,
328
+ 0.381,
329
+ 0.912,
330
+ 0.502
331
+ ],
332
+ "angle": 0,
333
+ "content": "For modeling the shape and appearance of anatomical structures, let \\( z_{s} \\sim p_{s} \\) and \\( z_{a} \\sim p_{a} \\) be the latent codes sampled from a standard Gaussian distribution, respectively (Fig. 1c). To obtain the density prediction \\( \\sigma \\), the shape encoding \\( q \\) is transformed to volume density through a density head \\( \\sigma_{\\theta} \\). Then, the network \\( g_{\\theta}(\\cdot) \\) operates on a shape encoding \\( q = (\\gamma(x), z_{s}) \\) that is later concatenated with the positional encoding of \\( d \\) and appearance code \\( z_{a} \\) (Fig. 1c):"
334
+ },
335
+ {
336
+ "type": "equation",
337
+ "bbox": [
338
+ 0.687,
339
+ 0.51,
340
+ 0.912,
341
+ 0.526
342
+ ],
343
+ "angle": 0,
344
+ "content": "\\[\n(\\gamma (\\boldsymbol {x}), \\boldsymbol {z} _ {s}) \\mapsto \\boldsymbol {q} \\tag {2}\n\\]"
345
+ },
346
+ {
347
+ "type": "equation",
348
+ "bbox": [
349
+ 0.625,
350
+ 0.529,
351
+ 0.912,
352
+ 0.546
353
+ ],
354
+ "angle": 0,
355
+ "content": "\\[\n\\left(\\boldsymbol {q} \\left(\\boldsymbol {x}, \\boldsymbol {z} _ {s}\\right), \\gamma (\\boldsymbol {d}), \\boldsymbol {z} _ {a}\\right) \\mapsto c \\tag {3}\n\\]"
356
+ },
357
+ {
358
+ "type": "equation",
359
+ "bbox": [
360
+ 0.701,
361
+ 0.549,
362
+ 0.912,
363
+ 0.563
364
+ ],
365
+ "angle": 0,
366
+ "content": "\\[\n\\boldsymbol {q} \\left(\\boldsymbol {x}, \\boldsymbol {z} _ {s}\\right) \\mapsto \\sigma \\tag {4}\n\\]"
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.507,
372
+ 0.572,
373
+ 0.912,
374
+ 0.601
375
+ ],
376
+ "angle": 0,
377
+ "content": "The final pixel response \\( c_{r} \\) is computed by the compositing operation (Fig. 1c):"
378
+ },
379
+ {
380
+ "type": "equation",
381
+ "bbox": [
382
+ 0.602,
383
+ 0.608,
384
+ 0.912,
385
+ 0.65
386
+ ],
387
+ "angle": 0,
388
+ "content": "\\[\nc _ {r} = \\sum_ {i = 1} ^ {N} c _ {r} ^ {i} \\alpha_ {r} ^ {i} \\exp \\left(- \\sum_ {j = 1} ^ {i - 1} \\sigma_ {r} ^ {j} \\delta_ {r} ^ {j}\\right) \\tag {5}\n\\]"
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.506,
394
+ 0.657,
395
+ 0.912,
396
+ 0.702
397
+ ],
398
+ "angle": 0,
399
+ "content": "where \\(\\alpha_r^i = 1 - \\exp \\left(-\\sigma_r^i\\delta_r^i\\right)\\) is the alpha compositing value of sampled point \\(i\\) and \\(\\delta_r^i = \\| \\pmb{x}_r^{i + 1} - \\pmb{x}_r^i\\| _2\\) is the distance between the adjacent sampled points."
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.506,
405
+ 0.703,
406
+ 0.912,
407
+ 0.765
408
+ ],
409
+ "angle": 0,
410
+ "content": "In this way, both the density and pixel values are computed at each sampled point along the beam \\( r \\) with network \\( g_{\\theta} \\). Finally, combining the results of all \\( R \\) beams, the generator \\( G_{\\theta} \\) predicts an image patch \\( P_{\\mathrm{pred}} \\), as illustrated in Fig. 1d."
411
+ },
412
+ {
413
+ "type": "title",
414
+ "bbox": [
415
+ 0.508,
416
+ 0.772,
417
+ 0.604,
418
+ 0.785
419
+ ],
420
+ "angle": 0,
421
+ "content": "C. MedNeRF"
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.505,
427
+ 0.791,
428
+ 0.912,
429
+ 0.896
430
+ ],
431
+ "angle": 0,
432
+ "content": "We investigate how we can adapt GRAF to the medical domain and apply it to render a volumetric representation from DRRs. Leveraging a large dataset, GRAF's discriminator \\(D_{\\phi}\\) is able to continuously provide useful signals to train the generator \\(G_{\\theta}\\). However, medical datasets like those considered in our problem are generally small, which causes two sequential issues:"
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.506,
438
+ 0.897,
439
+ 0.912,
440
+ 0.927
441
+ ],
442
+ "angle": 0,
443
+ "content": "The lack of real information to the generator: In GRAF (and in GAN in general), the only source of features of"
444
+ }
445
+ ],
446
+ [
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.082,
451
+ 0.066,
452
+ 0.49,
453
+ 0.142
454
+ ],
455
+ "angle": 0,
456
+ "content": "the training data contributing to the generator is the indirect gradient transferred from the discriminator. We find that the single convolutional feedback from GRAF's discriminator poorly conveys refined features from DRRs resulting in inaccurate volumetric estimation."
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.082,
462
+ 0.142,
463
+ 0.491,
464
+ 0.309
465
+ ],
466
+ "angle": 0,
467
+ "content": "Brittle adversarial training: With a limited training dataset, the generator or discriminator may fall into ill-posed settings such as mode collapse, which would lead to generating a limited number of instances and consequently, a suboptimal data distribution estimation. While some works have applied data augmentation techniques to leverage more data in the medical domain, some transformations could mislead the generator to learn the infrequent or even non-existent augmented data distribution [20]. We find that naively applying classic data augmentation works less favorably than our adopted framework."
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.082,
473
+ 0.328,
474
+ 0.49,
475
+ 0.553
476
+ ],
477
+ "angle": 0,
478
+ "content": "1) Self-supervised Learning for High-Fidelity Synthesis: To allow richer feature-maps covering from the DRRs such that it produces more comprehensive signals to train \\( G_{\\theta} \\), we replace GRAF's discriminator architecture with recent advancements in self-supervised approaches. We allow \\( D_{\\phi} \\) to learn useful global and local features training it on a pretext task, in particular, the self-supervision method based on auto-encoding [21]. Different from [21], we only use two decoders for the feature-maps on scales: \\( f_{1} \\) on \\( 32^{2} \\) and \\( f_{2} \\) on \\( 8^{2} \\) (Fig. 2a). We find that this choice allows better performance and enables a correct volumetric depth estimation. \\( D_{\\phi} \\) must therefore not only discriminate \\( P_{\\mathrm{pred}} \\) predicted from \\( G_{\\theta} \\) but also extract comprehensive features from real image patches \\( P_{\\mathrm{real}} \\) that enable the decoders to resemble the data distribution."
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.082,
484
+ 0.555,
485
+ 0.49,
486
+ 0.646
487
+ ],
488
+ "angle": 0,
489
+ "content": "To assess global structure in decoded patches from \\( D_{\\phi} \\), we use the Learned Perceptual Image Patch Similarity (LPIPS) metric [22]. We compute the weighted pairwise image distance between two VGG16 feature spaces, where the pretrained weights are fit to better match human perceptual judgments. The additional discriminator loss is therefore:"
490
+ },
491
+ {
492
+ "type": "equation",
493
+ "bbox": [
494
+ 0.092,
495
+ 0.654,
496
+ 0.49,
497
+ 0.688
498
+ ],
499
+ "angle": 0,
500
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {r}} = \\mathbb {E} _ {\\boldsymbol {f} \\sim D (\\boldsymbol {p}), \\boldsymbol {p} \\sim P} \\left[ \\frac {1}{w h d} \\| \\phi_ {i} (\\mathcal {G} (\\boldsymbol {f})) - \\phi_ {i} (\\mathcal {T} (\\boldsymbol {p})) \\| _ {2} \\right] \\tag {6}\n\\]"
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.082,
506
+ 0.696,
507
+ 0.49,
508
+ 0.801
509
+ ],
510
+ "angle": 0,
511
+ "content": "where \\(\\phi_i(\\cdot)\\) denotes the \\(i\\)th layer output of a pretrained VGG16 network, and \\(w\\), \\(h\\), and \\(d\\) stand for the width, height and depth of a feature space, respectively. Let \\(\\mathcal{G}\\) be the processing on the intermediate feature-maps \\(\\pmb{f}\\) from \\(D_{\\phi}\\), and \\(\\mathcal{T}\\) the processing on real image patches. When coupled with this additional reconstruction loss, the network learns representations that transfer across tasks."
512
+ },
513
+ {
514
+ "type": "title",
515
+ "bbox": [
516
+ 0.099,
517
+ 0.821,
518
+ 0.425,
519
+ 0.835
520
+ ],
521
+ "angle": 0,
522
+ "content": "2) Improving Learning via Data Augmentation:"
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.082,
528
+ 0.836,
529
+ 0.491,
530
+ 0.929
531
+ ],
532
+ "angle": 0,
533
+ "content": "We improve learning of \\( G_{\\theta} \\) and \\( D_{\\phi} \\) by adopting the Data Augmentation Optimized for GAN (DAG) framework [20] in which a data augmentation transformation \\( \\mathcal{T}_k \\) (Fig. 2b) is applied using multiple discriminator heads \\( \\{D_k\\} \\). To further reduce memory usage, we share all layers of \\( D_{\\phi} \\) except the last layers corresponding to each head (Fig."
534
+ },
535
+ {
536
+ "type": "image",
537
+ "bbox": [
538
+ 0.513,
539
+ 0.062,
540
+ 0.912,
541
+ 0.25
542
+ ],
543
+ "angle": 0,
544
+ "content": null
545
+ },
546
+ {
547
+ "type": "image_caption",
548
+ "bbox": [
549
+ 0.506,
550
+ 0.26,
551
+ 0.914,
552
+ 0.284
553
+ ],
554
+ "angle": 0,
555
+ "content": "Fig. 2. An overview of our discriminator with self-supervised learning and DAG."
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.506,
561
+ 0.313,
562
+ 0.913,
563
+ 0.358
564
+ ],
565
+ "angle": 0,
566
+ "content": "2c). Because applying differentiable and invertible data augmentation transformations \\(\\mathcal{T}_k\\) has the Jenssen-Shannon (JS) preserving property [20]:"
567
+ },
568
+ {
569
+ "type": "equation",
570
+ "bbox": [
571
+ 0.611,
572
+ 0.366,
573
+ 0.913,
574
+ 0.387
575
+ ],
576
+ "angle": 0,
577
+ "content": "\\[\n\\mathrm {J S} \\left(p _ {d} ^ {\\mathcal {T} _ {k}} \\| p _ {g} ^ {\\mathcal {T} _ {k}}\\right) = \\mathrm {J S} \\left(p _ {d} \\| p _ {g}\\right) \\tag {7}\n\\]"
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.506,
583
+ 0.394,
584
+ 0.915,
585
+ 0.547
586
+ ],
587
+ "angle": 0,
588
+ "content": "where \\( p_d^{\\mathcal{T}_k} \\) is the transformed training data distribution and \\( p_g^{\\mathcal{T}_k} \\) the transformed distribution captured by \\( G_{\\theta} \\). By using a total of four transformations combining flipping and rotation, we encourage optimization to the original data distribution, which also brings the most performance boost. These choices allow our model to benefit from not only \\( \\mathrm{JS}(p_d \\parallel p_g) \\) but also \\( \\mathrm{JS}(p_d^{\\mathcal{T}_k} \\parallel p_g^{\\mathcal{T}_k}) \\), thereby improving the learning of \\( G_{\\theta} \\) and generalization of \\( D_{\\phi} \\). Furthermore, using multiple discriminators with weight-sharing provides learning regularization of \\( D_{\\phi} \\)."
589
+ },
590
+ {
591
+ "type": "text",
592
+ "bbox": [
593
+ 0.506,
594
+ 0.548,
595
+ 0.913,
596
+ 0.577
597
+ ],
598
+ "angle": 0,
599
+ "content": "Replacing GRAF's logistic objective with a hinge loss, we then define our overall loss as below:"
600
+ },
601
+ {
602
+ "type": "equation",
603
+ "bbox": [
604
+ 0.558,
605
+ 0.583,
606
+ 0.913,
607
+ 0.623
608
+ ],
609
+ "angle": 0,
610
+ "content": "\\[\n\\mathcal {L} (\\theta , \\left\\{\\phi_ {k} \\right\\}) = \\mathcal {L} (\\theta , \\phi_ {0}) + \\frac {\\lambda}{n - 1} \\sum_ {k = 1} ^ {n} \\mathcal {L} (\\theta , \\phi_ {k}) \\tag {8}\n\\]"
611
+ },
612
+ {
613
+ "type": "equation",
614
+ "bbox": [
615
+ 0.522,
616
+ 0.634,
617
+ 0.913,
618
+ 0.689
619
+ ],
620
+ "angle": 0,
621
+ "content": "\\[\n\\begin{array}{l} \\mathcal {L} (\\theta , \\phi_ {k}) = \\\\ \\mathbb {E} _ {\\boldsymbol {z} _ {s} \\sim p _ {s}, \\boldsymbol {z} _ {a} \\sim p _ {a}, \\boldsymbol {\\xi} \\sim p _ {\\xi}, \\boldsymbol {\\nu} \\sim p _ {\\nu}} [ f (D _ {\\phi} (G _ {\\theta} (\\boldsymbol {z} _ {s}, \\boldsymbol {z} _ {a}, \\boldsymbol {\\xi}, \\boldsymbol {\\nu}))) ] \\tag {9} \\\\ + \\mathbb {E} _ {\\boldsymbol {I} \\sim p _ {D}, \\boldsymbol {\\nu} \\sim p _ {\\nu}} [ f (- D _ {\\phi} (\\boldsymbol {I}, \\boldsymbol {\\nu})) ] + \\mathcal {L} _ {\\mathrm {r}} \\\\ \\end{array}\n\\]"
622
+ },
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.506,
627
+ 0.696,
628
+ 0.913,
629
+ 0.743
630
+ ],
631
+ "angle": 0,
632
+ "content": "where \\( f(u) = \\max (0,1 + u) \\). We optimize this loss with \\( n = 4 \\), where \\( k = 0 \\) corresponds to the identity transformation and \\( \\lambda = 0.2 \\) (as in [20])."
633
+ },
634
+ {
635
+ "type": "title",
636
+ "bbox": [
637
+ 0.523,
638
+ 0.761,
639
+ 0.875,
640
+ 0.775
641
+ ],
642
+ "angle": 0,
643
+ "content": "3) Volumetric Rendering from a Single View \\(X\\)-ray:"
644
+ },
645
+ {
646
+ "type": "text",
647
+ "bbox": [
648
+ 0.505,
649
+ 0.776,
650
+ 0.914,
651
+ 0.927
652
+ ],
653
+ "angle": 0,
654
+ "content": "After training a model, we reconstruct the complete X-ray projections within a full vertical rotation of a medical instance given a single view X-ray. We follow the relaxed reconstruction formulation in [23], which fits the generator to a single image. Then, we allow the parameters of the generator \\( G_{\\theta} \\) to be slightly fine-tuned along with the shape and appearance latent vectors \\( \\mathbf{z}_s \\) and \\( \\mathbf{z}_a \\). The distortion and perception tradeoff is well known in GAN methods [24] and therefore we modify our generation objective by adding the distortion Mean Square Error (MSE) loss, which incentivises"
655
+ }
656
+ ],
657
+ [
658
+ {
659
+ "type": "image",
660
+ "bbox": [
661
+ 0.086,
662
+ 0.061,
663
+ 0.916,
664
+ 0.293
665
+ ],
666
+ "angle": 0,
667
+ "content": null
668
+ },
669
+ {
670
+ "type": "image_caption",
671
+ "bbox": [
672
+ 0.082,
673
+ 0.305,
674
+ 0.913,
675
+ 0.331
676
+ ],
677
+ "angle": 0,
678
+ "content": "Fig. 3. Knee renderings from continuous viewpoint rotations showing tissue and bone. Given a single-view X-ray from a CT, we can generate the complete set of CT-projections within a full vertical rotation by slightly fine-tuning a pretrained model along with the shape and appearance latent codes."
679
+ },
680
+ {
681
+ "type": "table_caption",
682
+ "bbox": [
683
+ 0.097,
684
+ 0.35,
685
+ 0.477,
686
+ 0.38
687
+ ],
688
+ "angle": 0,
689
+ "content": "TABLE I. Quantitative results based on PSNR and SSIM of rendered X-ray projections with single-view X-ray input."
690
+ },
691
+ {
692
+ "type": "table",
693
+ "bbox": [
694
+ 0.131,
695
+ 0.391,
696
+ 0.444,
697
+ 0.43
698
+ ],
699
+ "angle": 0,
700
+ "content": "<table><tr><td>Dataset</td><td>↑ PSNR (dB) (μ ± σ)</td><td>↑ SSIM (μ ± σ)</td></tr><tr><td>Knee</td><td>30.17 ± 1.93</td><td>0.670 ± 0.040</td></tr><tr><td>Chest</td><td>28.54 ± 0.79</td><td>0.462 ± 0.082</td></tr></table>"
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.084,
706
+ 0.457,
707
+ 0.379,
708
+ 0.472
709
+ ],
710
+ "angle": 0,
711
+ "content": "a balance between blurriness and accuracy:"
712
+ },
713
+ {
714
+ "type": "equation",
715
+ "bbox": [
716
+ 0.095,
717
+ 0.48,
718
+ 0.488,
719
+ 0.51
720
+ ],
721
+ "angle": 0,
722
+ "content": "\\[\n\\mathcal {L} _ {\\text {g e n}} = \\lambda_ {1} \\mathcal {L} _ {\\mathrm {r}} (V G G 1 6) + \\lambda_ {2} \\mathcal {L} _ {\\mathrm {M S E}} (G) + \\lambda_ {3} \\mathcal {L} _ {\\mathrm {N L L L}} \\left(z _ {s}, z _ {a}\\right) \\tag {10}\n\\]"
723
+ },
724
+ {
725
+ "type": "text",
726
+ "bbox": [
727
+ 0.084,
728
+ 0.512,
729
+ 0.489,
730
+ 0.556
731
+ ],
732
+ "angle": 0,
733
+ "content": "where NLLL corresponds to the negative log-likelihood loss and the tuned hyperparameters \\(lr = 0.0005\\), \\(\\beta_{1} = 0\\), \\(\\beta_{2} = 0.999\\), \\(\\lambda_{1} = 0.3\\), \\(\\lambda_{2} = 0.1\\) and \\(\\lambda_{3} = 0.3\\)."
734
+ },
735
+ {
736
+ "type": "text",
737
+ "bbox": [
738
+ 0.084,
739
+ 0.556,
740
+ 0.49,
741
+ 0.617
742
+ ],
743
+ "angle": 0,
744
+ "content": "Once the model locates an optimal combination of \\( \\mathbf{z}_s \\) and \\( \\mathbf{z}_a \\), we replicate them and use them to render the rest of the X-ray projections by continuously controlling the angle viewpoint."
745
+ },
746
+ {
747
+ "type": "title",
748
+ "bbox": [
749
+ 0.231,
750
+ 0.625,
751
+ 0.342,
752
+ 0.639
753
+ ],
754
+ "angle": 0,
755
+ "content": "III. RESULTS"
756
+ },
757
+ {
758
+ "type": "text",
759
+ "bbox": [
760
+ 0.082,
761
+ 0.643,
762
+ 0.49,
763
+ 0.809
764
+ ],
765
+ "angle": 0,
766
+ "content": "Here we provide an evaluation of MedNeRF on our datasets. We compare our model's results to the ground truth, two baselines, perform an ablation study, and show qualitative and quantitative evaluations. We train all models for 100,000 iterations with a batch size of 8. Projection parameters \\((u,v)\\) are chosen to evenly sample points on the surface of a sphere, specifically a slight horizontal elevation of 70-85 degrees and \\(u_{\\mathrm{min}} = 0\\), \\(u_{\\mathrm{max}} = 1\\) for a full 360-degree vertical rotation. However, we only provide a fifth of the views (72-views each at five degrees) during training and let the model render the rest."
767
+ },
768
+ {
769
+ "type": "title",
770
+ "bbox": [
771
+ 0.083,
772
+ 0.818,
773
+ 0.373,
774
+ 0.833
775
+ ],
776
+ "angle": 0,
777
+ "content": "A. Reconstruction from Single View X-ray"
778
+ },
779
+ {
780
+ "type": "text",
781
+ "bbox": [
782
+ 0.082,
783
+ 0.836,
784
+ 0.49,
785
+ 0.927
786
+ ],
787
+ "angle": 0,
788
+ "content": "We evaluate our model's representation for 3D-aware DRR synthesis given a single-view X-ray as input. We find that despite the implicit linear network's limited capacity, our model can disentangle 3D anatomy identity and attenuation response of different medical instances, which are retrieved through the described reconstruction reformulation in II-C.3."
789
+ },
790
+ {
791
+ "type": "image",
792
+ "bbox": [
793
+ 0.511,
794
+ 0.351,
795
+ 0.911,
796
+ 0.624
797
+ ],
798
+ "angle": 0,
799
+ "content": null
800
+ },
801
+ {
802
+ "type": "image_caption",
803
+ "bbox": [
804
+ 0.523,
805
+ 0.635,
806
+ 0.896,
807
+ 0.648
808
+ ],
809
+ "angle": 0,
810
+ "content": "Fig. 4. Volumetric maps and attenuation renderings on our dataset."
811
+ },
812
+ {
813
+ "type": "text",
814
+ "bbox": [
815
+ 0.506,
816
+ 0.674,
817
+ 0.913,
818
+ 0.719
819
+ ],
820
+ "angle": 0,
821
+ "content": "Our model can also facilitate distinguishing bone from tissue via a contrast transformation, as it renders a brighter pixel value for denser structures (e.g. bone) (Fig. 3)."
822
+ },
823
+ {
824
+ "type": "text",
825
+ "bbox": [
826
+ 0.506,
827
+ 0.72,
828
+ 0.914,
829
+ 0.84
830
+ ],
831
+ "angle": 0,
832
+ "content": "Table I summarises our results based on the peak signal-to-noise ratio (PSNR) and structural similarity (SSIM), which measure the quality of reconstructed signals and human subjective similarity, respectively. We find that our generative loss can achieve a reasonable perception-distortion curve in renderings and show consistency with the location and volumetric depth of anatomical structures at continuous viewpoints compared to the ground truth."
833
+ },
834
+ {
835
+ "type": "title",
836
+ "bbox": [
837
+ 0.507,
838
+ 0.848,
839
+ 0.667,
840
+ 0.864
841
+ ],
842
+ "angle": 0,
843
+ "content": "B. 2D DRR Rendering"
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.506,
849
+ 0.867,
850
+ 0.913,
851
+ 0.927
852
+ ],
853
+ "angle": 0,
854
+ "content": "We evaluate our model on the task of 2D rendering and compare it to pixelNeRF [19], and GRAF [14] baseline, wherein the original architecture is used. Our model can more accurately estimate volumetric depth compared to"
855
+ }
856
+ ],
857
+ [
858
+ {
859
+ "type": "table_caption",
860
+ "bbox": [
861
+ 0.121,
862
+ 0.06,
863
+ 0.454,
864
+ 0.073
865
+ ],
866
+ "angle": 0,
867
+ "content": "TABLE II. FID and KID analysis comparing other methods."
868
+ },
869
+ {
870
+ "type": "table",
871
+ "bbox": [
872
+ 0.086,
873
+ 0.085,
874
+ 0.49,
875
+ 0.131
876
+ ],
877
+ "angle": 0,
878
+ "content": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">Chest dataset</td><td colspan=\"2\">Knee dataset</td></tr><tr><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td></tr><tr><td>GRAF [14]</td><td>68.25 ± 0.954</td><td>0.053 ± 0.0008</td><td>76.70 ± 0.302</td><td>0.058 ± 0.0001</td></tr><tr><td>pixelNeRF [19]</td><td>112.96 ± 2.356</td><td>0.084 ± 0.0012</td><td>166.40 ± 2.153</td><td>0.158 ± 0.0010</td></tr><tr><td>Ours</td><td>60.26 ± 0.322</td><td>0.041 ± 0.0005</td><td>76.12 ± 0.193</td><td>0.052 ± 0.0004</td></tr></table>"
879
+ },
880
+ {
881
+ "type": "table_caption",
882
+ "bbox": [
883
+ 0.119,
884
+ 0.142,
885
+ 0.456,
886
+ 0.156
887
+ ],
888
+ "angle": 0,
889
+ "content": "TABLE III. FID and KID analysis of ablations of our model."
890
+ },
891
+ {
892
+ "type": "table",
893
+ "bbox": [
894
+ 0.086,
895
+ 0.168,
896
+ 0.49,
897
+ 0.23
898
+ ],
899
+ "angle": 0,
900
+ "content": "<table><tr><td rowspan=\"2\">Ablation</td><td colspan=\"2\">Chest dataset</td></tr><tr><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td></tr><tr><td>MedNeRF - 3 SD, logistic loss, classic DA</td><td>84.85 ± 1.025</td><td>0.069 ± 0.0031</td></tr><tr><td>MedNeRF - 2 SD, logistic loss, classic DA</td><td>67.73 ± 0.712</td><td>0.051 ± 0.0006</td></tr><tr><td>MedNeRF - 2 SD, hinge loss, classic DA</td><td>65.34 ± 0.353</td><td>0.045 ± 0.0004</td></tr><tr><td>MedNeRF - 2 SD, hinge loss, DAG</td><td>60.26 ± 0.322</td><td>0.041 ± 0.0005</td></tr></table>"
901
+ },
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.082,
906
+ 0.259,
907
+ 0.49,
908
+ 0.468
909
+ ],
910
+ "angle": 0,
911
+ "content": "GRAF and pixelNeRF (Fig. 4). For each category, we find an unseen target instance with a similar view direction and shape. Volumetric depth estimation is given by bright colors (far) and dark colors (near). Lacking a perceptual loss, GRAF is not incentivized to produce high-frequency textures. In contrast, we find our model renders a more detailed internal structure with varied attenuation. GRAF produces a consistent attenuation response, but seems to be unable to distinguish the anatomical shape from the background. Our self-supervised discriminator enables the generator to disentangle shape and background by rendering a brighter color for the background and a darker color for the shape, while GRAF renders a bright or dark color for both."
912
+ },
913
+ {
914
+ "type": "text",
915
+ "bbox": [
916
+ 0.082,
917
+ 0.471,
918
+ 0.49,
919
+ 0.651
920
+ ],
921
+ "angle": 0,
922
+ "content": "We find pixelNeRF produces blurred attenuation renderings for all datasets, and volumetric maps tend to exhibit strong color shifts (Fig. 4). We believe these artifacts are due to the see-through nature of the dataset, compared to solid-like natural objects on which NeRFs are trained. This data characteristic impairs not only volumetric maps but also fine anatomical structures. In contrast, our model is better able to render both volumetric depth and attenuation response. We also find pixelNeRF is sensitive to slight changes in projection parameters, hampering optimization for the knee category. Our model produces a consistent 3D geometry and does not rely on explicit projection matrices."
923
+ },
924
+ {
925
+ "type": "text",
926
+ "bbox": [
927
+ 0.082,
928
+ 0.652,
929
+ 0.49,
930
+ 0.756
931
+ ],
932
+ "angle": 0,
933
+ "content": "Table II compares image quality based on Frechet Inception Distance (FID) and Kernel Inception Distance (KID) metrics, in which lower values mean better. Optimizing pixelNeRF on our datasets leads to particularly poor results that are unable to compete with the GRAF baseline and our model. In contrast, our model outperforms the baselines on FID and KID metrics for all datasets."
934
+ },
935
+ {
936
+ "type": "title",
937
+ "bbox": [
938
+ 0.084,
939
+ 0.77,
940
+ 0.212,
941
+ 0.785
942
+ ],
943
+ "angle": 0,
944
+ "content": "C. Ablation Study"
945
+ },
946
+ {
947
+ "type": "text",
948
+ "bbox": [
949
+ 0.082,
950
+ 0.791,
951
+ 0.491,
952
+ 0.927
953
+ ],
954
+ "angle": 0,
955
+ "content": "We evaluate our model with three ablations (Table III): wherein an additional simple decoder (SD) is included; the adversarial logistic loss is replaced by its hinge version; and wherein the non-classical DAG approach is adopted. We find that the DAG approach brings the most performance boost compared to naively applying classical DA, while the use of a hinge loss performs slightly better than its logistic version. However, an additional decoder in our self-supervised discriminator can lead to a significant drop in"
956
+ },
957
+ {
958
+ "type": "text",
959
+ "bbox": [
960
+ 0.508,
961
+ 0.067,
962
+ 0.599,
963
+ 0.081
964
+ ],
965
+ "angle": 0,
966
+ "content": "performance."
967
+ },
968
+ {
969
+ "type": "title",
970
+ "bbox": [
971
+ 0.637,
972
+ 0.093,
973
+ 0.785,
974
+ 0.107
975
+ ],
976
+ "angle": 0,
977
+ "content": "IV. CONCLUSION"
978
+ },
979
+ {
980
+ "type": "text",
981
+ "bbox": [
982
+ 0.506,
983
+ 0.115,
984
+ 0.914,
985
+ 0.373
986
+ ],
987
+ "angle": 0,
988
+ "content": "We have presented a novel Deep Learning architecture based on Neural Radiance Fields for learning a continuous representation of CT scans. We learn a medical category encoding of the attenuation response of a set of 2D DRRs in the weights of a generator. Furthermore, we have found that a stronger and more comprehensive signal from our discriminator allows generative radiance fields to model 3D-aware CT-projections. Experimental evaluation demonstrates significant qualitative and quantitative reconstructions and improvements over other Neural Radiance Field approaches. Whilst the proposed model may not replace CT entirely, the functionality of generating 3D-aware CT-projections from X-rays has great potential for clinical use in osseous trauma, skeletal evaluation in dysplasia and for orthopaedic presurgical planning. This could cut down on the radiation dose given to patients, with significant economic implications such as bringing down the cost of investigations."
989
+ },
990
+ {
991
+ "type": "title",
992
+ "bbox": [
993
+ 0.622,
994
+ 0.384,
995
+ 0.8,
996
+ 0.397
997
+ ],
998
+ "angle": 0,
999
+ "content": "ACKNOWLEDGMENT"
1000
+ },
1001
+ {
1002
+ "type": "text",
1003
+ "bbox": [
1004
+ 0.506,
1005
+ 0.405,
1006
+ 0.913,
1007
+ 0.435
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "This work is partially supported by the Mexican Council of Science and Technology (CONACyT)."
1011
+ },
1012
+ {
1013
+ "type": "title",
1014
+ "bbox": [
1015
+ 0.663,
1016
+ 0.448,
1017
+ 0.759,
1018
+ 0.46
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "REFERENCES"
1022
+ },
1023
+ {
1024
+ "type": "ref_text",
1025
+ "bbox": [
1026
+ 0.515,
1027
+ 0.47,
1028
+ 0.914,
1029
+ 0.494
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "[1] Paul Suetens, Visualization for diagnosis and therapy, p. 190-218, Cambridge University Press, 2 edition, 2009."
1033
+ },
1034
+ {
1035
+ "type": "ref_text",
1036
+ "bbox": [
1037
+ 0.516,
1038
+ 0.495,
1039
+ 0.915,
1040
+ 0.607
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "[2] Pechin Lo, Bram van Ginneken, Joseph M. Reinhardt, Tarunashree Yavarna, Pim A. de Jong, Benjamin Irving, Catalin Fetita, Margarete Ortner, Rómulo Pinho, Jan Sijbers, Marco Feuerstein, Anna Fabijanska, Christian Bauer, Reinhard Beichel, Carlos S. Mendoza, Rafael Wiemker, Jaesung Lee, Anthony P. Reeves, Silvia Born, Oliver Weinheimer, Eva M. van Rikxooft, Juerg Tschirren, Ken Mori, Benjamin Odry, David P. Naidich, Ieneke Hartmann, Eric A. Hoffman, Mathias Prokop, Jesper H. Pedersen, and Marleen de Bruijne, \"Extraction of airways from ct (exact'09),\" IEEE Transactions on Medical Imaging, vol. 31, no. 11, pp. 2093-2107, 2012."
1044
+ },
1045
+ {
1046
+ "type": "ref_text",
1047
+ "bbox": [
1048
+ 0.516,
1049
+ 0.608,
1050
+ 0.915,
1051
+ 0.641
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "[3] Mary Coffey and Aude Vaandering, \"Patient setup for pet/ct acquisition in radiotherapy planning,\" Radiotherapy and Oncology, vol. 96, no. 3, pp. 298-301, 2010, PET in Radiotherapy Planning."
1055
+ },
1056
+ {
1057
+ "type": "ref_text",
1058
+ "bbox": [
1059
+ 0.517,
1060
+ 0.642,
1061
+ 0.914,
1062
+ 0.687
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "[4] Tri Huynh, Yaozong Gao, Jiayin Kang, Li Wang, Pei Zhang, Jun Lian, and Dinggang Shen, \"Estimating ct image from mri data using structured random forest and auto-context model,\" IEEE Transactions on Medical Imaging, vol. 35, no. 1, pp. 174-183, 2016."
1066
+ },
1067
+ {
1068
+ "type": "ref_text",
1069
+ "bbox": [
1070
+ 0.517,
1071
+ 0.688,
1072
+ 0.915,
1073
+ 0.744
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "[5] Shoulie Xie, Weimin Huang, Tao Yang, Dajun Wu, and Huiying Liu, \"Compressed sensing based image reconstruction with projection recovery for limited angle cone-beam ct imaging,\" in 2020 42nd Annual International Conference of the IEEE Engineering in Medicine Biology Society (EMBC), 2020, pp. 1307-1310."
1077
+ },
1078
+ {
1079
+ "type": "ref_text",
1080
+ "bbox": [
1081
+ 0.517,
1082
+ 0.745,
1083
+ 0.914,
1084
+ 0.778
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "[6] Ge Wang, Jong Chu Ye, Klaus Mueller, and Jeffrey A. Fessler, \"Image reconstruction is a new frontier of machine learning,\" IEEE Transactions on Medical Imaging, vol. 37, no. 6, pp. 1289-1296, 2018."
1088
+ },
1089
+ {
1090
+ "type": "ref_text",
1091
+ "bbox": [
1092
+ 0.517,
1093
+ 0.779,
1094
+ 0.914,
1095
+ 0.835
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "[7] Yinsheng Li, Ke Li, Chengzhu Zhang, Juan Montoya, and Guang-Hong Chen, \"Learning to reconstruct computed tomography images directly from sinogram data under a variety of data acquisition conditions,\" IEEE Transactions on Medical Imaging, vol. 38, no. 10, pp. 2469-2481, 2019."
1099
+ },
1100
+ {
1101
+ "type": "ref_text",
1102
+ "bbox": [
1103
+ 0.517,
1104
+ 0.836,
1105
+ 0.913,
1106
+ 0.88
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "[8] David B. Lindell, Julien N. P. Martel, and Gordon Wetzstein, \"Autoint: Automatic integration for fast neural volume rendering,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2021, pp. 14556-14565."
1110
+ },
1111
+ {
1112
+ "type": "ref_text",
1113
+ "bbox": [
1114
+ 0.517,
1115
+ 0.88,
1116
+ 0.913,
1117
+ 0.926
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "[9] Yu Sun, Jiaming Liu, Mingyang Xie, Brendt Wohlberg, and Ulugbek S. Kamilov, \"Coil: Coordinate-based internal learning for tomographic imaging,\" IEEE Transactions on Computational Imaging, vol. 7, pp. 1400-1412, 2021."
1121
+ },
1122
+ {
1123
+ "type": "list",
1124
+ "bbox": [
1125
+ 0.515,
1126
+ 0.47,
1127
+ 0.915,
1128
+ 0.926
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": null
1132
+ }
1133
+ ],
1134
+ [
1135
+ {
1136
+ "type": "ref_text",
1137
+ "bbox": [
1138
+ 0.086,
1139
+ 0.068,
1140
+ 0.49,
1141
+ 0.126
1142
+ ],
1143
+ "angle": 0,
1144
+ "content": "[10] Xingde Ying, Heng Guo, Kai Ma, Jian Wu, Zhengxin Weng, and Yefeng Zheng, “X2ct-gan: Reconstructing ct from biplanar x-rays with generative adversarial networks,” in 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 10611-10620."
1145
+ },
1146
+ {
1147
+ "type": "ref_text",
1148
+ "bbox": [
1149
+ 0.086,
1150
+ 0.127,
1151
+ 0.49,
1152
+ 0.183
1153
+ ],
1154
+ "angle": 0,
1155
+ "content": "[11] Bao Nguyen, Adam Feldman, Sarath Bethapudi, Andrew Jennings, and Chris G. Willcocks, \"Unsupervised region-based anomaly detection in brain mri with adversarial image inpainting,\" in 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), 2021, pp. 1127-1131."
1156
+ },
1157
+ {
1158
+ "type": "ref_text",
1159
+ "bbox": [
1160
+ 0.086,
1161
+ 0.185,
1162
+ 0.49,
1163
+ 0.253
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "[12] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng, “Nerf: Representing scenes as neural radiance fields for view synthesis,” in Computer Vision – ECCV 2020, Andreae Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, Eds., Cham, 2020, pp. 405–421, Springer International Publishing."
1167
+ },
1168
+ {
1169
+ "type": "ref_text",
1170
+ "bbox": [
1171
+ 0.086,
1172
+ 0.254,
1173
+ 0.49,
1174
+ 0.299
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": "[13] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth, \"NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections,\" in CVPR, 2021."
1178
+ },
1179
+ {
1180
+ "type": "ref_text",
1181
+ "bbox": [
1182
+ 0.086,
1183
+ 0.3,
1184
+ 0.49,
1185
+ 0.335
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": "[14] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger, \"Graf: Generative radiance fields for 3d-aware image synthesis,\" in Advances in Neural Information Processing Systems (NeurIPS), 2020."
1189
+ },
1190
+ {
1191
+ "type": "ref_text",
1192
+ "bbox": [
1193
+ 0.086,
1194
+ 0.335,
1195
+ 0.49,
1196
+ 0.415
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": "[15] E B Tsai, S Lungren, M P Hershman, M Roshkovan, L Colak, E Erickson, B J Shih, G Stein, A Kalpathy-Cramer, J Shen, J Hafez, M A F John, S Rajiah, P Pogatchnik, B P Mongan, J T Altinmakas, E Ranschaert, E R Kitamura, and C C. Wu, “Medical imaging data resource center (midrc) - rsna international pandemic open research database (ricord) release 1b - chest ct pandemic-[dataset],” The Cancer Imaging Archive, 2021."
1200
+ },
1201
+ {
1202
+ "type": "ref_text",
1203
+ "bbox": [
1204
+ 0.086,
1205
+ 0.416,
1206
+ 0.49,
1207
+ 0.473
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "[16] K Clark, B Vendt, K Smith, J Freymann, J Kirby, P Koppel, S Moore, S Phillips, D Maffitt, M Pringle, L Tarbox, and F Prior, \"The cancer imaging archive (tcia): Maintaining and operating a public information repository,\" Journal of Digital Imaging, vol. 26, no. 6, pp. 1045-1057, December 2013."
1211
+ },
1212
+ {
1213
+ "type": "ref_text",
1214
+ "bbox": [
1215
+ 0.086,
1216
+ 0.473,
1217
+ 0.49,
1218
+ 0.497
1219
+ ],
1220
+ "angle": 0,
1221
+ "content": "[17] Michael D Harris, Adam J Cyr, Azhar A Ali, Clare K Fitzpatrick, Paul J Rullkoetter, Lorin P Maletsky, and Kevin B Shelburne, “A"
1222
+ },
1223
+ {
1224
+ "type": "list",
1225
+ "bbox": [
1226
+ 0.086,
1227
+ 0.068,
1228
+ 0.49,
1229
+ 0.497
1230
+ ],
1231
+ "angle": 0,
1232
+ "content": null
1233
+ },
1234
+ {
1235
+ "type": "ref_text",
1236
+ "bbox": [
1237
+ 0.536,
1238
+ 0.068,
1239
+ 0.912,
1240
+ 0.103
1241
+ ],
1242
+ "angle": 0,
1243
+ "content": "combined experimental and computational approach to subject-specific analysis of knee joint laxity,\" Journal of biomechanical engineering, vol. 138, no. 8, August 2016."
1244
+ },
1245
+ {
1246
+ "type": "ref_text",
1247
+ "bbox": [
1248
+ 0.51,
1249
+ 0.103,
1250
+ 0.912,
1251
+ 0.16
1252
+ ],
1253
+ "angle": 0,
1254
+ "content": "[18] Azhar A Ali, Sami S Shalhoub, Adam J Cyr, Clare K Fitzpatrick, Lorin P Maletsky, Paul J Rullkoetter, and Kevin B Shelburne, \"Validation of predicted patellofemoral mechanics in a finite element model of the healthy and cruciate-deficient knee,\" Journal of biomechanics, vol. 49, no. 2, pp. 302-309, January 2016."
1255
+ },
1256
+ {
1257
+ "type": "ref_text",
1258
+ "bbox": [
1259
+ 0.51,
1260
+ 0.16,
1261
+ 0.912,
1262
+ 0.192
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": "[19] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa, “pixelNeRF: Neural radiance fields from one or few images,” in CVPR, 2021."
1266
+ },
1267
+ {
1268
+ "type": "ref_text",
1269
+ "bbox": [
1270
+ 0.51,
1271
+ 0.193,
1272
+ 0.912,
1273
+ 0.238
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "[20] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung, “On data augmentation for gan training,” IEEE Transactions on Image Processing, vol. 30, pp. 1882–1897, 2021."
1277
+ },
1278
+ {
1279
+ "type": "ref_text",
1280
+ "bbox": [
1281
+ 0.51,
1282
+ 0.239,
1283
+ 0.912,
1284
+ 0.284
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": "[21] Bingchen Liu, Yizhe Zhu, Kunpeng Song, and Ahmed Elgammal, \"Towards faster and stabilized \\(\\{\\mathrm{gan}\\}\\) training for high-fidelity few-shot image synthesis,\" in International Conference on Learning Representations, 2021."
1288
+ },
1289
+ {
1290
+ "type": "ref_text",
1291
+ "bbox": [
1292
+ 0.51,
1293
+ 0.285,
1294
+ 0.912,
1295
+ 0.329
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": "[22] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang, \"The unreasonable effectiveness of deep features as a perceptual metric,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018."
1299
+ },
1300
+ {
1301
+ "type": "ref_text",
1302
+ "bbox": [
1303
+ 0.51,
1304
+ 0.33,
1305
+ 0.912,
1306
+ 0.375
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": "[23] Xingang Pan, Xiaohang Zhan, Bo Dai, Dahua Lin, Chen Change Loy, and Ping Luo, \"Exploiting deep generative prior for versatile image restoration and manipulation,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1-1, 2021."
1310
+ },
1311
+ {
1312
+ "type": "ref_text",
1313
+ "bbox": [
1314
+ 0.51,
1315
+ 0.375,
1316
+ 0.912,
1317
+ 0.41
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": "[24] Yochai Blau and Tomer Michaeli, \"The perception-distortion tradeoff,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018."
1321
+ },
1322
+ {
1323
+ "type": "list",
1324
+ "bbox": [
1325
+ 0.51,
1326
+ 0.068,
1327
+ 0.912,
1328
+ 0.41
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": null
1332
+ }
1333
+ ]
1334
+ ]
2202.01xxx/2202.01020/3f9e10da-85b7-4cf2-a6b2-fc1e487efd0e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d7ec1a0f0840b3e27008dd1eb7a18c800710a8ce77ff1256daf73a8a78c4a8f
3
+ size 4321967
2202.01xxx/2202.01020/full.md ADDED
@@ -0,0 +1,219 @@
1
+ # MedNeRF: Medical Neural Radiance Fields for Reconstructing 3D-aware CT-Projections from a Single X-ray
2
+
3
+ Abril Corona-Figueroa<sup>1</sup>, Jonathan Frawley<sup>1</sup>, Sam Bond-Taylor<sup>1</sup>, Sarath Bethapudi<sup>2</sup>, Hubert P. H. Shum<sup>1</sup>, Chris G. Willcocks<sup>1</sup>
4
+
5
+ Abstract—Computed tomography (CT) is an effective medical imaging modality, widely used in the field of clinical medicine for the diagnosis of various pathologies. Advances in Multidetector CT imaging technology have enabled additional functionalities, including generation of thin slice multiplanar cross-sectional body imaging and 3D reconstructions. However, this involves patients being exposed to a considerable dose of ionising radiation. Excessive ionising radiation can lead to deterministic and harmful effects on the body. This paper proposes a Deep Learning model that learns to reconstruct CT projections from a few or even a single-view X-ray. This is based on a novel architecture that builds from neural radiance fields, which learns a continuous representation of CT scans by disentangling the shape and volumetric depth of surface and internal anatomical structures from 2D images. Our model is trained on chest and knee datasets, and we demonstrate qualitative and quantitative high-fidelity renderings and compare our approach to other recent radiance field-based methods. Our code and link to our datasets are available at https://github.com/abrilcf/mednerf
6
+
7
+ Clinical relevance—Our model is able to infer the anatomical 3D structure from a few or a single-view X-ray, showing future potential for reduced ionising radiation exposure during the imaging process.
8
+
9
+ # I. INTRODUCTION
10
+
11
+ 3D medical imaging often involves joining multiple 2D slices from CT or Magnetic Resonance Imaging (MRI), and part of the workflow consists of specifying values for the position of the patient, the imaging source, and the detector. The quality and accuracy of a CT 3D representation require hundreds of X-ray projections with a thin slice thickness [1]. Moreover, this process exposes patients to more ionising radiation than typical X-rays and requires the patient to remain immobile for an hour or more, depending on the type of test [2]. Continuous 3D representations would give radiologists a view of every point in the internal anatomy captured. While such representations are useful, there are practical challenges in CT due to the increased radiation exposure, angle-dependent structures, and time consumption [3].
12
+
13
+ Earlier approaches in medical image reconstruction used analytic and iterative methods [4], [5] on given input data. However, they often encounter mismatches between the mathematical model and physical properties of the imaging system. Instead, several recent approaches leverage deep
14
+
15
+ learning [6] for sparse view reconstruction [7], [8], [9], 3D CT reconstruction from 2D images [10], and anomaly detection [11]. These deep learning approaches solved the mismatches between the mathematical model and imaging system and reported improved reconstructions by fine-tuning state-of-the-art architectures. However, they require a large amount of training data, which may be difficult to meet in the medical domain where acquiring expert annotations is both cost and time prohibitive.
16
+
17
+ The Neural Radiance Fields (NeRF) [12] model is a recent reformulation for estimating a 3D volumetric representation from images. Such representations encode the radiance field and density of the scene in the parameters of a neural network. The neural network learns to synthesize new views via volume rendering from point samples along cast rays. However, these representations are often captured in controlled settings [13]. First, the scene is taken by a set of fixed cameras within a short time frame. Second, all content in the scene is static and real images often need masking. These constraints prohibit the direct application of NeRF to the medical domain, where the imaging system greatly differs from conventional cameras, and the images are captured over a long time frame, making it hard for the patient to remain still. Moreover, the overlapping of anatomical structures in medical images hinders the definition of edges, a problem that cannot be easily solved with masking. These aspects explain why the NeRF approach has shown success mainly on "natural images".
18
+
19
+ To address these challenges, we propose MedNeRF, a model that adapts Generative Radiance Fields (GRAF) [14] in the medical domain to render CT projections given a few or even a single-view X-ray. Our approach not only synthesizes realistic images, but also captures the data manifold and provides a continuous representation of how the attenuation and volumetric depth of anatomical structures vary with the viewpoint without 3D supervision. This is achieved via a new discriminator architecture that provides a stronger and more comprehensive signal to GRAF when dealing with CT scans.
20
+
21
+ Closest to our goal are [8], [9], which both train a coordinate-based network on sinograms of low-dose CT of phantom objects and apply it to the sparse-view tomography reconstruction problem. In contrast to [8], we learn multiple representations in a single model by randomly feeding data of different medical instances instead of separately optimizing for each collection of images. To test the reconstruction ability of [9], its authors integrate it into reconstruction methods and use
22
+
23
+ at least 60 views. Different from their methods, we do not rely on additional reconstruction algorithms, and we only require multiple views during training.
24
+
25
+ We render CT projections of our two datasets of digitally reconstructed radiographs (DRR) from chest and knee. We qualitatively and quantitatively demonstrate high-fidelity renderings and compare our approach to other recent radiance field-based methods. Furthermore, we render CT projections of a medical instance given a single-view X-ray and show the effectiveness of our model in covering surface and internal structures.
26
+
27
+ # II. METHODS
28
+
29
+ # A. Dataset Preparation
30
+
31
+ To train our models, we generate DRRs instead of collecting paired X-rays and corresponding CT reconstructions, which would expose patients to more radiation. Furthermore, DRR generation removes patient data and enables control in capture ranges and resolutions. We generated DRRs by using 20 CT chest scans from [15], [16] and five CT knee scans from [17], [18]. These scans cover a diverse group of patients at different contrast types showing both normal and abnormal anatomy. The radiation source and imaging panel are assumed to rotate around the vertical-axis, generating a DRR of $128 \times 128$ resolution at every five degrees, resulting in 72 DRRs for each object. During training we use the whole set of 72 DRRs (a fifth of all views within a full 360-degree vertical rotation) per patient and let the model render the rest. Our work did not involve experimental procedures on human subjects or animals and thus did not require Institutional Review Board approval.
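+
+ As a minimal sketch of this DRR generation step (assuming a parallel-beam approximation rather than the cone-beam geometry of a clinical DRR pipeline, with `ct_volume` as a hypothetical 3D attenuation array):
+
+ ```python
+ import numpy as np
+ from scipy.ndimage import rotate
+
+ def generate_drrs(ct_volume, num_views=72, step_deg=5.0):
+     """Render simple parallel-projection DRRs from a CT volume,
+     one view every `step_deg` degrees around the vertical axis."""
+     drrs = []
+     for k in range(num_views):
+         # Rotate the volume about the vertical axis (spin the x-y plane).
+         rotated = rotate(ct_volume, angle=k * step_deg,
+                          axes=(0, 1), reshape=False, order=1)
+         # Beer-Lambert line integral: sum attenuation along the beam.
+         line_integral = rotated.sum(axis=1)
+         # Map to [0, 1] intensities (brighter = denser structures).
+         drrs.append(1.0 - np.exp(-line_integral / line_integral.max()))
+     return np.stack(drrs)
+ ```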
32
+
33
+ # B. GRAF Overview
34
+
35
+ GRAF [14] is a model that builds from NeRF and defines it within a Generative Adversarial Network (GAN). It consists of a generator $G_{\theta}$ that predicts an image patch $P_{\mathrm{pred}}$ and a discriminator $D_{\phi}$ that compares the predicted patch to a patch $P_{\mathrm{real}}$ extracted from a real image. GRAF has shown an effective capacity to disentangle 3D shape and viewpoint of objects from 2D images alone, in contrast to the original NeRF [12] and similar approaches such as [19]. Therefore, we aim to translate GRAF's methods to our task, and in subsection II-C we describe our new discriminator architecture, which allows us to disentangle 3D properties from DRRs.
36
+
37
+ We consider the experimental setting to obtain the radiation attenuation response instead of the color used in natural images. To obtain the attenuation response at a pixel location for an arbitrary projection $\pmb{K}$ with pose $\pmb{\xi}$ , first, we consider a pattern $\pmb{\nu} = (\pmb{u}, s)$ to sample $R$ X-ray beams within a $K \times K$ image-patch $\pmb{P}$ . Then, we sample $N$ 3D points $\pmb{x}_r^i$ along the X-ray beam $r$ originating from the pixel location and ordered between the near and far planes of the projection (Fig. 1a).
38
+
39
+ The object representation is encoded in a multi-layer perceptron (MLP) that takes as input a 3D position $\pmb{x} = (x, y, z)$ and a viewing direction $\pmb{d} = (\theta, \phi)$ , and produces
40
+
41
+ ![](images/a1a71685023e7c68424f07b1784e12fd42527cc9ae3e108f61665d9b36a529bf.jpg)
42
+ Fig. 1. An overview of GRAF's generator.
43
+
44
+ as output a density scalar $\sigma$ and a pixel value $c$ . To learn high-frequency features, the input is mapped into a $2L$ -dimensional representation (Fig. 1b):
45
+
46
+ $$
47
+ \gamma (p) = \left( \dots , \cos \left(2 ^ {j} \pi p\right), \sin \left(2 ^ {j} \pi p\right), \dots \right) \tag {1}
48
+ $$
49
+
50
+ where $p$ represents the 3D position or viewing direction, for $j = 0,\dots,L - 1$.
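+
+ As a concrete illustration, Eq. (1) amounts to a few lines of code; the following is a sketch in PyTorch, with `num_freqs` playing the role of $L$:
+
+ ```python
+ import torch
+
+ def positional_encoding(p, num_freqs):
+     """gamma(p): map each coordinate to 2L Fourier features,
+     (cos(2^j * pi * p), sin(2^j * pi * p)) for j = 0..L-1 (Eq. 1)."""
+     feats = []
+     for j in range(num_freqs):
+         feats.append(torch.cos(2.0 ** j * torch.pi * p))
+         feats.append(torch.sin(2.0 ** j * torch.pi * p))
+     return torch.cat(feats, dim=-1)
+ ```
+
+ A 3D position with `num_freqs=10`, for example, becomes a 60-dimensional feature vector.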
51
+
52
+ For modeling the shape and appearance of anatomical structures, let $z_{s} \sim p_{s}$ and $z_{a} \sim p_{a}$ be the shape and appearance latent codes, respectively, each sampled from a standard Gaussian distribution (Fig. 1c). The network $g_{\theta}(\cdot)$ operates on a shape encoding $q = (\gamma(x), z_{s})$; the density prediction $\sigma$ is obtained by transforming $q$ through a density head $\sigma_{\theta}$, while $q$ is concatenated with the positional encoding of $d$ and the appearance code $z_{a}$ to predict the pixel value (Fig. 1c):
53
+
54
+ $$
55
+ (\gamma (\boldsymbol {x}), \boldsymbol {z} _ {s}) \mapsto \boldsymbol {q} \tag {2}
56
+ $$
57
+
58
+ $$
59
+ \left(\boldsymbol {q} \left(\boldsymbol {x}, \boldsymbol {z} _ {s}\right), \gamma (\boldsymbol {d}), \boldsymbol {z} _ {a}\right) \mapsto c \tag {3}
60
+ $$
61
+
62
+ $$
63
+ \boldsymbol {q} \left(\boldsymbol {x}, \boldsymbol {z} _ {s}\right) \mapsto \sigma \tag {4}
64
+ $$
65
+
66
+ The final pixel response $c_{r}$ is computed by the compositing operation (Fig. 1c):
67
+
68
+ $$
69
+ c _ {r} = \sum_ {i = 1} ^ {N} c _ {r} ^ {i} \alpha_ {r} ^ {i} \exp \left(- \sum_ {j = 1} ^ {i - 1} \sigma_ {r} ^ {j} \delta_ {r} ^ {j}\right) \tag {5}
70
+ $$
71
+
72
+ where $\alpha_r^i = 1 - \exp \left(-\sigma_r^i\delta_r^i\right)$ is the alpha compositing value of sampled point $i$ and $\delta_r^i = \| \pmb{x}_r^{i + 1} - \pmb{x}_r^i\| _2$ is the distance between the adjacent sampled points.
73
+
74
+ In this way, both the density and pixel values are computed at each sampled point along the beam $r$ with network $g_{\theta}$ . Finally, combining the results of all $R$ beams, the generator $G_{\theta}$ predicts an image patch $P_{\mathrm{pred}}$ , as illustrated in Fig. 1d.
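+
+ The compositing of Eq. (5) reduces to a cumulative sum over the sampled points; a sketch assuming `c`, `sigma` and `delta` are `[R, N]` tensors of per-point pixel values, densities and inter-sample distances:
+
+ ```python
+ import torch
+
+ def composite(c, sigma, delta):
+     """Alpha-composite N point samples along each of R beams (Eq. 5)."""
+     alpha = 1.0 - torch.exp(-sigma * delta)  # alpha_r^i
+     # Transmittance exp(-sum_{j<i} sigma_r^j delta_r^j): an exclusive
+     # cumulative sum, obtained by subtracting the current term.
+     accum = torch.cumsum(sigma * delta, dim=-1) - sigma * delta
+     return (c * alpha * torch.exp(-accum)).sum(dim=-1)  # c_r per beam
+ ```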
75
+
76
+ # C. MedNeRF
77
+
78
+ We investigate how we can adapt GRAF to the medical domain and apply it to render a volumetric representation from DRRs. Leveraging a large dataset, GRAF's discriminator $D_{\phi}$ is able to continuously provide useful signals to train the generator $G_{\theta}$ . However, medical datasets like those considered in our problem are generally small, which causes two sequential issues:
79
+
80
+ The lack of real information to the generator: In GRAF (and in GANs in general), the only source of features of
81
+
82
+ the training data contributing to the generator is the indirect gradient transferred from the discriminator. We find that the single convolutional feedback from GRAF's discriminator poorly conveys refined features from DRRs, resulting in inaccurate volumetric estimation.
83
+
84
+ Brittle adversarial training: With a limited training dataset, the generator or discriminator may fall into ill-posed settings such as mode collapse, which would lead to generating a limited number of instances and consequently, a suboptimal data distribution estimation. While some works have applied data augmentation techniques to leverage more data in the medical domain, some transformations could mislead the generator to learn the infrequent or even non-existent augmented data distribution [20]. We find that naively applying classic data augmentation works less favorably than our adopted framework.
85
+
86
+ 1) Self-supervised Learning for High-Fidelity Synthesis: To extract richer feature-maps from the DRRs, such that more comprehensive signals are produced to train $G_{\theta}$, we replace GRAF's discriminator architecture with recent advancements in self-supervised approaches. We allow $D_{\phi}$ to learn useful global and local features by training it on a pretext task, in particular, the self-supervision method based on auto-encoding [21]. Different from [21], we only use two decoders for the feature-maps on two scales: $f_{1}$ on $32^{2}$ and $f_{2}$ on $8^{2}$ (Fig. 2a). We find that this choice allows better performance and enables a correct volumetric depth estimation. $D_{\phi}$ must therefore not only discriminate $P_{\mathrm{pred}}$ predicted from $G_{\theta}$ but also extract comprehensive features from real image patches $P_{\mathrm{real}}$ that enable the decoders to resemble the data distribution.
87
+
88
+ To assess global structure in decoded patches from $D_{\phi}$ , we use the Learned Perceptual Image Patch Similarity (LPIPS) metric [22]. We compute the weighted pairwise image distance between two VGG16 feature spaces, where the pretrained weights are fit to better match human perceptual judgments. The additional discriminator loss is therefore:
89
+
90
+ $$
91
+ \mathcal {L} _ {\mathrm {r}} = \mathbb {E} _ {\boldsymbol {f} \sim D (\boldsymbol {p}), \boldsymbol {p} \sim P} \left[ \frac {1}{w h d} \| \phi_ {i} (\mathcal {G} (\boldsymbol {f})) - \phi_ {i} (\mathcal {T} (\boldsymbol {p})) \| _ {2} \right] \tag {6}
92
+ $$
93
+
94
+ where $\phi_i(\cdot)$ denotes the $i$ th layer output of a pretrained VGG16 network, and $w$ , $h$ , and $d$ stand for the width, height and depth of a feature space, respectively. Let $\mathcal{G}$ be the processing on the intermediate feature-maps $\pmb{f}$ from $D_{\phi}$ , and $\mathcal{T}$ the processing on real image patches. When coupled with this additional reconstruction loss, the network learns representations that transfer across tasks.
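+
+ A sketch of this reconstruction term using torchvision's pretrained VGG16; the truncation index (here 16) and the omitted patch processing $\mathcal{G}$ and $\mathcal{T}$ are illustrative assumptions:
+
+ ```python
+ from torchvision.models import vgg16
+
+ # phi_i: VGG16 features truncated at an illustrative i-th layer.
+ phi = vgg16(weights="IMAGENET1K_V1").features[:16].eval()
+ for p in phi.parameters():
+     p.requires_grad_(False)
+
+ def recon_loss(decoded, real):
+     """L2 distance between VGG16 feature maps of decoded and real
+     patches, normalised by the feature volume w*h*d (Eq. 6)."""
+     f_g, f_t = phi(decoded), phi(real)
+     d, h, w = f_g.shape[1:]
+     return (f_g - f_t).flatten(1).norm(p=2, dim=1).mean() / (w * h * d)
+ ```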
95
+
96
+ # 2) Improving Learning via Data Augmentation:
97
+
98
+ We improve learning of $G_{\theta}$ and $D_{\phi}$ by adopting the Data Augmentation Optimized for GAN (DAG) framework [20] in which a data augmentation transformation $\mathcal{T}_k$ (Fig. 2b) is applied using multiple discriminator heads $\{D_k\}$ . To further reduce memory usage, we share all layers of $D_{\phi}$ except the last layers corresponding to each head (Fig.
99
+
100
+ ![](images/f10282d655e5831e9fbb817a741ca909fbe4c13c66eb839fc02d059db8787566.jpg)
101
+ Fig. 2. An overview of our discriminator with self-supervised learning and DAG.
102
+
103
+ 2c). Because applying differentiable and invertible data augmentation transformations $\mathcal{T}_k$ has the Jensen-Shannon (JS) preserving property [20]:
104
+
105
+ $$
106
+ \mathrm {J S} \left(p _ {d} ^ {\mathcal {T} _ {k}} \| p _ {g} ^ {\mathcal {T} _ {k}}\right) = \mathrm {J S} \left(p _ {d} \| p _ {g}\right) \tag {7}
107
+ $$
108
+
109
+ where $p_d^{\mathcal{T}_k}$ is the transformed training data distribution and $p_g^{\mathcal{T}_k}$ the transformed distribution captured by $G_{\theta}$. By using a total of four transformations combining flipping and rotation, we encourage optimization toward the original data distribution, which also brings the largest performance boost. These choices allow our model to benefit from not only $\mathrm{JS}(p_d \parallel p_g)$ but also $\mathrm{JS}(p_d^{\mathcal{T}_k} \parallel p_g^{\mathcal{T}_k})$, thereby improving the learning of $G_{\theta}$ and the generalization of $D_{\phi}$. Furthermore, using multiple discriminators with weight-sharing provides learning regularization of $D_{\phi}$.
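+
+ The paper specifies only that the four transformations combine flipping and rotation, so the particular set below is an assumption; each $\mathcal{T}_k$ is differentiable and invertible, as Eq. (7) requires:
+
+ ```python
+ import torch
+
+ def dag_transforms(x):
+     """Four invertible T_k for the DAG heads; k = 0 is the identity."""
+     return [
+         x,                                   # k = 0: identity
+         torch.flip(x, dims=[-1]),            # k = 1: horizontal flip
+         torch.flip(x, dims=[-2]),            # k = 2: vertical flip
+         torch.rot90(x, k=2, dims=[-2, -1]),  # k = 3: 180-degree rotation
+     ]
+ ```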
110
+
111
+ Replacing GRAF's logistic objective with a hinge loss, we then define our overall loss as below:
112
+
113
+ $$
114
+ \mathcal {L} (\theta , \left\{\phi_ {k} \right\}) = \mathcal {L} (\theta , \phi_ {0}) + \frac {\lambda}{n - 1} \sum_ {k = 1} ^ {n} \mathcal {L} (\theta , \phi_ {k}) \tag {8}
115
+ $$
116
+
117
+ $$
118
+ \begin{aligned} \mathcal{L}(\theta, \phi_{k}) ={} & \mathbb{E}_{\boldsymbol{z}_{s} \sim p_{s},\, \boldsymbol{z}_{a} \sim p_{a},\, \boldsymbol{\xi} \sim p_{\xi},\, \boldsymbol{\nu} \sim p_{\nu}} \left[ f\left(D_{\phi}\left(G_{\theta}(\boldsymbol{z}_{s}, \boldsymbol{z}_{a}, \boldsymbol{\xi}, \boldsymbol{\nu})\right)\right) \right] \\ & + \mathbb{E}_{\boldsymbol{I} \sim p_{D},\, \boldsymbol{\nu} \sim p_{\nu}} \left[ f\left(-D_{\phi}(\boldsymbol{I}, \boldsymbol{\nu})\right) \right] + \mathcal{L}_{\mathrm{r}} \end{aligned} \tag{9}
119
+ $$
120
+
121
+ where $f(u) = \max (0,1 + u)$ . We optimize this loss with $n = 4$ , where $k = 0$ corresponds to the identity transformation and $\lambda = 0.2$ (as in [20]).
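+
+ A sketch of the discriminator side of Eqs. (8)-(9), with the reconstruction term $\mathcal{L}_{\mathrm{r}}$ omitted for brevity; `d_heads` stands for the weight-shared heads $\{D_k\}$, and the fake and real batches are assumed to be already transformed by the matching $\mathcal{T}_k$:
+
+ ```python
+ import torch
+
+ def hinge(u):
+     # f(u) = max(0, 1 + u) from Eq. (9).
+     return torch.relu(1.0 + u)
+
+ def dag_loss(d_heads, fake_batches, real_batches, lam=0.2):
+     """Eq. (8): identity-head loss plus a lambda-weighted average of
+     the augmented heads (n = 4 heads, lambda = 0.2 in the paper)."""
+     per_head = [
+         hinge(d(f)).mean() + hinge(-d(r)).mean()
+         for d, f, r in zip(d_heads, fake_batches, real_batches)
+     ]
+     n = len(per_head)
+     return per_head[0] + lam / (n - 1) * sum(per_head[1:])
+ ```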
122
+
123
+ # 3) Volumetric Rendering from a Single View X-ray:
124
+
125
+ After training a model, we reconstruct the complete X-ray projections within a full vertical rotation of a medical instance given a single view X-ray. We follow the relaxed reconstruction formulation in [23], which fits the generator to a single image. Then, we allow the parameters of the generator $G_{\theta}$ to be slightly fine-tuned along with the shape and appearance latent vectors $\mathbf{z}_s$ and $\mathbf{z}_a$ . The distortion and perception tradeoff is well known in GAN methods [24] and therefore we modify our generation objective by adding the distortion Mean Square Error (MSE) loss, which incentivises
126
+
127
+ ![](images/8b5cb166172786a1654207b8a8cd716372a53cb7b3c2205dcd4806d14edbb52f.jpg)
128
+ Fig. 3. Knee renderings from continuous viewpoint rotations showing tissue and bone. Given a single-view X-ray from a CT, we can generate the complete set of CT-projections within a full vertical rotation by slightly fine-tuning a pretrained model along with the shape and appearance latent codes.
129
+
130
+ TABLE I. Quantitative results based on PSNR and SSIM of rendered X-ray projections with single-view X-ray input.
131
+
132
+ <table><tr><td>Dataset</td><td>↑ PSNR (dB) (μ ± σ)</td><td>↑ SSIM (μ ± σ)</td></tr><tr><td>Knee</td><td>30.17 ± 1.93</td><td>0.670 ± 0.040</td></tr><tr><td>Chest</td><td>28.54 ± 0.79</td><td>0.462 ± 0.082</td></tr></table>
133
+
134
+ a balance between blurriness and accuracy:
135
+
136
+ $$
137
+ \mathcal {L} _ {\mathrm {gen}} = \lambda_ {1} \mathcal {L} _ {\mathrm {r}} (\mathrm {VGG16}) + \lambda_ {2} \mathcal {L} _ {\mathrm {MSE}} (G) + \lambda_ {3} \mathcal {L} _ {\mathrm {NLLL}} \left(z _ {s}, z _ {a}\right) \tag {10}
138
+ $$
139
+
140
+ where NLLL corresponds to the negative log-likelihood loss; the tuned hyperparameters are $lr = 0.0005$, $\beta_{1} = 0$, $\beta_{2} = 0.999$, $\lambda_{1} = 0.3$, $\lambda_{2} = 0.1$ and $\lambda_{3} = 0.3$.
141
+
142
+ Once the model locates an optimal combination of $\mathbf{z}_s$ and $\mathbf{z}_a$ , we replicate them and use them to render the rest of the X-ray projections by continuously controlling the angle viewpoint.
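+
+ A sketch of this test-time fitting loop with the Adam hyperparameters quoted above; `G.render` and `G.z_dim` are hypothetical handles for the generator, `recon_loss` is the VGG16 term sketched earlier, and the Gaussian negative log-likelihood is taken as $\|z\|^{2}/2$ up to a constant:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def fit_single_view(G, xray, steps=1000, l1=0.3, l2=0.1, l3=0.3):
+     """Fit z_s, z_a (and lightly fine-tune G) to one X-ray (Eq. 10)."""
+     z_s = torch.randn(1, G.z_dim, requires_grad=True)  # shape code
+     z_a = torch.randn(1, G.z_dim, requires_grad=True)  # appearance code
+     opt = torch.optim.Adam([z_s, z_a, *G.parameters()],
+                            lr=5e-4, betas=(0.0, 0.999))
+     for _ in range(steps):
+         pred = G.render(z_s, z_a)  # hypothetical single-view renderer
+         nll = 0.5 * (z_s.pow(2).sum() + z_a.pow(2).sum())
+         loss = (l1 * recon_loss(pred, xray)
+                 + l2 * F.mse_loss(pred, xray)
+                 + l3 * nll)
+         opt.zero_grad(); loss.backward(); opt.step()
+     return z_s.detach(), z_a.detach()
+ ```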
143
+
144
+ # III. RESULTS
145
+
146
+ Here we provide an evaluation of MedNeRF on our datasets. We compare our model's results to the ground truth and two baselines, perform an ablation study, and show qualitative and quantitative evaluations. We train all models for 100,000 iterations with a batch size of 8. Projection parameters $(u,v)$ are chosen to evenly sample points on the surface of a sphere, specifically a slight horizontal elevation of 70-85 degrees and $u_{\mathrm{min}} = 0$, $u_{\mathrm{max}} = 1$ for a full 360-degree vertical rotation. However, we only provide a fifth of the views (72 views, one every five degrees) during training and let the model render the rest.
147
+
148
+ # A. Reconstruction from Single View X-ray
149
+
150
+ We evaluate our model's representation for 3D-aware DRR synthesis given a single-view X-ray as input. We find that despite the implicit linear network's limited capacity, our model can disentangle 3D anatomy identity and attenuation response of different medical instances, which are retrieved through the described reconstruction reformulation in II-C.3.
151
+
152
+ ![](images/55ddd5b41f7a73cb740f008bc96f1ade6fc9bdae224187130a82cd7e2ea564d3.jpg)
153
+ Fig. 4. Volumetric maps and attenuation renderings on our dataset.
154
+
155
+ Our model can also facilitate distinguishing bone from tissue via a contrast transformation, as it renders a brighter pixel value for denser structures (e.g. bone) (Fig. 3).
156
+
157
+ Table I summarises our results based on the peak signal-to-noise ratio (PSNR) and structural similarity (SSIM), which measure the quality of reconstructed signals and human subjective similarity, respectively. We find that our generative loss can achieve a reasonable perception-distortion curve in renderings and show consistency with the location and volumetric depth of anatomical structures at continuous viewpoints compared to the ground truth.
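+
+ Both metrics are available in scikit-image; a minimal sketch assuming rendered and ground-truth projections as float arrays in [0, 1]:
+
+ ```python
+ from skimage.metrics import peak_signal_noise_ratio, structural_similarity
+
+ def evaluate_view(pred, gt):
+     """PSNR (dB) and SSIM for one rendered projection (cf. Table I)."""
+     psnr = peak_signal_noise_ratio(gt, pred, data_range=1.0)
+     ssim = structural_similarity(gt, pred, data_range=1.0)
+     return psnr, ssim
+ ```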
158
+
159
+ # B. 2D DRR Rendering
160
+
161
+ We evaluate our model on the task of 2D rendering and compare it to the pixelNeRF [19] and GRAF [14] baselines, using their original architectures. Our model can more accurately estimate volumetric depth compared to
162
+
163
+ TABLE II. FID and KID analysis comparing other methods.
164
+
165
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Chest dataset</td><td colspan="2">Knee dataset</td></tr><tr><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td></tr><tr><td>GRAF [14]</td><td>68.25 ± 0.954</td><td>0.053 ± 0.0008</td><td>76.70 ± 0.302</td><td>0.058 ± 0.0001</td></tr><tr><td>pixelNeRF [19]</td><td>112.96 ± 2.356</td><td>0.084 ± 0.0012</td><td>166.40 ± 2.153</td><td>0.158 ± 0.0010</td></tr><tr><td>Ours</td><td>60.26 ± 0.322</td><td>0.041 ± 0.0005</td><td>76.12 ± 0.193</td><td>0.052 ± 0.0004</td></tr></table>
166
+
167
+ TABLE III. FID and KID analysis of ablations of our model.
168
+
169
+ <table><tr><td rowspan="2">Ablation</td><td colspan="2">Chest dataset</td></tr><tr><td>↓ FID (μ ± σ)</td><td>↓ KID (μ ± σ)</td></tr><tr><td>MedNeRF - 3 SD, logistic loss, classic DA</td><td>84.85 ± 1.025</td><td>0.069 ± 0.0031</td></tr><tr><td>MedNeRF - 2 SD, logistic loss, classic DA</td><td>67.73 ± 0.712</td><td>0.051 ± 0.0006</td></tr><tr><td>MedNeRF - 2 SD, hinge loss, classic DA</td><td>65.34 ± 0.353</td><td>0.045 ± 0.0004</td></tr><tr><td>MedNeRF - 2 SD, hinge loss, DAG</td><td>60.26 ± 0.322</td><td>0.041 ± 0.0005</td></tr></table>
170
+
171
+ GRAF and pixelNeRF (Fig. 4). For each category, we find an unseen target instance with a similar view direction and shape. Volumetric depth estimation is given by bright colors (far) and dark colors (near). Lacking a perceptual loss, GRAF is not incentivized to produce high-frequency textures. In contrast, we find our model renders a more detailed internal structure with varied attenuation. GRAF produces a consistent attenuation response, but seems to be unable to distinguish the anatomical shape from the background. Our self-supervised discriminator enables the generator to disentangle shape and background by rendering a brighter color for the background and a darker color for the shape, while GRAF renders a bright or dark color for both.
172
+
173
+ We find pixelNeRF produces blurred attenuation renderings for all datasets, and volumetric maps tend to exhibit strong color shifts (Fig. 4). We believe these artifacts are due to the see-through nature of the dataset, compared to solid-like natural objects on which NeRFs are trained. This data characteristic impairs not only volumetric maps but also fine anatomical structures. In contrast, our model is better able to render both volumetric depth and attenuation response. We also find pixelNeRF is sensitive to slight changes in projection parameters, hampering optimization for the knee category. Our model produces a consistent 3D geometry and does not rely on explicit projection matrices.
174
+
175
+ Table II compares image quality based on the Fréchet Inception Distance (FID) and Kernel Inception Distance (KID) metrics, for which lower values indicate better quality. Optimizing pixelNeRF on our datasets leads to particularly poor results that are unable to compete with the GRAF baseline and our model. In contrast, our model outperforms the baselines on FID and KID metrics for all datasets.
176
+
177
+ # C. Ablation Study
178
+
179
+ We evaluate our model with three ablations (Table III): including an additional simple decoder (SD); replacing the adversarial logistic loss with its hinge version; and adopting the DAG approach in place of classic data augmentation (DA). We find that the DAG approach brings the largest performance boost compared to naively applying classic DA, while the hinge loss performs slightly better than its logistic version. However, an additional decoder in our self-supervised discriminator can lead to a significant drop in
180
+
181
+ performance.
182
+
183
+ # IV. CONCLUSION
184
+
185
+ We have presented a novel Deep Learning architecture based on Neural Radiance Fields for learning a continuous representation of CT scans. We learn a medical category encoding of the attenuation response of a set of 2D DRRs in the weights of a generator. Furthermore, we have found that a stronger and more comprehensive signal from our discriminator allows generative radiance fields to model 3D-aware CT-projections. Experimental evaluation demonstrates significant qualitative and quantitative reconstructions and improvements over other Neural Radiance Field approaches. Whilst the proposed model may not replace CT entirely, the functionality of generating 3D-aware CT-projections from X-rays has great potential for clinical use in osseous trauma, skeletal evaluation in dysplasia and for orthopaedic presurgical planning. This could cut down on the radiation dose given to patients, with significant economic implications such as bringing down the cost of investigations.
186
+
187
+ # ACKNOWLEDGMENT
188
+
189
+ This work is partially supported by the Mexican Council of Science and Technology (CONACyT).
190
+
191
+ # REFERENCES
192
+
193
+ [1] Paul Suetens, Visualization for diagnosis and therapy, p. 190-218, Cambridge University Press, 2 edition, 2009.
194
+ [2] Pechin Lo, Bram van Ginneken, Joseph M. Reinhardt, Tarunashree Yavarna, Pim A. de Jong, Benjamin Irving, Catalin Fetita, Margarete Ortner, Rómulo Pinho, Jan Sijbers, Marco Feuerstein, Anna Fabijanska, Christian Bauer, Reinhard Beichel, Carlos S. Mendoza, Rafael Wiemker, Jaesung Lee, Anthony P. Reeves, Silvia Born, Oliver Weinheimer, Eva M. van Rikxoort, Juerg Tschirren, Ken Mori, Benjamin Odry, David P. Naidich, Ieneke Hartmann, Eric A. Hoffman, Mathias Prokop, Jesper H. Pedersen, and Marleen de Bruijne, "Extraction of airways from ct (exact'09)," IEEE Transactions on Medical Imaging, vol. 31, no. 11, pp. 2093-2107, 2012.
195
+ [3] Mary Coffey and Aude Vaandering, "Patient setup for pet/ct acquisition in radiotherapy planning," Radiotherapy and Oncology, vol. 96, no. 3, pp. 298-301, 2010, PET in Radiotherapy Planning.
196
+ [4] Tri Huynh, Yaozong Gao, Jiayin Kang, Li Wang, Pei Zhang, Jun Lian, and Dinggang Shen, "Estimating ct image from mri data using structured random forest and auto-context model," IEEE Transactions on Medical Imaging, vol. 35, no. 1, pp. 174-183, 2016.
197
+ [5] Shoulie Xie, Weimin Huang, Tao Yang, Dajun Wu, and Huiying Liu, "Compressed sensing based image reconstruction with projection recovery for limited angle cone-beam ct imaging," in 2020 42nd Annual International Conference of the IEEE Engineering in Medicine Biology Society (EMBC), 2020, pp. 1307-1310.
198
+ [6] Ge Wang, Jong Chul Ye, Klaus Mueller, and Jeffrey A. Fessler, "Image reconstruction is a new frontier of machine learning," IEEE Transactions on Medical Imaging, vol. 37, no. 6, pp. 1289-1296, 2018.
199
+ [7] Yinsheng Li, Ke Li, Chengzhu Zhang, Juan Montoya, and Guang-Hong Chen, "Learning to reconstruct computed tomography images directly from sinogram data under a variety of data acquisition conditions," IEEE Transactions on Medical Imaging, vol. 38, no. 10, pp. 2469-2481, 2019.
200
+ [8] David B. Lindell, Julien N. P. Martel, and Gordon Wetzstein, "Autoint: Automatic integration for fast neural volume rendering," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2021, pp. 14556-14565.
201
+ [9] Yu Sun, Jiaming Liu, Mingyang Xie, Brendt Wohlberg, and Ulugbek S. Kamilov, "Coil: Coordinate-based internal learning for tomographic imaging," IEEE Transactions on Computational Imaging, vol. 7, pp. 1400-1412, 2021.
202
+
203
+ [10] Xingde Ying, Heng Guo, Kai Ma, Jian Wu, Zhengxin Weng, and Yefeng Zheng, “X2ct-gan: Reconstructing ct from biplanar x-rays with generative adversarial networks,” in 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 10611-10620.
204
+ [11] Bao Nguyen, Adam Feldman, Sarath Bethapudi, Andrew Jennings, and Chris G. Willcocks, "Unsupervised region-based anomaly detection in brain MRI with adversarial image inpainting," in 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), 2021, pp. 1127-1131.
205
+ [12] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng, “NeRF: Representing scenes as neural radiance fields for view synthesis,” in Computer Vision – ECCV 2020, Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, Eds., Cham, 2020, pp. 405–421, Springer International Publishing.
206
+ [13] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth, "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections," in CVPR, 2021.
207
+ [14] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger, "GRAF: Generative radiance fields for 3D-aware image synthesis," in Advances in Neural Information Processing Systems (NeurIPS), 2020.
208
+ [15] E B Tsai, S Lungren, M P Hershman, M Roshkovan, L Colak, E Erickson, B J Shih, G Stein, A Kalpathy-Cramer, J Shen, J Hafez, M A F John, S Rajiah, P Pogatchnik, B P Mongan, J T Altinmakas, E Ranschaert, E R Kitamura, and C C Wu, “Medical imaging data resource center (MIDRC) - RSNA international pandemic open research database (RICORD) release 1b - chest CT pandemic [dataset],” The Cancer Imaging Archive, 2021.
209
+ [16] K Clark, B Vendt, K Smith, J Freymann, J Kirby, P Koppel, S Moore, S Phillips, D Maffitt, M Pringle, L Tarbox, and F Prior, "The Cancer Imaging Archive (TCIA): Maintaining and operating a public information repository," Journal of Digital Imaging, vol. 26, no. 6, pp. 1045-1057, December 2013.
210
+ [17] Michael D Harris, Adam J Cyr, Azhar A Ali, Clare K Fitzpatrick, Paul J Rullkoetter, Lorin P Maletsky, and Kevin B Shelburne, “A combined experimental and computational approach to subject-specific analysis of knee joint laxity,” Journal of biomechanical engineering, vol. 138, no. 8, August 2016.
213
+ [18] Azhar A Ali, Sami S Shalhoub, Adam J Cyr, Clare K Fitzpatrick, Lorin P Maletsky, Paul J Rullkoetter, and Kevin B Shelburne, "Validation of predicted patellofemoral mechanics in a finite element model of the healthy and cruciate-deficient knee," Journal of biomechanics, vol. 49, no. 2, pp. 302-309, January 2016.
214
+ [19] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa, “pixelNeRF: Neural radiance fields from one or few images,” in CVPR, 2021.
215
+ [20] Ngoc-Trung Tran, Viet-Hung Tran, Ngoc-Bao Nguyen, Trung-Kien Nguyen, and Ngai-Man Cheung, “On data augmentation for gan training,” IEEE Transactions on Image Processing, vol. 30, pp. 1882–1897, 2021.
216
+ [21] Bingchen Liu, Yizhe Zhu, Kunpeng Song, and Ahmed Elgammal, "Towards faster and stabilized GAN training for high-fidelity few-shot image synthesis," in International Conference on Learning Representations, 2021.
217
+ [22] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang, "The unreasonable effectiveness of deep features as a perceptual metric," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018.
218
+ [23] Xingang Pan, Xiaohang Zhan, Bo Dai, Dahua Lin, Chen Change Loy, and Ping Luo, "Exploiting deep generative prior for versatile image restoration and manipulation," IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1-1, 2021.
219
+ [24] Yochai Blau and Tomer Michaeli, "The perception-distortion tradeoff," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018.
2202.01xxx/2202.01020/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ec6a1e53854670b9891c20b43a29d5e675912e19b72cf3f411f0aea3f29bdf6
3
+ size 286780
2202.01xxx/2202.01020/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01032/e1331bf0-3f9e-4d13-8efb-86967bf2089b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2681236f8cfd1bc636249f9ad1778deda3a2e8cfa0429ccd7f6fa2dc7422c075
3
+ size 2450787
2202.01xxx/2202.01032/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01032/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:350e770608771d438a05a2426874b1be1396ccb037a6c27212cc09d4a4e30e4a
3
+ size 1090960
2202.01xxx/2202.01032/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01034/902ca864-5cf2-40eb-b887-ccda751ec15b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cfedb66eec0d632bdbfe4a51115a43bfce540244f0358c3f9ee558d60cb50ea
3
+ size 11732005
2202.01xxx/2202.01034/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01034/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e2b440a36bd19366cd7576ba69341e1bc7d800a42c877cdf96cad22d9bc5e3c
3
+ size 869492
2202.01xxx/2202.01034/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01054/fec1fdb5-19ad-4e97-9913-360e71566230_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d73768ded2f1f8b5503f952ff31088cd63d83b7992f715ac12208cdff36e5f4e
3
+ size 605305
2202.01xxx/2202.01054/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01054/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54c7487676c64784434b27821ab8c8d22969f431c4060a1d95bcbcada4bb415d
3
+ size 1758393
2202.01xxx/2202.01054/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_content_list.json ADDED
@@ -0,0 +1,1123 @@




1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "RESCOREBERT: DISCRIMINATIVE SPEECH RECOGNITION RESCORSING WITH BERT",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 109,
8
+ 114,
9
+ 890,
10
+ 133
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Liyan Xu $^{1,2}$ Yile Gu $^{1}$ Jari Kolehmainen $^{1}$ Haidar Khan $^{1}$ Ankur Gandhi $^{1}$ Ariya Rastrow $^{1}$ Andreas Stolcke $^{1}$ Ivan Bulyko $^{1}$",
17
+ "bbox": [
18
+ 140,
19
+ 150,
20
+ 854,
21
+ 188
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Amazon Alexa AI, USA $^{2}$ Emory University, USA",
28
+ "bbox": [
29
+ 287,
30
+ 202,
31
+ 712,
32
+ 220
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "ABSTRACT",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 243,
42
+ 253,
43
+ 328,
44
+ 266
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Second-pass rescoring is an important component in automatic speech recognition (ASR) systems that is used to improve the outputs from a first-pass decoder by implementing a lattice rescoring or $n$ -best re-ranking. While pretraining with a masked language model (MLM) objective has received great success in various natural language understanding (NLU) tasks, it has not gained traction as a rescoring model for ASR. Specifically, training a bidirectional model like BERT on a discriminative objective such as minimumWER (MWER) has not been explored. Here we show how to train a BERT-based rescoring model with MWER loss, to incorporate the improvements of a discriminative loss into fine-tuning of deep bidirectional pretrained models for ASR. Specifically, we propose a fusion strategy that incorporates the MLM into the discriminative training process to effectively distill knowledge from a pretrained model. We further propose an alternative discriminative loss. This approach, which we call RescoreBERT, reduces WER by $6.6\\% /3.4\\%$ relative on the LibriSpeech clean/other test sets over a BERT baseline without discriminative objective. We also evaluate our method on an internal dataset from a conversational agent and find that it reduces both latency and WER (by 3 to $8\\%$ relative) over an LSTM rescoring model.",
51
+ "bbox": [
52
+ 86,
53
+ 277,
54
+ 488,
55
+ 554
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Index Terms— masked language model, BERT, second-pass rescoring, pretrained model, minimum WER training",
62
+ "bbox": [
63
+ 83,
64
+ 559,
65
+ 488,
66
+ 587
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1. INTRODUCTION",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 215,
76
+ 609,
77
+ 357,
78
+ 621
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "The two-pass paradigm has been widely adopted in state-of-the-art ASR systems [1, 2, 3, 4, 5], where the first pass generates n-best hypotheses, and the second pass reranks them. For the second-pass rescoring models, discriminative training with MWER (minimum WER) objective is typically applied [3, 6, 7, 4] to improve performance, such that the model learns to prefer hypotheses with the lowest WER.",
85
+ "bbox": [
86
+ 81,
87
+ 635,
88
+ 488,
89
+ 727
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Previous work with discriminative training uses causal language models (CLMs), such as LSTMs or Transformer LMs. While pretrained masked language models (MLMs) such as BERT [8] have been highly successful on various natural language understanding (NLU) tasks, they have not been widely applied in second-pass ASR rescoring. Meanwhile, recent studies have shown promising results using BERT in several rescoring studies [9, 10, 11], as BERT is pretrained with large corpora and encodes the full hypothesis context using a deep bidirectional model architecture. In particular, previous work [9] shows that deep bidirectional Transformers, such as BERT, can outperform their unidirectional counterparts (either forward text, backward text, or the two models combined). Another paper [10]",
96
+ "bbox": [
97
+ 81,
98
+ 729,
99
+ 488,
100
+ 887
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "image",
106
+ "img_path": "images/62b155f0969b45c61b6cb5566be073bd276cd787532528c02448108d29452881.jpg",
107
+ "image_caption": [
108
+ "Fig. 1: Illustration of discriminative scoring with BERT on n-best hypotheses. Each hypothesis is individually encoded by BERT and represented by CLS; it is then followed by a feed-forward NN to compute a sentence-level second-pass LM score. The scores are then interpolated with first pass scores for reranking."
109
+ ],
110
+ "image_footnote": [],
111
+ "bbox": [
112
+ 550,
113
+ 250,
114
+ 877,
115
+ 397
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "text",
121
+ "text": "shows that a pretrained BERT model that is then fine-tuned on LibriSpeech data can outperform BERT trained from scratch on LibriSpeech clean/other test sets by $4.4\\% / 3.2\\%$ WER relative, demonstrating the effectiveness of pretraining in BERT. They also show that BERT can outperform GPT [12] with comparable model size and pretraining data, which the authors argue is due to the bidirectional nature of BERT.",
122
+ "bbox": [
123
+ 506,
124
+ 491,
125
+ 913,
126
+ 582
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "In this work, we propose a method to train BERT-style rescoreing models with a discriminative objective, to leverage the aforementioned benefits from both approaches. Typically, pseudo log-likelihood (PLL) [9, 10]—the sum of the negative log-likelihoods of each individual token given the bidirectional context—is used to rescore n-best output to improve WER, a computationally expensive process, particularly for longer sentences. For discriminative training, this issue is exacerbated as the PLL computation needs to be repeated for each hypothesis individually. The previous work [10] solves this issue by distilling the PLL into a single score prediction at the start-of-sentence token (CLS). In this work, illustrated in Figure 1, we extend this approach and use the score from the CLS representation to perform discriminative training, as discussed in Section 2.2.2, with either MWER loss or a novel discriminative training loss dubbed matching word error distribution (MWED), described in Section 2.2.1. Finally, in Section 2.2.3 we propose a fusion strategy that incorporates the MLM into the discriminative training process, giving further improvements.",
133
+ "bbox": [
134
+ 506,
135
+ 584,
136
+ 915,
137
+ 820
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "We name the aforementioned approach RescoreBERT, and evaluate it on four datasets covering multiple domains and locales. Results show that discriminative training significantly improves upon non-discriminative BERT rescoring, on all test sets. The new MWED training loss is found to be a strong alternative to MWER. The results also show that the fusion approach for incorporating MLM into discriminative training can further improve WER. Lastly,",
144
+ "bbox": [
145
+ 506,
146
+ 820,
147
+ 913,
148
+ 912
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "aside_text",
154
+ "text": "arXiv:2202.01094v3 [eess.AS] 18 Feb 2022",
155
+ "bbox": [
156
+ 22,
157
+ 257,
158
+ 57,
159
+ 720
160
+ ],
161
+ "page_idx": 0
162
+ },
163
+ {
164
+ "type": "page_footnote",
165
+ "text": "2Work done as an applied scientist intern at Amazon Alexa.",
166
+ "bbox": [
167
+ 107,
168
+ 898,
169
+ 423,
170
+ 912
171
+ ],
172
+ "page_idx": 0
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "to achieve lower latency for streaming applications, we develop a method to further distill the model while maintaining WER improvements.",
177
+ "bbox": [
178
+ 83,
179
+ 92,
180
+ 488,
181
+ 132
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "2. APPROACH",
188
+ "text_level": 1,
189
+ "bbox": [
190
+ 233,
191
+ 150,
192
+ 339,
193
+ 162
194
+ ],
195
+ "page_idx": 1
196
+ },
197
+ {
198
+ "type": "text",
199
+ "text": "2.1. BERT Without Discriminative Training",
200
+ "text_level": 1,
201
+ "bbox": [
202
+ 84,
203
+ 175,
204
+ 369,
205
+ 189
206
+ ],
207
+ "page_idx": 1
208
+ },
209
+ {
210
+ "type": "text",
211
+ "text": "In this section, we review previous work on BERT rescoring models that are not trained with discriminative objective.",
212
+ "bbox": [
213
+ 83,
214
+ 196,
215
+ 488,
216
+ 224
217
+ ],
218
+ "page_idx": 1
219
+ },
220
+ {
221
+ "type": "text",
222
+ "text": "2.1.1. Pseudo log-likelihood (PLL)",
223
+ "text_level": 1,
224
+ "bbox": [
225
+ 84,
226
+ 239,
227
+ 299,
228
+ 253
229
+ ],
230
+ "page_idx": 1
231
+ },
232
+ {
233
+ "type": "text",
234
+ "text": "Let $E = (e_1, \\dots, e_{|E|})$ be a sequence of tokens. Similar to the log-likelihood of a sequence as is commonly used in CLM scoring [13, 14, 15], the pseudo log-likelihood (PLL) of a sequence using an MLM, first introduced by [9], is defined as:",
235
+ "bbox": [
236
+ 83,
237
+ 261,
238
+ 488,
239
+ 315
240
+ ],
241
+ "page_idx": 1
242
+ },
243
+ {
244
+ "type": "equation",
245
+ "text": "\n$$\n\\operatorname {P L L} (E) = - \\sum_ {t = 1} ^ {| E |} \\log P \\left(e _ {t} \\mid E _ {\\backslash t}\\right) \\tag {1}\n$$\n",
246
+ "text_format": "latex",
247
+ "bbox": [
248
+ 184,
249
+ 325,
250
+ 486,
251
+ 363
252
+ ],
253
+ "page_idx": 1
254
+ },
255
+ {
256
+ "type": "text",
257
+ "text": "where $E_{\\backslash t} = (\\dots, e_{t-1}, [\\text{MASK}], e_{t+1}, \\dots)$ is the sequence whose corresponding position is replaced by the [MASK] token used in MLM pretraining. The PLL is thus the sum of the negative log-likelihoods of each token given the bidirectional context, with lower scores indicating more probable sequences.",
258
+ "bbox": [
259
+ 83,
260
+ 373,
261
+ 488,
262
+ 441
263
+ ],
264
+ "page_idx": 1
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "2.1.2. MLM distillation (MD)",
269
+ "text_level": 1,
270
+ "bbox": [
271
+ 84,
272
+ 455,
273
+ 267,
274
+ 470
275
+ ],
276
+ "page_idx": 1
277
+ },
278
+ {
279
+ "type": "text",
280
+ "text": "Although the PLL demonstrates good performance for second-pass rescoring [9], it is not computationally efficient: $|E|$ additional sequences masking every position need to be generated and encoded by BERT, thus the computation due to PLL is on the order of $|E|$ times that of an autoregressive Transformer model of similar size. Following [10], one can \"distill\" the PLL calculation into a single utterance-level score using the CLS representation, such that the model is able to approximate PLL, while eliminating the need for masking $|E|$ times, as well as the large vocabulary softmax in $P(e_t|E_{\\backslash t})$ , thereby reducing computation significantly. As shown in the equations below, each sentence $E_i$ is individually encoded by BERT, represented by the hidden state of the CLS token in the last Transformers layer, denoted by $g_i$ . An additional layer is stacked on top of the CLS hidden states to produce the score $s_i^l$ for $E_i$ . The distillation is achieved by training the model to mimic PLL scores using mean squared error (MSE) regression loss:",
281
+ "bbox": [
282
+ 81,
283
+ 477,
284
+ 488,
285
+ 689
286
+ ],
287
+ "page_idx": 1
288
+ },
289
+ {
290
+ "type": "equation",
291
+ "text": "\n$$\ng _ {i} = \\operatorname {B E R T} ^ {\\mathrm {C L S}} \\left(E _ {i}\\right) \\tag {2}\n$$\n",
292
+ "text_format": "latex",
293
+ "bbox": [
294
+ 227,
295
+ 696,
296
+ 486,
297
+ 714
298
+ ],
299
+ "page_idx": 1
300
+ },
301
+ {
302
+ "type": "equation",
303
+ "text": "\n$$\ns _ {i} ^ {l} = \\operatorname {F F N N} (g _ {i}) \\tag {3}\n$$\n",
304
+ "text_format": "latex",
305
+ "bbox": [
306
+ 227,
307
+ 715,
308
+ 486,
309
+ 733
310
+ ],
311
+ "page_idx": 1
312
+ },
313
+ {
314
+ "type": "equation",
315
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {M D}} = \\left| s _ {i} ^ {l} - \\operatorname {P L L} \\left(E _ {i}\\right) \\right| ^ {2} \\tag {4}\n$$\n",
316
+ "text_format": "latex",
317
+ "bbox": [
318
+ 210,
319
+ 736,
320
+ 486,
321
+ 753
322
+ ],
323
+ "page_idx": 1
324
+ },
325
+ {
326
+ "type": "text",
327
+ "text": "FFNN denotes the learnable feed-forward neural network, $s_i^l$ is the predicted PLL approximation, and $\\mathrm{PLL}(E_i)$ is precomputed offline using Eq. (1). Note that the PLL can be computed by a larger teacher model.",
328
+ "bbox": [
329
+ 83,
330
+ 763,
331
+ 488,
332
+ 818
333
+ ],
334
+ "page_idx": 1
335
+ },
336
+ {
337
+ "type": "text",
338
+ "text": "2.2. BERT With Discriminative Training",
339
+ "text_level": 1,
340
+ "bbox": [
341
+ 84,
342
+ 835,
343
+ 349,
344
+ 849
345
+ ],
346
+ "page_idx": 1
347
+ },
348
+ {
349
+ "type": "text",
350
+ "text": "We now propose methods for training BERT with discriminative objective functions. For any utterance, let $\\vec{E} = \\{E_1,\\dots,E_n\\}$ be the n-best hypotheses obtained from beam search in the first-pass decoder. For any $E_{i}\\in \\vec{E}$ , let $s_i^a$ be its given score from the first pass, and",
351
+ "bbox": [
352
+ 83,
353
+ 856,
354
+ 488,
355
+ 914
356
+ ],
357
+ "page_idx": 1
358
+ },
359
+ {
360
+ "type": "text",
361
+ "text": "$s_i^t$ be the score from the second pass (same as Eq. (3)), with lower scores for more likely hypotheses for both; let $\\epsilon_{i}$ be its number of word errors (edit distance) from the ground truth transcription. Following the common theme of second pass rescoring approaches, the final score $s_i$ is the linear combination of the first-pass and second-pass scores:",
362
+ "bbox": [
363
+ 508,
364
+ 92,
365
+ 915,
366
+ 171
367
+ ],
368
+ "page_idx": 1
369
+ },
370
+ {
371
+ "type": "equation",
372
+ "text": "\n$$\ns _ {i} = s _ {i} ^ {a} + \\beta \\cdot s _ {i} ^ {l}, \\tag {5}\n$$\n",
373
+ "text_format": "latex",
374
+ "bbox": [
375
+ 651,
376
+ 176,
377
+ 913,
378
+ 193
379
+ ],
380
+ "page_idx": 1
381
+ },
382
+ {
383
+ "type": "text",
384
+ "text": "where $\\beta$ is the hyperparameter controlling the second-pass contribution. $s_i$ is then used to compute discriminative loss, as defined next.",
385
+ "bbox": [
386
+ 508,
387
+ 200,
388
+ 915,
389
+ 239
390
+ ],
391
+ "page_idx": 1
392
+ },
393
+ {
394
+ "type": "text",
395
+ "text": "2.2.1. Discriminative loss function",
396
+ "text_level": 1,
397
+ "bbox": [
398
+ 509,
399
+ 256,
400
+ 723,
401
+ 270
402
+ ],
403
+ "page_idx": 1
404
+ },
405
+ {
406
+ "type": "text",
407
+ "text": "We explore two discriminative loss functions: MWER and MWED.",
408
+ "bbox": [
409
+ 509,
410
+ 277,
411
+ 911,
412
+ 289
413
+ ],
414
+ "page_idx": 1
415
+ },
416
+ {
417
+ "type": "text",
418
+ "text": "MWER (Minimum word error rate): A standard discriminative loss function for ASR rescoring is MWER [7]. The training minimizes the expected number of word errors for the n-best hypotheses:",
419
+ "bbox": [
420
+ 508,
421
+ 290,
422
+ 913,
423
+ 342
424
+ ],
425
+ "page_idx": 1
426
+ },
427
+ {
428
+ "type": "equation",
429
+ "text": "\n$$\nP _ {i} = \\frac {e ^ {- s _ {i}}}{\\sum_ {j = 1} ^ {n} e ^ {- s _ {j}}} \\tag {6}\n$$\n",
430
+ "text_format": "latex",
431
+ "bbox": [
432
+ 650,
433
+ 351,
434
+ 913,
435
+ 382
436
+ ],
437
+ "page_idx": 1
438
+ },
439
+ {
440
+ "type": "equation",
441
+ "text": "\n$$\n\\bar {\\epsilon} _ {H} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\epsilon_ {i} \\tag {7}\n$$\n",
442
+ "text_format": "latex",
443
+ "bbox": [
444
+ 645,
445
+ 387,
446
+ 913,
447
+ 420
448
+ ],
449
+ "page_idx": 1
450
+ },
451
+ {
452
+ "type": "equation",
453
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {M W E R}} = \\sum_ {i = 1} ^ {n} P _ {i} \\cdot \\left(\\epsilon_ {i} - \\bar {\\epsilon} _ {H}\\right). \\tag {8}\n$$\n",
454
+ "text_format": "latex",
455
+ "bbox": [
456
+ 622,
457
+ 424,
458
+ 913,
459
+ 458
460
+ ],
461
+ "page_idx": 1
462
+ },
463
+ {
464
+ "type": "text",
465
+ "text": "$P_{i}$ is the posterior probability of each hypothesis, normalized over the hypotheses list from the first pass, such that higher probabilities indicate preferred hypotheses. $s_i$ is the final score of the hypothesis as in Eq. (5). The MWER loss $\\mathcal{L}_{\\mathrm{MWER}}$ represents the expected number of relative word errors, with $\\bar{\\epsilon}_H$ being the averaged word errors across the n-best list, which does not change the optima but helps to reduce the variance.",
466
+ "bbox": [
467
+ 508,
468
+ 465,
469
+ 913,
470
+ 556
471
+ ],
472
+ "page_idx": 1
473
+ },
474
+ {
475
+ "type": "text",
476
+ "text": "MWED (Matching word error distribution): MWED is a new loss function proposed here. Its goal is to mimic the distribution of n-best word errors through the predicted scores. As a result, the ranking of final scores should ideally be exactly the same as ranking by the word errors, which could potentially lead to better score interpolation at evaluation. By contrast, the model trained with the existing MWER loss as in Eq (8) picks the best hypothesis discriminatively, such that the full probability mass should be assigned to the one with minimum word errors in the ideal case.",
477
+ "bbox": [
478
+ 508,
479
+ 556,
480
+ 915,
481
+ 674
482
+ ],
483
+ "page_idx": 1
484
+ },
485
+ {
486
+ "type": "text",
487
+ "text": "The MWED loss is proposed as the following:",
488
+ "bbox": [
489
+ 534,
490
+ 675,
491
+ 812,
492
+ 689
493
+ ],
494
+ "page_idx": 1
495
+ },
496
+ {
497
+ "type": "equation",
498
+ "text": "\n$$\nd _ {i} ^ {\\epsilon} = \\frac {e ^ {\\epsilon_ {i}}}{\\sum_ {j = 1} ^ {n} e ^ {\\epsilon_ {j}}} \\tag {9}\n$$\n",
499
+ "text_format": "latex",
500
+ "bbox": [
501
+ 661,
502
+ 695,
503
+ 913,
504
+ 727
505
+ ],
506
+ "page_idx": 1
507
+ },
508
+ {
509
+ "type": "equation",
510
+ "text": "\n$$\nd _ {i} ^ {s} = \\frac {e ^ {s _ {i} / T}}{\\sum_ {j = 1} ^ {n} e ^ {s _ {j} / T}} \\tag {10}\n$$\n",
511
+ "text_format": "latex",
512
+ "bbox": [
513
+ 661,
514
+ 729,
515
+ 913,
516
+ 762
517
+ ],
518
+ "page_idx": 1
519
+ },
520
+ {
521
+ "type": "equation",
522
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {M W E D}} = - \\sum_ {i = 1} ^ {n} d _ {i} ^ {e} \\log d _ {i} ^ {s} \\tag {11}\n$$\n",
523
+ "text_format": "latex",
524
+ "bbox": [
525
+ 633,
526
+ 765,
527
+ 913,
528
+ 800
529
+ ],
530
+ "page_idx": 1
531
+ },
532
+ {
533
+ "type": "text",
534
+ "text": "$d_{i}^{\\epsilon}$ and $d_{i}^{s}$ represent the relative distribution of word errors and predicted scores over the n-best list. $\\mathcal{L}_{\\mathrm{MWED}}$ is the cross-entropy from scores to word errors, equivalent to optimizing the Kullback-Leibler divergence between the two distributions. Due to that $s_i$ contains $s_i^a$ which is fixed, to stabilize the match of the two distributions, we add a hyperparameter $T$ to rescale the distribution mass of $s_i$ . In practice, we found that $T = \\sum_{i=1}^{n} s_i / \\sum_{i=1}^{n} \\epsilon_i$ can yield good performance.",
535
+ "bbox": [
536
+ 508,
537
+ 806,
538
+ 915,
539
+ 912
540
+ ],
541
+ "page_idx": 1
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "2.2.2. Training with discriminative loss only",
546
+ "text_level": 1,
547
+ "bbox": [
548
+ 84,
549
+ 92,
550
+ 354,
551
+ 106
552
+ ],
553
+ "page_idx": 2
554
+ },
555
+ {
556
+ "type": "text",
557
+ "text": "Training BERT naively with discriminative loss using word-level scores, as done in [4, 6], requires computation of Eq. (1) for every hypothesis and is prohibitively expensive during both training and inference. Instead, it can be fine-tuned such that the sentence-level score from the CLS representation (as in Eq. (3)) minimizes the discriminative loss $\\mathcal{L}_{\\mathrm{MWER}}$ or $\\mathcal{L}_{\\mathrm{MWED}}$ defined earlier.",
558
+ "bbox": [
559
+ 81,
560
+ 112,
561
+ 486,
562
+ 191
563
+ ],
564
+ "page_idx": 2
565
+ },
566
+ {
567
+ "type": "text",
568
+ "text": "In Section 4, we show results using this approach with $\\mathcal{L}_{\\mathrm{MWER}}$ , labeled \"MWER Only\", where we perform MWER training on a pretrained BERT with domain adaptation.",
569
+ "bbox": [
570
+ 83,
571
+ 191,
572
+ 486,
573
+ 232
574
+ ],
575
+ "page_idx": 2
576
+ },
577
+ {
578
+ "type": "text",
579
+ "text": "2.2.3. Training with combined MLM and discriminative loss",
580
+ "text_level": 1,
581
+ "bbox": [
582
+ 83,
583
+ 247,
584
+ 447,
585
+ 261
586
+ ],
587
+ "page_idx": 2
588
+ },
589
+ {
590
+ "type": "text",
591
+ "text": "We propose a fusion strategy to incorporate MLM distillation into discriminative training. It is accomplished by making two modifications to the approach in Section 2.2.2, where only discriminative loss is applied.",
592
+ "bbox": [
593
+ 81,
594
+ 268,
595
+ 488,
596
+ 321
597
+ ],
598
+ "page_idx": 2
599
+ },
600
+ {
601
+ "type": "text",
602
+ "text": "First, we apply a pretraining step using MD alone on a large-scale text-only corpus, so that the discriminative training can be warm-started from a better initialization point. Unlike MWER training, MD only needs text-only data and their PLL scores computed by a teacher model. Therefore, the distillation itself can be trained on much more data than the n-best hypotheses used in MWER training.",
603
+ "bbox": [
604
+ 81,
605
+ 321,
606
+ 486,
607
+ 400
608
+ ],
609
+ "page_idx": 2
610
+ },
611
+ {
612
+ "type": "text",
613
+ "text": "Second, we introduce the new loss $\\mathcal{L}$ to replace $\\mathcal{L}_{\\mathrm{MWER}}$ or $\\mathcal{L}_{\\mathrm{MWED}}$ in the discriminative training step:",
614
+ "bbox": [
615
+ 83,
616
+ 400,
617
+ 488,
618
+ 426
619
+ ],
620
+ "page_idx": 2
621
+ },
622
+ {
623
+ "type": "equation",
624
+ "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {D i s c r i m i n a t i v e}} + \\lambda \\cdot \\sum_ {i = 1} ^ {n} \\mathcal {L} _ {\\mathrm {M D}} \\left(E _ {i}\\right), \\tag {12}\n$$\n",
625
+ "text_format": "latex",
626
+ "bbox": [
627
+ 169,
628
+ 436,
629
+ 486,
630
+ 470
631
+ ],
632
+ "page_idx": 2
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "where $\\mathcal{L}_{\\text{Discriminative}}$ is the discriminative loss that can be either $\\mathcal{L}_{\\text{MWER}}$ or $\\mathcal{L}_{\\text{MWED}}$ . $\\mathcal{L}_{\\text{MD}}$ is similar to cross-entropy regularization added to MWER loss in [6], and controlled by the hyperparameter $\\lambda$ . It is found that MD pretraining is more important than adding additional MD loss. On top of MD pretraining, having an additional step of adding MD loss yields less than $0.5\\%$ relative improvement from all experiments.",
637
+ "bbox": [
638
+ 81,
639
+ 479,
640
+ 488,
641
+ 571
642
+ ],
643
+ "page_idx": 2
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "In Section 4, we show the results using this approach with $\\mathcal{L}_{\\mathrm{MWER}}$ and $\\mathcal{L}_{\\mathrm{MWED}}$ , which are named MD-MWER and MD-MWED, respectively.",
648
+ "bbox": [
649
+ 83,
650
+ 571,
651
+ 488,
652
+ 613
653
+ ],
654
+ "page_idx": 2
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "3. EXPERIMENTS",
659
+ "text_level": 1,
660
+ "bbox": [
661
+ 218,
662
+ 628,
663
+ 352,
664
+ 642
665
+ ],
666
+ "page_idx": 2
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "3.1. Datasets",
671
+ "text_level": 1,
672
+ "bbox": [
673
+ 83,
674
+ 655,
675
+ 173,
676
+ 667
677
+ ],
678
+ "page_idx": 2
679
+ },
680
+ {
681
+ "type": "text",
682
+ "text": "We evaluated our approach on four datasets in multiple domains and locales to test its general applicability, including one public dataset LibriSpeech [16] and three internal dataset based on a conversational agent (one for Information (Info) domain in English (en), and two for Info and Navigation (Nav) domains in Japanese (ja)).",
683
+ "bbox": [
684
+ 81,
685
+ 675,
686
+ 488,
687
+ 742
688
+ ],
689
+ "page_idx": 2
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "For LibriSpeech, an LAS model [17] is adopted as the first-pass decoder, and we use the same decoded 100-best hypotheses of the dev and test set along with their first-pass scores used by [9, 10]. Since there is no dedicated training set provided for MWER, we combine the decoded hypotheses from both dev-clean and dev-other as the MWER training set, and randomly hold out $10\\%$ utterances as the MWER dev set. The resulting training/dev set has 5011/556 utterances, with up to 100 hypotheses per utterance. For MLM distillation, we sample 4 million utterances from the in-domain text corpus provided by LibriSpeech as the training set, similar to [10].",
694
+ "bbox": [
695
+ 81,
696
+ 742,
697
+ 488,
698
+ 873
699
+ ],
700
+ "page_idx": 2
701
+ },
702
+ {
703
+ "type": "text",
704
+ "text": "All internal datasets consist of de-identified live user interactions with a conversational agent, decoded by a RNN-T model [18] for en or a hybrid HMM model for ja. For Info (en), $\\sim 100 / 5$ hours of",
705
+ "bbox": [
706
+ 83,
707
+ 873,
708
+ 488,
709
+ 912
710
+ ],
711
+ "page_idx": 2
712
+ },
713
+ {
714
+ "type": "text",
715
+ "text": "utterances are used as the MWER training/dev set; the test set has $\\sim 2$ hours of long-tail info utterances. For Info and Nav in ja, we use a single MWER training/dev set that consists of $\\sim 220 / 10$ hours of utterances in multiple domains including Info and Nav domains. The test set has $\\sim 1 / 3$ hours of utterances for Info/Nav respectively. For MD, 4 million in-domain utterances sampled from user interactions are used as training set.",
716
+ "bbox": [
717
+ 508,
718
+ 92,
719
+ 915,
720
+ 185
721
+ ],
722
+ "page_idx": 2
723
+ },
724
+ {
725
+ "type": "text",
726
+ "text": "3.2. Implementation",
727
+ "text_level": 1,
728
+ "bbox": [
729
+ 509,
730
+ 203,
731
+ 645,
732
+ 215
733
+ ],
734
+ "page_idx": 2
735
+ },
736
+ {
737
+ "type": "text",
738
+ "text": "For LibriSpeech, we use the uncased $\\mathrm{BERT}_{\\mathrm{Base}}$ for our experiments to enable direct comparison with previous work. For internal datasets, we use an in-house multilingual BERT of $\\sim 170\\mathrm{M}$ parameters (excluding embedding size) with 16 layers and 1024 hidden size, supporting both en and ja locales. In addition, our final MWER setting also includes two smaller BERT models of $\\sim 17\\mathrm{M} / 5\\mathrm{M}$ parameters distilled [19] from the 170M BERT model, both with only 4 layers and 768/320 hidden size respectively. All BERT models are implemented in PyTorch and pretrained on public data. We limit the maximum sequence length to 128 for LibriSpeech, and 32 for others; longer utterances will be truncated. The $\\lambda$ parameter in Eq. (12) is set to $10^{-4}$ . We found that $\\lambda$ between $10^{-4}$ and $10^{-3}$ generally yield same performance.",
739
+ "bbox": [
740
+ 508,
741
+ 224,
742
+ 915,
743
+ 396
744
+ ],
745
+ "page_idx": 2
746
+ },
747
+ {
748
+ "type": "text",
749
+ "text": "Before we conducted any training, we first performed domain adaptation for BERT, as BERT contains general world knowledge from pretraining but not necessarily task-specific knowledge. Therefore, we take the pretrained BERT and further train with the MLM objective on the in-domain corpus. For LibriSpeech, we train 400K steps on the provided text corpus, similar to [10]. For internal datasets, we train 200K steps on the in-domain transcriptions for each of the en and ja locales. Each step has an effective batch size of 256 utterances for both of these cases.",
750
+ "bbox": [
751
+ 508,
752
+ 396,
753
+ 915,
754
+ 513
755
+ ],
756
+ "page_idx": 2
757
+ },
758
+ {
759
+ "type": "text",
760
+ "text": "3.3. Baseline and Evaluation Protocol",
761
+ "text_level": 1,
762
+ "bbox": [
763
+ 509,
764
+ 532,
765
+ 754,
766
+ 546
767
+ ],
768
+ "page_idx": 2
769
+ },
770
+ {
771
+ "type": "text",
772
+ "text": "For LibriSpeech, we use the results of the MLM distillation (MD) as the baseline, which can be seen as our re-implementation of the \"sentence-level fine-tuning\" results from [10], which has the same low-latency scoring as our MWER setting. We also provide the results of high-latency PLL scores for comparison. WER is used as the evaluation metric, and the optimal interpolation weight $\\beta$ in Eq. (5) is linearly searched on the dev set.",
773
+ "bbox": [
774
+ 508,
775
+ 554,
776
+ 913,
777
+ 647
778
+ ],
779
+ "page_idx": 2
780
+ },
781
+ {
782
+ "type": "text",
783
+ "text": "For internal datasets, we use the LM scoring from a 2-layer LSTM trained with noise contrastive estimation (NCE) [20] as the baseline, which is often employed in industrial settings for streaming applications. The LSTM is trained on the same data used for domain adaptation for BERT. New scores from BERT replace the existing LSTM scores, and the optimal weight is searched on the dev set as well. We report the relative improvements in WER for en, and in CER (character error rate) for ja.",
784
+ "bbox": [
785
+ 508,
786
+ 648,
787
+ 913,
788
+ 753
789
+ ],
790
+ "page_idx": 2
791
+ },
792
+ {
793
+ "type": "text",
794
+ "text": "4. RESULTS AND ANALYSIS",
795
+ "text_level": 1,
796
+ "bbox": [
797
+ 609,
798
+ 772,
799
+ 813,
800
+ 785
801
+ ],
802
+ "page_idx": 2
803
+ },
804
+ {
805
+ "type": "text",
806
+ "text": "4.1. Comparing Different BERT Rescoring Approaches",
807
+ "text_level": 1,
808
+ "bbox": [
809
+ 509,
810
+ 799,
811
+ 857,
812
+ 811
813
+ ],
814
+ "page_idx": 2
815
+ },
816
+ {
817
+ "type": "text",
818
+ "text": "Tables 1(a) and (b) show the evaluation results on both LibriSpeech and internal datasets. Here, PLL denotes the approach in Section 2.1.1 which is computationally expensive; MD denotes the approach in Section 2.1.2 that distills the PLL score; MWER Only denotes the approach described in Section 2.2.2 that trains BERT with the MWER objective only; MD-MWER and MD-MWED denote the approaches described in Section 2.2.3 that incorporate MLM into",
819
+ "bbox": [
820
+ 508,
821
+ 820,
822
+ 913,
823
+ 912
824
+ ],
825
+ "page_idx": 2
826
+ },
827
+ {
828
+ "type": "text",
829
+ "text": "Table 1: Evaluation results on the test partitions of all datasets. Details of the baseline and evaluation protocol are described in Section 3.3. The case of \"MWED only\" is not included, and the relative difference between it and \"MWER only\" is within $1\\%$ for all tests.",
830
+ "bbox": [
831
+ 83,
832
+ 88,
833
+ 486,
834
+ 142
835
+ ],
836
+ "page_idx": 3
837
+ },
838
+ {
839
+ "type": "text",
840
+ "text": "(a) WER on the two test sets of LibriSpeech using $\\mathrm{BERT_{Base}}$ . Numbers inside parentheses are relative improvements compared to the baseline.",
841
+ "bbox": [
842
+ 83,
843
+ 148,
844
+ 486,
845
+ 172
846
+ ],
847
+ "page_idx": 3
848
+ },
849
+ {
850
+ "type": "table",
851
+ "img_path": "images/07aa3904dff1492ce34af29903787179b90bb8cd919f379940369b5d75834ac6.jpg",
852
+ "table_caption": [],
853
+ "table_footnote": [],
854
+ "table_body": "<table><tr><td></td><td>Test-Clean</td><td>Test-Other</td></tr><tr><td>First-Pass</td><td>7.26</td><td>20.37</td></tr><tr><td>PLL</td><td>4.54</td><td>16.08</td></tr><tr><td>Baseline (MD)</td><td>4.67</td><td>16.15</td></tr><tr><td>MWER Only</td><td>4.82 (-3.2%)</td><td>16.35 (-1.2%)</td></tr><tr><td>MD-MWER</td><td>4.42 (5.3%)</td><td>15.87 (1.7%)</td></tr><tr><td>MD-MWED</td><td>4.36 (6.6%)</td><td>15.60 (3.4%)</td></tr></table>",
855
+ "bbox": [
856
+ 143,
857
+ 178,
858
+ 433,
859
+ 286
860
+ ],
861
+ "page_idx": 3
862
+ },
863
+ {
864
+ "type": "text",
865
+ "text": "(b) Relative improvements of WER (for en) and CER (for ja) on three internal datasets, using the in-house 170M BERT model.",
866
+ "bbox": [
867
+ 83,
868
+ 289,
869
+ 486,
870
+ 314
871
+ ],
872
+ "page_idx": 3
873
+ },
874
+ {
875
+ "type": "table",
876
+ "img_path": "images/5346e820384b8c4b8f8d8026b889a52df9f5d15198854f9f484aeb5fefec3ed8.jpg",
877
+ "table_caption": [],
878
+ "table_footnote": [],
879
+ "table_body": "<table><tr><td></td><td>Info (en)</td><td>Info (ja)</td><td>Nav (ja)</td></tr><tr><td>LSTM</td><td>Baseline</td><td>Baseline</td><td>Baseline</td></tr><tr><td>MD</td><td>2.6%</td><td>3.7%</td><td>5.6%</td></tr><tr><td>MWER Only</td><td>5.3%</td><td>11.8%</td><td>11.2%</td></tr><tr><td>MD-MWER</td><td>4.0%</td><td>12.0%</td><td>12.7%</td></tr><tr><td>MD-MWED</td><td>6.6%</td><td>10.4%</td><td>12.2%</td></tr></table>",
880
+ "bbox": [
881
+ 143,
882
+ 319,
883
+ 433,
884
+ 407
885
+ ],
886
+ "page_idx": 3
887
+ },
888
+ {
889
+ "type": "text",
890
+ "text": "discriminative training with MWER and MWED loss functions (as in Section 2.2.1), respectively.",
891
+ "bbox": [
892
+ 81,
893
+ 414,
894
+ 486,
895
+ 440
896
+ ],
897
+ "page_idx": 3
898
+ },
899
+ {
900
+ "type": "text",
901
+ "text": "Based on these results, we can make three observations. First, discriminative training significantly improves upon nondiscriminative BERT rescoring (MD) across all test sets: $6.6\\% / 3.4\\%$ WER relative improvement on LibriSpeech and $4\\% / 8.3\\% / 7.1\\%$ relative WER reduction on internal datasets. What is particularly striking is that on LibriSpeech, because of discriminative training, both MD-MWER and MD-MWED now outperform the much more computationally expensive PLL approach. Second, the fusion approach of incorporating MLM in discriminative training improves on all test sets. The effect is particularly strong on the LibriSpeech test sets, where MWER Only would actually perform worse than MD with $3.2\\% / 1.2\\%$ relative WER degradations, but where the fusion approach now gives $5.3\\% / 1.7\\%$ relative improvement over MD. Third, to compare the new loss function MWED with the existing MWER, MD-MWER achieves better performance on both LibriSpeech $(1.3\\%)$ and $1.7\\%$ relative) and Info (en) $(2.6\\%)$ relative) over MD-MWER, but worse than MD-MWER on both ja test sets. This result shows that MWED can be a strong alternative loss in the MWER training, and the final performance can be dataset-specific. One potential explanation for MWED being less effective for ja is that the CER distribution is spikier than the WER distribution, resulting in less stable gradients from the relative entropy in Eq. (11).",
902
+ "bbox": [
903
+ 81,
904
+ 441,
905
+ 486,
906
+ 731
907
+ ],
908
+ "page_idx": 3
909
+ },
910
+ {
911
+ "type": "text",
912
+ "text": "4.2. Low Latency for Streaming Applications",
913
+ "text_level": 1,
914
+ "bbox": [
915
+ 83,
916
+ 746,
917
+ 375,
918
+ 761
919
+ ],
920
+ "page_idx": 3
921
+ },
922
+ {
923
+ "type": "text",
924
+ "text": "A main challenge when applying second-pass rescoring for streaming applications is to keep user-perceived latency low while obtain-",
925
+ "bbox": [
926
+ 83,
927
+ 768,
928
+ 486,
929
+ 795
930
+ ],
931
+ "page_idx": 3
932
+ },
933
+ {
934
+ "type": "image",
935
+ "img_path": "images/42560f3c33ab629bea68d19699b694aeb7ddebb030409ee06071661f8cbf8ad6.jpg",
936
+ "image_caption": [
937
+ "Fig. 2: Diagram of training the distilled 5M-parameter BERT model with the fusion strategy described in Section 2.2.3); the 170M BERT model is the teacher for calculating PLL scores."
938
+ ],
939
+ "image_footnote": [],
940
+ "bbox": [
941
+ 117,
942
+ 801,
943
+ 454,
944
+ 878
945
+ ],
946
+ "page_idx": 3
947
+ },
948
+ {
949
+ "type": "text",
950
+ "text": "Table 2: Performance and latency study of our approach using BERT of different parameter sizes on three internal datasets.",
951
+ "bbox": [
952
+ 508,
953
+ 88,
954
+ 913,
955
+ 114
956
+ ],
957
+ "page_idx": 3
958
+ },
959
+ {
960
+ "type": "text",
961
+ "text": "(a) Relative improvements over LSTM (4M) on three internal datasets, using the best setting (MD-MWER/MWED) according to Table 1b for each dataset.",
962
+ "bbox": [
963
+ 508,
964
+ 122,
965
+ 913,
966
+ 146
967
+ ],
968
+ "page_idx": 3
969
+ },
970
+ {
971
+ "type": "table",
972
+ "img_path": "images/79c8cfe2185bfce97c0c8992bdafd329a14caa6ee9f0bd808ab775af937bba7f.jpg",
973
+ "table_caption": [],
974
+ "table_footnote": [],
975
+ "table_body": "<table><tr><td>BERT</td><td>Info (en)</td><td>Info (ja)</td><td>Nav (ja)</td></tr><tr><td>170M</td><td>6.6%</td><td>12.0%</td><td>12.7%</td></tr><tr><td>17M</td><td>3.5%</td><td>9.1%</td><td>9.4%</td></tr><tr><td>5M</td><td>3.1%</td><td>7.8%</td><td>7.8%</td></tr></table>",
976
+ "bbox": [
977
+ 594,
978
+ 152,
979
+ 834,
980
+ 214
981
+ ],
982
+ "page_idx": 3
983
+ },
984
+ {
985
+ "type": "text",
986
+ "text": "(b) Averaged latency (in ms) of each batch using 2 threads on a CPU, with a batch size of 5 hypotheses. SL: input sequence/hypothesis length. Parentheses indicate relative latency compared to LSTM.",
987
+ "bbox": [
988
+ 508,
989
+ 217,
990
+ 913,
991
+ 253
992
+ ],
993
+ "page_idx": 3
994
+ },
995
+ {
996
+ "type": "table",
997
+ "img_path": "images/3642250898c35904bf8c4453d3733497102a6172ce13f0b505ebf65733cec475.jpg",
998
+ "table_caption": [],
999
+ "table_footnote": [],
1000
+ "table_body": "<table><tr><td>SL</td><td>LSTM (4M)</td><td>BERT (5M)</td><td>BERT (17M)</td><td>BERT (170M)</td></tr><tr><td>16</td><td>9.7</td><td>7.6 (78%)</td><td>17.5 (180%)</td><td>180 (1.8k%)</td></tr><tr><td>32</td><td>18.7</td><td>11.0 (59%)</td><td>26.3 (141%)</td><td>270 (1.4k%)</td></tr></table>",
1001
+ "bbox": [
1002
+ 521,
1003
+ 258,
1004
+ 906,
1005
+ 309
1006
+ ],
1007
+ "page_idx": 3
1008
+ },
1009
+ {
1010
+ "type": "text",
1011
+ "text": "ing the accuracy gains. We next examine how to further distill the model to reduce its compute footprint and hence, achieve even lower latency. We focus on the internal datasets for this task. As described in Section 3.2, in addition to the 170M BERT model, we have 17M and 5M BERT models distilled [19] from it. To achieve low latency, we can perform MD-MWER or MD-MWED starting from this smaller model, except that to maintain WER gains, we use the 170M BERT model to compute PLL scores in MD training, as well as in the final combined loss. This training process is illustrated in Figure 2.",
1012
+ "bbox": [
1013
+ 506,
1014
+ 318,
1015
+ 913,
1016
+ 450
1017
+ ],
1018
+ "page_idx": 3
1019
+ },
1020
+ {
1021
+ "type": "text",
1022
+ "text": "Table 2a shows the relative improvements using BERT models with 170M, 17M, and 5M parameters described in Section 3.2 with the best settings for each dataset. Smaller BERT is expected to yield less improvement; nevertheless, the degradation is within a relatively small margin even for the much smaller 5M version that has only $\\sim 3\\%$ the parameters of the 170M model, with still nearly $8\\%$ improvement on two ja datasets and $3 + \\%$ on Info (en).",
1023
+ "bbox": [
1024
+ 506,
1025
+ 450,
1026
+ 913,
1027
+ 542
1028
+ ],
1029
+ "page_idx": 3
1030
+ },
1031
+ {
1032
+ "type": "text",
1033
+ "text": "Table 2b shows the latency comparison among three BERT models and the NCE-based LSTM of 4M parameters, using the PyTorch benchmarking tool under direct model inference in Python using 2 threads on a CPU. 5M BERT is shown to be faster than LSTM, while 17M BERT is slower but also appears comparable. Overall, Table 2 shows that our proposed approach can be a superior substitute for LSTM scoring in deployed systems. In particular, from Table 2a and 2b, 5M BERT significantly outperforms a similar-sized 4M LSTM, both in WER (by $3.1\\% /7.8\\% /7.8\\%$ ) and in latency (by $22\\% /41\\%$ ).",
1034
+ "bbox": [
1035
+ 506,
1036
+ 544,
1037
+ 913,
1038
+ 664
1039
+ ],
1040
+ "page_idx": 3
1041
+ },
1042
+ {
1043
+ "type": "text",
1044
+ "text": "5. CONCLUSION",
1045
+ "text_level": 1,
1046
+ "bbox": [
1047
+ 648,
1048
+ 686,
1049
+ 774,
1050
+ 700
1051
+ ],
1052
+ "page_idx": 3
1053
+ },
1054
+ {
1055
+ "type": "text",
1056
+ "text": "We have proposed a method to train a BERT rescoring model with discriminative objective functions. We show that discriminative training can significantly improve BERT rescoring on a variety of datasets: $6.6\\% / 3.4\\%$ relative WER improvement on LibriSpeech and $4\\% / 8.3\\% / 7.1\\%$ relative WER improvement on internal voice assistant datasets. The proposed fusion strategy to incorporate MLM into discriminative training is found to further reduce WER. We also propose a new discriminative loss MWED that is a strong alternative to the standard MWER loss, yielding $1.3\\%$ and $1.7\\%$ relative WER improvement over MWER loss on LibriSpeech and $2.6\\%$ relative improvement on one internal dataset. Lastly, we show how to further distill the model to achieve even lower latency for streaming applications while preserving WER improvements. We achieve $3 - 8\\%$ relative WER improvement and lower latency compared to a baseline LSTM model on internal data.",
1057
+ "bbox": [
1058
+ 506,
1059
+ 714,
1060
+ 913,
1061
+ 912
1062
+ ],
1063
+ "page_idx": 3
1064
+ },
1065
+ {
1066
+ "type": "text",
1067
+ "text": "6. REFERENCES",
1068
+ "text_level": 1,
1069
+ "bbox": [
1070
+ 223,
1071
+ 90,
1072
+ 346,
1073
+ 104
1074
+ ],
1075
+ "page_idx": 4
1076
+ },
1077
+ {
1078
+ "type": "list",
1079
+ "sub_type": "ref_text",
1080
+ "list_items": [
1081
+ "[1] Yingce Xia, Fei Tian, Lijun Wu, Jianxin Lin, Tao Qin, Nenghai Yu, and Tie-Yan Liu, \"Deliberation networks: Sequence generation beyond one-pass decoding,\" in Advances in Neural Information Processing Systems, I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, Eds. 2017, vol. 30, Curran Associates, Inc.",
1082
+ "[2] Tara N. Sainath, Ruoming Pang, David Rybach, Yanzhang He, Rohit Prabhavalkar, Wei Li, Mirkó Visontai, Qiao Liang, Trevor Strohman, Yonghui Wu, Ian McGraw, and Chung-Cheng Chiu, “Two-Pass End-to-End Speech Recognition,” in Proc. Interspeech, 2019, pp. 2773–2777.",
1083
+ "[3] Ke Hu, Tara N Sainath, Ruoming Pang, and Rohit Prabhavalkar, “Deliberation model based two-pass end-to-end speech recognition,” in Proc. IEEE ICASSP, 2020, pp. 7799–7803.",
1084
+ "[4] Ankur Gandhi and Ariya Rastrow, \"Audio-attention discriminative language model for asr rescoring,\" in Proc. IEEE ICASSP, 2020, pp. 7944-7948.",
1085
+ "[5] Ke Hu, Ruoming Pang, Tara N Sainath, and Trevor Strohman, \"Transformer based deliberation for two-pass speech recognition,\" in Proc. IEEE Spoken Language Technology Workshop, 2021, pp. 68-74.",
1086
+ "[6] Takaaki Hori, Chiori Hori, Shinji Watanabe, and J. Hershey, \"Minimum word error training of long short-term memory recurrent neural network language models for speech recognition,\" in Proc. IEEE ICASSP, 2016, pp. 5990-5994.",
1087
+ "[7] Rohit Prabhavalkar, Tara N Sainath, Yonghui Wu, Patrick Nguyen, Zhifeng Chen, Chung-Cheng Chiu, and Anjuli Kannan, “Minimum word error rate training for attention-based sequence-to-sequence models,” in Proc. IEEE ICASSP, 2018, pp. 4839–4843.",
1088
+ "[8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova, “BERT: Pre-training of deep bidirectional transformers for language understanding,” in Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, Minneapolis, Minnesota, June 2019, pp. 4171–4186.",
1089
+ "[9] Joonbo Shin, Yoonhyung Lee, and Kyomin Jung, \"Effective sentence scoring method using BERT for speech recognition,\" in Asian Conference on Machine Learning. PMLR, 2019, pp. 1081-1093.",
1090
+ "[10] Julian Salazar, Davis Liang, Toan Q. Nguyen, and Katrin Kirchhoff, “Masked language model scoring,” in Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020, pp. 2699–2712, Association for Computational Linguistics.",
1091
+ "[11] Shih-Hsuan Chiu and Berlin Chen, “Innovative BERT-based reranking language models for speech recognition,” in Proc. IEEE Spoken Language Technology Workshop, 2021, pp. 266–271.",
1092
+ "[12] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever, \"Language models are unsupervised multitask learners,\" OpenAI Blog 1(8), 2019.",
1093
+ "[13] Caglar Gulcehre, Orhan Firat, Kelvin Xu, Kyunghyun Cho, Loic Barrault, Huei-Chi Lin, Fethi Bougares, Holger Schwenk, and Yoshua Bengio, \"On using monolingual corpora in neural machine translation,\" arXiv preprint arXiv:1503.03535, 2015."
1094
+ ],
1095
+ "bbox": [
1096
+ 86,
1097
+ 116,
1098
+ 488,
1099
+ 912
1100
+ ],
1101
+ "page_idx": 4
1102
+ },
1103
+ {
1104
+ "type": "list",
1105
+ "sub_type": "ref_text",
1106
+ "list_items": [
1107
+ "[14] Felix Stahlberg, James Cross, and Veselin Stoyanov, \"Simple fusion: Return of the language model,\" in Proceedings of the Third Conference on Machine Translation: Research Papers, Brussels, Belgium, Oct. 2018, pp. 204-211, Association for Computational Linguistics.",
1108
+ "[15] Xie Chen, Anton Ragni, Xunying Liu, and Mark JF Gales, \"Investigating bidirectional recurrent neural network language models for speech recognition,\" in Proc. Interspeech. International Speech Communication Association (ISCA), 2017, pp. 269-273.",
1109
+ "[16] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur, \"LibriSpeech: an ASR corpus based on public domain audio books,\" in Proc. IEEE ICASSP, 2015, pp. 5206-5210.",
1110
+ "[17] William Chan, Navdeep Jaitly, Quoc Le, and Oriol Vinyals, \"Listen, Attend and Spell: A neural network for large vocabulary conversational speech recognition,\" in Proc. IEEE ICASSP, 2016, pp. 4960-4964.",
1111
+ "[18] Yanzhang He, Tara N Sainath, Rohit Prabhavalkar, Ian McGraw, Raziel Alvarez, Ding Zhao, David Rybach, Anjuli Kannan, Yonghui Wu, Ruoming Pang, et al., \"Streaming end-to-end speech recognition for mobile devices,\" in Proc. IEEE ICASSP, 2019, pp. 6381-6385.",
1112
+ "[19] Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean, “Distilling the knowledge in a neural network,” in NIPS Deep Learning and Representation Learning Workshop, 2015.",
1113
+ "[20] Anirudh Raju, Denis Filimonov, Gautam Tiwari, Guitang Lan, and Ariya Rastrow, \"Scalable multi corpora neural language models for ASR,\" in Proc. Interspeech, Gernot Kubin and Zdravko Kacic, Eds. 2019, pp. 3910-3914, ISCA."
1114
+ ],
1115
+ "bbox": [
1116
+ 511,
1117
+ 92,
1118
+ 913,
1119
+ 518
1120
+ ],
1121
+ "page_idx": 4
1122
+ }
1123
+ ]
2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_model.json ADDED
@@ -0,0 +1,1299 @@




1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.11,
7
+ 0.115,
8
+ 0.892,
9
+ 0.134
10
+ ],
11
+ "angle": 0,
12
+ "content": "RESCOREBERT: DISCRIMINATIVE SPEECH RECOGNITION RESCORSING WITH BERT"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.141,
18
+ 0.151,
19
+ 0.856,
20
+ 0.189
21
+ ],
22
+ "angle": 0,
23
+ "content": "Liyan Xu\\(^{1,2}\\) Yile Gu\\(^{1}\\) Jari Kolehmainen\\(^{1}\\) Haidar Khan\\(^{1}\\) Ankur Gandhi\\(^{1}\\) Ariya Rastrow\\(^{1}\\) Andreas Stolcke\\(^{1}\\) Ivan Bulyko\\(^{1}\\)"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.288,
29
+ 0.203,
30
+ 0.713,
31
+ 0.222
32
+ ],
33
+ "angle": 0,
34
+ "content": "\\(^{1}\\)Amazon Alexa AI, USA \\(^{2}\\)Emory University, USA"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.245,
40
+ 0.254,
41
+ 0.33,
42
+ 0.267
43
+ ],
44
+ "angle": 0,
45
+ "content": "ABSTRACT"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.087,
51
+ 0.278,
52
+ 0.49,
53
+ 0.555
54
+ ],
55
+ "angle": 0,
56
+ "content": "Second-pass rescoring is an important component in automatic speech recognition (ASR) systems that is used to improve the outputs from a first-pass decoder by implementing a lattice rescoring or \\( n \\)-best re-ranking. While pretraining with a masked language model (MLM) objective has received great success in various natural language understanding (NLU) tasks, it has not gained traction as a rescoring model for ASR. Specifically, training a bidirectional model like BERT on a discriminative objective such as minimumWER (MWER) has not been explored. Here we show how to train a BERT-based rescoring model with MWER loss, to incorporate the improvements of a discriminative loss into fine-tuning of deep bidirectional pretrained models for ASR. Specifically, we propose a fusion strategy that incorporates the MLM into the discriminative training process to effectively distill knowledge from a pretrained model. We further propose an alternative discriminative loss. This approach, which we call RescoreBERT, reduces WER by \\( 6.6\\% /3.4\\% \\) relative on the LibriSpeech clean/other test sets over a BERT baseline without discriminative objective. We also evaluate our method on an internal dataset from a conversational agent and find that it reduces both latency and WER (by 3 to \\( 8\\% \\) relative) over an LSTM rescoring model."
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.084,
62
+ 0.56,
63
+ 0.489,
64
+ 0.588
65
+ ],
66
+ "angle": 0,
67
+ "content": "Index Terms— masked language model, BERT, second-pass rescoring, pretrained model, minimum WER training"
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.216,
73
+ 0.61,
74
+ 0.358,
75
+ 0.622
76
+ ],
77
+ "angle": 0,
78
+ "content": "1. INTRODUCTION"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.083,
84
+ 0.636,
85
+ 0.489,
86
+ 0.728
87
+ ],
88
+ "angle": 0,
89
+ "content": "The two-pass paradigm has been widely adopted in state-of-the-art ASR systems [1, 2, 3, 4, 5], where the first pass generates n-best hypotheses, and the second pass reranks them. For the second-pass rescoring models, discriminative training with MWER (minimum WER) objective is typically applied [3, 6, 7, 4] to improve performance, such that the model learns to prefer hypotheses with the lowest WER."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.083,
95
+ 0.73,
96
+ 0.489,
97
+ 0.888
98
+ ],
99
+ "angle": 0,
100
+ "content": "Previous work with discriminative training uses causal language models (CLMs), such as LSTMs or Transformer LMs. While pretrained masked language models (MLMs) such as BERT [8] have been highly successful on various natural language understanding (NLU) tasks, they have not been widely applied in second-pass ASR rescoring. Meanwhile, recent studies have shown promising results using BERT in several rescoring studies [9, 10, 11], as BERT is pretrained with large corpora and encodes the full hypothesis context using a deep bidirectional model architecture. In particular, previous work [9] shows that deep bidirectional Transformers, such as BERT, can outperform their unidirectional counterparts (either forward text, backward text, or the two models combined). Another paper [10]"
101
+ },
102
+ {
103
+ "type": "image",
104
+ "bbox": [
105
+ 0.551,
106
+ 0.25,
107
+ 0.878,
108
+ 0.398
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "image_caption",
115
+ "bbox": [
116
+ 0.509,
117
+ 0.408,
118
+ 0.915,
119
+ 0.475
120
+ ],
121
+ "angle": 0,
122
+ "content": "Fig. 1: Illustration of discriminative scoring with BERT on n-best hypotheses. Each hypothesis is individually encoded by BERT and represented by CLS; it is then followed by a feed-forward NN to compute a sentence-level second-pass LM score. The scores are then interpolated with first pass scores for reranking."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.508,
128
+ 0.492,
129
+ 0.915,
130
+ 0.583
131
+ ],
132
+ "angle": 0,
133
+ "content": "shows that a pretrained BERT model that is then fine-tuned on LibriSpeech data can outperform BERT trained from scratch on LibriSpeech clean/other test sets by \\(4.4\\% / 3.2\\%\\) WER relative, demonstrating the effectiveness of pretraining in BERT. They also show that BERT can outperform GPT [12] with comparable model size and pretraining data, which the authors argue is due to the bidirectional nature of BERT."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.508,
139
+ 0.585,
140
+ 0.916,
141
+ 0.821
142
+ ],
143
+ "angle": 0,
144
+ "content": "In this work, we propose a method to train BERT-style rescoreing models with a discriminative objective, to leverage the aforementioned benefits from both approaches. Typically, pseudo log-likelihood (PLL) [9, 10]—the sum of the negative log-likelihoods of each individual token given the bidirectional context—is used to rescore n-best output to improve WER, a computationally expensive process, particularly for longer sentences. For discriminative training, this issue is exacerbated as the PLL computation needs to be repeated for each hypothesis individually. The previous work [10] solves this issue by distilling the PLL into a single score prediction at the start-of-sentence token (CLS). In this work, illustrated in Figure 1, we extend this approach and use the score from the CLS representation to perform discriminative training, as discussed in Section 2.2.2, with either MWER loss or a novel discriminative training loss dubbed matching word error distribution (MWED), described in Section 2.2.1. Finally, in Section 2.2.3 we propose a fusion strategy that incorporates the MLM into the discriminative training process, giving further improvements."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.508,
150
+ 0.821,
151
+ 0.915,
152
+ 0.914
153
+ ],
154
+ "angle": 0,
155
+ "content": "We name the aforementioned approach RescoreBERT, and evaluate it on four datasets covering multiple domains and locales. Results show that discriminative training significantly improves upon non-discriminative BERT rescoring, on all test sets. The new MWED training loss is found to be a strong alternative to MWER. The results also show that the fusion approach for incorporating MLM into discriminative training can further improve WER. Lastly,"
156
+ },
157
+ {
158
+ "type": "aside_text",
159
+ "bbox": [
160
+ 0.023,
161
+ 0.258,
162
+ 0.058,
163
+ 0.721
164
+ ],
165
+ "angle": 270,
166
+ "content": "arXiv:2202.01094v3 [eess.AS] 18 Feb 2022"
167
+ },
168
+ {
169
+ "type": "page_footnote",
170
+ "bbox": [
171
+ 0.109,
172
+ 0.9,
173
+ 0.424,
174
+ 0.913
175
+ ],
176
+ "angle": 0,
177
+ "content": "2Work done as an applied scientist intern at Amazon Alexa."
178
+ }
179
+ ],
180
+ [
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.084,
185
+ 0.093,
186
+ 0.49,
187
+ 0.133
188
+ ],
189
+ "angle": 0,
190
+ "content": "to achieve lower latency for streaming applications, we develop a method to further distill the model while maintaining WER improvements."
191
+ },
192
+ {
193
+ "type": "title",
194
+ "bbox": [
195
+ 0.234,
196
+ 0.151,
197
+ 0.341,
198
+ 0.164
199
+ ],
200
+ "angle": 0,
201
+ "content": "2. APPROACH"
202
+ },
203
+ {
204
+ "type": "title",
205
+ "bbox": [
206
+ 0.085,
207
+ 0.176,
208
+ 0.37,
209
+ 0.19
210
+ ],
211
+ "angle": 0,
212
+ "content": "2.1. BERT Without Discriminative Training"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.084,
218
+ 0.197,
219
+ 0.489,
220
+ 0.225
221
+ ],
222
+ "angle": 0,
223
+ "content": "In this section, we review previous work on BERT rescoring models that are not trained with discriminative objective."
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.085,
229
+ 0.241,
230
+ 0.3,
231
+ 0.255
232
+ ],
233
+ "angle": 0,
234
+ "content": "2.1.1. Pseudo log-likelihood (PLL)"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.084,
240
+ 0.262,
241
+ 0.489,
242
+ 0.316
243
+ ],
244
+ "angle": 0,
245
+ "content": "Let \\( E = (e_1, \\dots, e_{|E|}) \\) be a sequence of tokens. Similar to the log-likelihood of a sequence as is commonly used in CLM scoring [13, 14, 15], the pseudo log-likelihood (PLL) of a sequence using an MLM, first introduced by [9], is defined as:"
246
+ },
247
+ {
248
+ "type": "equation",
249
+ "bbox": [
250
+ 0.186,
251
+ 0.326,
252
+ 0.488,
253
+ 0.364
254
+ ],
255
+ "angle": 0,
256
+ "content": "\\[\n\\operatorname {P L L} (E) = - \\sum_ {t = 1} ^ {| E |} \\log P \\left(e _ {t} \\mid E _ {\\backslash t}\\right) \\tag {1}\n\\]"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.084,
262
+ 0.374,
263
+ 0.49,
264
+ 0.442
265
+ ],
266
+ "angle": 0,
267
+ "content": "where \\( E_{\\backslash t} = (\\dots, e_{t-1}, [\\text{MASK}], e_{t+1}, \\dots) \\) is the sequence whose corresponding position is replaced by the [MASK] token used in MLM pretraining. The PLL is thus the sum of the negative log-likelihoods of each token given the bidirectional context, with lower scores indicating more probable sequences."
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.085,
273
+ 0.457,
274
+ 0.269,
275
+ 0.471
276
+ ],
277
+ "angle": 0,
278
+ "content": "2.1.2. MLM distillation (MD)"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.083,
284
+ 0.478,
285
+ 0.49,
286
+ 0.69
287
+ ],
288
+ "angle": 0,
289
+ "content": "Although the PLL demonstrates good performance for second-pass rescoring [9], it is not computationally efficient: \\( |E| \\) additional sequences masking every position need to be generated and encoded by BERT, thus the computation due to PLL is on the order of \\( |E| \\) times that of an autoregressive Transformer model of similar size. Following [10], one can \"distill\" the PLL calculation into a single utterance-level score using the CLS representation, such that the model is able to approximate PLL, while eliminating the need for masking \\( |E| \\) times, as well as the large vocabulary softmax in \\( P(e_t|E_{\\backslash t}) \\), thereby reducing computation significantly. As shown in the equations below, each sentence \\( E_i \\) is individually encoded by BERT, represented by the hidden state of the CLS token in the last Transformers layer, denoted by \\( g_i \\). An additional layer is stacked on top of the CLS hidden states to produce the score \\( s_i^l \\) for \\( E_i \\). The distillation is achieved by training the model to mimic PLL scores using mean squared error (MSE) regression loss:"
290
+ },
291
+ {
292
+ "type": "equation",
293
+ "bbox": [
294
+ 0.228,
295
+ 0.697,
296
+ 0.488,
297
+ 0.715
298
+ ],
299
+ "angle": 0,
300
+ "content": "\\[\ng _ {i} = \\operatorname {B E R T} ^ {\\mathrm {C L S}} \\left(E _ {i}\\right) \\tag {2}\n\\]"
301
+ },
302
+ {
303
+ "type": "equation",
304
+ "bbox": [
305
+ 0.228,
306
+ 0.717,
307
+ 0.488,
308
+ 0.734
309
+ ],
310
+ "angle": 0,
311
+ "content": "\\[\ns _ {i} ^ {l} = \\operatorname {F F N N} (g _ {i}) \\tag {3}\n\\]"
312
+ },
313
+ {
314
+ "type": "equation",
315
+ "bbox": [
316
+ 0.212,
317
+ 0.737,
318
+ 0.488,
319
+ 0.754
320
+ ],
321
+ "angle": 0,
322
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {M D}} = \\left| s _ {i} ^ {l} - \\operatorname {P L L} \\left(E _ {i}\\right) \\right| ^ {2} \\tag {4}\n\\]"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.084,
328
+ 0.765,
329
+ 0.49,
330
+ 0.819
331
+ ],
332
+ "angle": 0,
333
+ "content": "FFNN denotes the learnable feed-forward neural network, \\( s_i^l \\) is the predicted PLL approximation, and \\( \\mathrm{PLL}(E_i) \\) is precomputed offline using Eq. (1). Note that the PLL can be computed by a larger teacher model."
334
+ },
335
+ {
336
+ "type": "title",
337
+ "bbox": [
338
+ 0.085,
339
+ 0.836,
340
+ 0.35,
341
+ 0.85
342
+ ],
343
+ "angle": 0,
344
+ "content": "2.2. BERT With Discriminative Training"
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.084,
350
+ 0.857,
351
+ 0.49,
352
+ 0.915
353
+ ],
354
+ "angle": 0,
355
+ "content": "We now propose methods for training BERT with discriminative objective functions. For any utterance, let \\(\\vec{E} = \\{E_1,\\dots,E_n\\}\\) be the n-best hypotheses obtained from beam search in the first-pass decoder. For any \\(E_{i}\\in \\vec{E}\\), let \\(s_i^a\\) be its given score from the first pass, and"
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.509,
361
+ 0.093,
362
+ 0.916,
363
+ 0.172
364
+ ],
365
+ "angle": 0,
366
+ "content": "\\(s_i^t\\) be the score from the second pass (same as Eq. (3)), with lower scores for more likely hypotheses for both; let \\(\\epsilon_{i}\\) be its number of word errors (edit distance) from the ground truth transcription. Following the common theme of second pass rescoring approaches, the final score \\(s_i\\) is the linear combination of the first-pass and second-pass scores:"
367
+ },
368
+ {
369
+ "type": "equation",
370
+ "bbox": [
371
+ 0.652,
372
+ 0.178,
373
+ 0.915,
374
+ 0.194
375
+ ],
376
+ "angle": 0,
377
+ "content": "\\[\ns _ {i} = s _ {i} ^ {a} + \\beta \\cdot s _ {i} ^ {l}, \\tag {5}\n\\]"
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.509,
383
+ 0.201,
384
+ 0.916,
385
+ 0.241
386
+ ],
387
+ "angle": 0,
388
+ "content": "where \\(\\beta\\) is the hyperparameter controlling the second-pass contribution. \\(s_i\\) is then used to compute discriminative loss, as defined next."
389
+ },
390
+ {
391
+ "type": "title",
392
+ "bbox": [
393
+ 0.51,
394
+ 0.257,
395
+ 0.724,
396
+ 0.271
397
+ ],
398
+ "angle": 0,
399
+ "content": "2.2.1. Discriminative loss function"
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.51,
405
+ 0.278,
406
+ 0.912,
407
+ 0.29
408
+ ],
409
+ "angle": 0,
410
+ "content": "We explore two discriminative loss functions: MWER and MWED."
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.509,
416
+ 0.291,
417
+ 0.915,
418
+ 0.343
419
+ ],
420
+ "angle": 0,
421
+ "content": "MWER (Minimum word error rate): A standard discriminative loss function for ASR rescoring is MWER [7]. The training minimizes the expected number of word errors for the n-best hypotheses:"
422
+ },
423
+ {
424
+ "type": "equation",
425
+ "bbox": [
426
+ 0.651,
427
+ 0.352,
428
+ 0.914,
429
+ 0.383
430
+ ],
431
+ "angle": 0,
432
+ "content": "\\[\nP _ {i} = \\frac {e ^ {- s _ {i}}}{\\sum_ {j = 1} ^ {n} e ^ {- s _ {j}}} \\tag {6}\n\\]"
433
+ },
434
+ {
435
+ "type": "equation",
436
+ "bbox": [
437
+ 0.647,
438
+ 0.388,
439
+ 0.914,
440
+ 0.421
441
+ ],
442
+ "angle": 0,
443
+ "content": "\\[\n\\bar {\\epsilon} _ {H} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\epsilon_ {i} \\tag {7}\n\\]"
444
+ },
445
+ {
446
+ "type": "equation",
447
+ "bbox": [
448
+ 0.624,
449
+ 0.425,
450
+ 0.914,
451
+ 0.459
452
+ ],
453
+ "angle": 0,
454
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {M W E R}} = \\sum_ {i = 1} ^ {n} P _ {i} \\cdot \\left(\\epsilon_ {i} - \\bar {\\epsilon} _ {H}\\right). \\tag {8}\n\\]"
455
+ },
456
+ {
457
+ "type": "text",
458
+ "bbox": [
459
+ 0.509,
460
+ 0.466,
461
+ 0.915,
462
+ 0.557
463
+ ],
464
+ "angle": 0,
465
+ "content": "\\(P_{i}\\) is the posterior probability of each hypothesis, normalized over the hypotheses list from the first pass, such that higher probabilities indicate preferred hypotheses. \\(s_i\\) is the final score of the hypothesis as in Eq. (5). The MWER loss \\(\\mathcal{L}_{\\mathrm{MWER}}\\) represents the expected number of relative word errors, with \\(\\bar{\\epsilon}_H\\) being the averaged word errors across the n-best list, which does not change the optima but helps to reduce the variance."
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.509,
471
+ 0.558,
472
+ 0.916,
473
+ 0.675
474
+ ],
475
+ "angle": 0,
476
+ "content": "MWED (Matching word error distribution): MWED is a new loss function proposed here. Its goal is to mimic the distribution of n-best word errors through the predicted scores. As a result, the ranking of final scores should ideally be exactly the same as ranking by the word errors, which could potentially lead to better score interpolation at evaluation. By contrast, the model trained with the existing MWER loss as in Eq (8) picks the best hypothesis discriminatively, such that the full probability mass should be assigned to the one with minimum word errors in the ideal case."
477
+ },
478
+ {
479
+ "type": "text",
480
+ "bbox": [
481
+ 0.535,
482
+ 0.676,
483
+ 0.813,
484
+ 0.69
485
+ ],
486
+ "angle": 0,
487
+ "content": "The MWED loss is proposed as the following:"
488
+ },
489
+ {
490
+ "type": "equation",
491
+ "bbox": [
492
+ 0.663,
493
+ 0.696,
494
+ 0.914,
495
+ 0.728
496
+ ],
497
+ "angle": 0,
498
+ "content": "\\[\nd _ {i} ^ {\\epsilon} = \\frac {e ^ {\\epsilon_ {i}}}{\\sum_ {j = 1} ^ {n} e ^ {\\epsilon_ {j}}} \\tag {9}\n\\]"
499
+ },
500
+ {
501
+ "type": "equation",
502
+ "bbox": [
503
+ 0.663,
504
+ 0.73,
505
+ 0.914,
506
+ 0.763
507
+ ],
508
+ "angle": 0,
509
+ "content": "\\[\nd _ {i} ^ {s} = \\frac {e ^ {s _ {i} / T}}{\\sum_ {j = 1} ^ {n} e ^ {s _ {j} / T}} \\tag {10}\n\\]"
510
+ },
511
+ {
512
+ "type": "equation",
513
+ "bbox": [
514
+ 0.634,
515
+ 0.766,
516
+ 0.914,
517
+ 0.801
518
+ ],
519
+ "angle": 0,
520
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {M W E D}} = - \\sum_ {i = 1} ^ {n} d _ {i} ^ {e} \\log d _ {i} ^ {s} \\tag {11}\n\\]"
521
+ },
522
+ {
523
+ "type": "text",
524
+ "bbox": [
525
+ 0.509,
526
+ 0.808,
527
+ 0.916,
528
+ 0.914
529
+ ],
530
+ "angle": 0,
531
+ "content": "\\(d_{i}^{\\epsilon}\\) and \\(d_{i}^{s}\\) represent the relative distribution of word errors and predicted scores over the n-best list. \\(\\mathcal{L}_{\\mathrm{MWED}}\\) is the cross-entropy from scores to word errors, equivalent to optimizing the Kullback-Leibler divergence between the two distributions. Due to that \\(s_i\\) contains \\(s_i^a\\) which is fixed, to stabilize the match of the two distributions, we add a hyperparameter \\(T\\) to rescale the distribution mass of \\(s_i\\). In practice, we found that \\(T = \\sum_{i=1}^{n} s_i / \\sum_{i=1}^{n} \\epsilon_i\\) can yield good performance."
532
+ }
533
+ ],
534
+ [
535
+ {
536
+ "type": "title",
537
+ "bbox": [
538
+ 0.085,
539
+ 0.093,
540
+ 0.355,
541
+ 0.107
542
+ ],
543
+ "angle": 0,
544
+ "content": "2.2.2. Training with discriminative loss only"
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.083,
550
+ 0.113,
551
+ 0.488,
552
+ 0.193
553
+ ],
554
+ "angle": 0,
555
+ "content": "Training BERT naively with discriminative loss using word-level scores, as done in [4, 6], requires computation of Eq. (1) for every hypothesis and is prohibitively expensive during both training and inference. Instead, it can be fine-tuned such that the sentence-level score from the CLS representation (as in Eq. (3)) minimizes the discriminative loss \\(\\mathcal{L}_{\\mathrm{MWER}}\\) or \\(\\mathcal{L}_{\\mathrm{MWED}}\\) defined earlier."
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.084,
561
+ 0.193,
562
+ 0.488,
563
+ 0.233
564
+ ],
565
+ "angle": 0,
566
+ "content": "In Section 4, we show results using this approach with \\(\\mathcal{L}_{\\mathrm{MWER}}\\), labeled \"MWER Only\", where we perform MWER training on a pretrained BERT with domain adaptation."
567
+ },
568
+ {
569
+ "type": "title",
570
+ "bbox": [
571
+ 0.084,
572
+ 0.248,
573
+ 0.449,
574
+ 0.262
575
+ ],
576
+ "angle": 0,
577
+ "content": "2.2.3. Training with combined MLM and discriminative loss"
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.083,
583
+ 0.269,
584
+ 0.489,
585
+ 0.322
586
+ ],
587
+ "angle": 0,
588
+ "content": "We propose a fusion strategy to incorporate MLM distillation into discriminative training. It is accomplished by making two modifications to the approach in Section 2.2.2, where only discriminative loss is applied."
589
+ },
590
+ {
591
+ "type": "text",
592
+ "bbox": [
593
+ 0.083,
594
+ 0.322,
595
+ 0.488,
596
+ 0.401
597
+ ],
598
+ "angle": 0,
599
+ "content": "First, we apply a pretraining step using MD alone on a large-scale text-only corpus, so that the discriminative training can be warm-started from a better initialization point. Unlike MWER training, MD only needs text-only data and their PLL scores computed by a teacher model. Therefore, the distillation itself can be trained on much more data than the n-best hypotheses used in MWER training."
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.084,
605
+ 0.401,
606
+ 0.489,
607
+ 0.428
608
+ ],
609
+ "angle": 0,
610
+ "content": "Second, we introduce the new loss \\(\\mathcal{L}\\) to replace \\(\\mathcal{L}_{\\mathrm{MWER}}\\) or \\(\\mathcal{L}_{\\mathrm{MWED}}\\) in the discriminative training step:"
611
+ },
612
+ {
613
+ "type": "equation",
614
+ "bbox": [
615
+ 0.171,
616
+ 0.438,
617
+ 0.488,
618
+ 0.472
619
+ ],
620
+ "angle": 0,
621
+ "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {D i s c r i m i n a t i v e}} + \\lambda \\cdot \\sum_ {i = 1} ^ {n} \\mathcal {L} _ {\\mathrm {M D}} \\left(E _ {i}\\right), \\tag {12}\n\\]"
622
+ },
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.083,
627
+ 0.481,
628
+ 0.489,
629
+ 0.573
630
+ ],
631
+ "angle": 0,
632
+ "content": "where \\(\\mathcal{L}_{\\text{Discriminative}}\\) is the discriminative loss that can be either \\(\\mathcal{L}_{\\text{MWER}}\\) or \\(\\mathcal{L}_{\\text{MWED}}\\). \\(\\mathcal{L}_{\\text{MD}}\\) is similar to cross-entropy regularization added to MWER loss in [6], and controlled by the hyperparameter \\(\\lambda\\). It is found that MD pretraining is more important than adding additional MD loss. On top of MD pretraining, having an additional step of adding MD loss yields less than \\(0.5\\%\\) relative improvement from all experiments."
633
+ },
634
+ {
635
+ "type": "text",
636
+ "bbox": [
637
+ 0.084,
638
+ 0.573,
639
+ 0.489,
640
+ 0.614
641
+ ],
642
+ "angle": 0,
643
+ "content": "In Section 4, we show the results using this approach with \\(\\mathcal{L}_{\\mathrm{MWER}}\\) and \\(\\mathcal{L}_{\\mathrm{MWED}}\\), which are named MD-MWER and MD-MWED, respectively."
644
+ },
645
+ {
646
+ "type": "title",
647
+ "bbox": [
648
+ 0.22,
649
+ 0.63,
650
+ 0.353,
651
+ 0.643
652
+ ],
653
+ "angle": 0,
654
+ "content": "3. EXPERIMENTS"
655
+ },
656
+ {
657
+ "type": "title",
658
+ "bbox": [
659
+ 0.084,
660
+ 0.656,
661
+ 0.174,
662
+ 0.669
663
+ ],
664
+ "angle": 0,
665
+ "content": "3.1. Datasets"
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.083,
671
+ 0.676,
672
+ 0.489,
673
+ 0.743
674
+ ],
675
+ "angle": 0,
676
+ "content": "We evaluated our approach on four datasets in multiple domains and locales to test its general applicability, including one public dataset LibriSpeech [16] and three internal dataset based on a conversational agent (one for Information (Info) domain in English (en), and two for Info and Navigation (Nav) domains in Japanese (ja))."
677
+ },
678
+ {
679
+ "type": "text",
680
+ "bbox": [
681
+ 0.083,
682
+ 0.743,
683
+ 0.489,
684
+ 0.874
685
+ ],
686
+ "angle": 0,
687
+ "content": "For LibriSpeech, an LAS model [17] is adopted as the first-pass decoder, and we use the same decoded 100-best hypotheses of the dev and test set along with their first-pass scores used by [9, 10]. Since there is no dedicated training set provided for MWER, we combine the decoded hypotheses from both dev-clean and dev-other as the MWER training set, and randomly hold out \\(10\\%\\) utterances as the MWER dev set. The resulting training/dev set has 5011/556 utterances, with up to 100 hypotheses per utterance. For MLM distillation, we sample 4 million utterances from the in-domain text corpus provided by LibriSpeech as the training set, similar to [10]."
688
+ },
689
+ {
690
+ "type": "text",
691
+ "bbox": [
692
+ 0.084,
693
+ 0.874,
694
+ 0.489,
695
+ 0.914
696
+ ],
697
+ "angle": 0,
698
+ "content": "All internal datasets consist of de-identified live user interactions with a conversational agent, decoded by a RNN-T model [18] for en or a hybrid HMM model for ja. For Info (en), \\(\\sim 100 / 5\\) hours of"
699
+ },
700
+ {
701
+ "type": "text",
702
+ "bbox": [
703
+ 0.509,
704
+ 0.093,
705
+ 0.916,
706
+ 0.186
707
+ ],
708
+ "angle": 0,
709
+ "content": "utterances are used as the MWER training/dev set; the test set has \\(\\sim 2\\) hours of long-tail info utterances. For Info and Nav in ja, we use a single MWER training/dev set that consists of \\(\\sim 220 / 10\\) hours of utterances in multiple domains including Info and Nav domains. The test set has \\(\\sim 1 / 3\\) hours of utterances for Info/Nav respectively. For MD, 4 million in-domain utterances sampled from user interactions are used as training set."
710
+ },
711
+ {
712
+ "type": "title",
713
+ "bbox": [
714
+ 0.51,
715
+ 0.204,
716
+ 0.647,
717
+ 0.217
718
+ ],
719
+ "angle": 0,
720
+ "content": "3.2. Implementation"
721
+ },
722
+ {
723
+ "type": "text",
724
+ "bbox": [
725
+ 0.509,
726
+ 0.225,
727
+ 0.916,
728
+ 0.397
729
+ ],
730
+ "angle": 0,
731
+ "content": "For LibriSpeech, we use the uncased \\(\\mathrm{BERT}_{\\mathrm{Base}}\\) for our experiments to enable direct comparison with previous work. For internal datasets, we use an in-house multilingual BERT of \\(\\sim 170\\mathrm{M}\\) parameters (excluding embedding size) with 16 layers and 1024 hidden size, supporting both en and ja locales. In addition, our final MWER setting also includes two smaller BERT models of \\(\\sim 17\\mathrm{M} / 5\\mathrm{M}\\) parameters distilled [19] from the 170M BERT model, both with only 4 layers and 768/320 hidden size respectively. All BERT models are implemented in PyTorch and pretrained on public data. We limit the maximum sequence length to 128 for LibriSpeech, and 32 for others; longer utterances will be truncated. The \\(\\lambda\\) parameter in Eq. (12) is set to \\(10^{-4}\\). We found that \\(\\lambda\\) between \\(10^{-4}\\) and \\(10^{-3}\\) generally yield same performance."
732
+ },
733
+ {
734
+ "type": "text",
735
+ "bbox": [
736
+ 0.509,
737
+ 0.397,
738
+ 0.916,
739
+ 0.515
740
+ ],
741
+ "angle": 0,
742
+ "content": "Before we conducted any training, we first performed domain adaptation for BERT, as BERT contains general world knowledge from pretraining but not necessarily task-specific knowledge. Therefore, we take the pretrained BERT and further train with the MLM objective on the in-domain corpus. For LibriSpeech, we train 400K steps on the provided text corpus, similar to [10]. For internal datasets, we train 200K steps on the in-domain transcriptions for each of the en and ja locales. Each step has an effective batch size of 256 utterances for both of these cases."
743
+ },
744
+ {
745
+ "type": "title",
746
+ "bbox": [
747
+ 0.51,
748
+ 0.534,
749
+ 0.755,
750
+ 0.547
751
+ ],
752
+ "angle": 0,
753
+ "content": "3.3. Baseline and Evaluation Protocol"
754
+ },
755
+ {
756
+ "type": "text",
757
+ "bbox": [
758
+ 0.509,
759
+ 0.555,
760
+ 0.915,
761
+ 0.648
762
+ ],
763
+ "angle": 0,
764
+ "content": "For LibriSpeech, we use the results of the MLM distillation (MD) as the baseline, which can be seen as our re-implementation of the \"sentence-level fine-tuning\" results from [10], which has the same low-latency scoring as our MWER setting. We also provide the results of high-latency PLL scores for comparison. WER is used as the evaluation metric, and the optimal interpolation weight \\(\\beta\\) in Eq. (5) is linearly searched on the dev set."
765
+ },
766
+ {
767
+ "type": "text",
768
+ "bbox": [
769
+ 0.509,
770
+ 0.649,
771
+ 0.915,
772
+ 0.754
773
+ ],
774
+ "angle": 0,
775
+ "content": "For internal datasets, we use the LM scoring from a 2-layer LSTM trained with noise contrastive estimation (NCE) [20] as the baseline, which is often employed in industrial settings for streaming applications. The LSTM is trained on the same data used for domain adaptation for BERT. New scores from BERT replace the existing LSTM scores, and the optimal weight is searched on the dev set as well. We report the relative improvements in WER for en, and in CER (character error rate) for ja."
776
+ },
777
+ {
778
+ "type": "title",
779
+ "bbox": [
780
+ 0.611,
781
+ 0.773,
782
+ 0.815,
783
+ 0.786
784
+ ],
785
+ "angle": 0,
786
+ "content": "4. RESULTS AND ANALYSIS"
787
+ },
788
+ {
789
+ "type": "title",
790
+ "bbox": [
791
+ 0.51,
792
+ 0.8,
793
+ 0.859,
794
+ 0.813
795
+ ],
796
+ "angle": 0,
797
+ "content": "4.1. Comparing Different BERT Rescoring Approaches"
798
+ },
799
+ {
800
+ "type": "text",
801
+ "bbox": [
802
+ 0.509,
803
+ 0.821,
804
+ 0.915,
805
+ 0.914
806
+ ],
807
+ "angle": 0,
808
+ "content": "Tables 1(a) and (b) show the evaluation results on both LibriSpeech and internal datasets. Here, PLL denotes the approach in Section 2.1.1 which is computationally expensive; MD denotes the approach in Section 2.1.2 that distills the PLL score; MWER Only denotes the approach described in Section 2.2.2 that trains BERT with the MWER objective only; MD-MWER and MD-MWED denote the approaches described in Section 2.2.3 that incorporate MLM into"
809
+ }
810
+ ],
811
+ [
812
+ {
813
+ "type": "text",
814
+ "bbox": [
815
+ 0.084,
816
+ 0.089,
817
+ 0.488,
818
+ 0.143
819
+ ],
820
+ "angle": 0,
821
+ "content": "Table 1: Evaluation results on the test partitions of all datasets. Details of the baseline and evaluation protocol are described in Section 3.3. The case of \"MWED only\" is not included, and the relative difference between it and \"MWER only\" is within \\(1\\%\\) for all tests."
822
+ },
823
+ {
824
+ "type": "text",
825
+ "bbox": [
826
+ 0.084,
827
+ 0.149,
828
+ 0.488,
829
+ 0.174
830
+ ],
831
+ "angle": 0,
832
+ "content": "(a) WER on the two test sets of LibriSpeech using \\(\\mathrm{BERT_{Base}}\\). Numbers inside parentheses are relative improvements compared to the baseline."
833
+ },
834
+ {
835
+ "type": "table",
836
+ "bbox": [
837
+ 0.145,
838
+ 0.179,
839
+ 0.434,
840
+ 0.287
841
+ ],
842
+ "angle": 0,
843
+ "content": "<table><tr><td></td><td>Test-Clean</td><td>Test-Other</td></tr><tr><td>First-Pass</td><td>7.26</td><td>20.37</td></tr><tr><td>PLL</td><td>4.54</td><td>16.08</td></tr><tr><td>Baseline (MD)</td><td>4.67</td><td>16.15</td></tr><tr><td>MWER Only</td><td>4.82 (-3.2%)</td><td>16.35 (-1.2%)</td></tr><tr><td>MD-MWER</td><td>4.42 (5.3%)</td><td>15.87 (1.7%)</td></tr><tr><td>MD-MWED</td><td>4.36 (6.6%)</td><td>15.60 (3.4%)</td></tr></table>"
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.084,
849
+ 0.29,
850
+ 0.488,
851
+ 0.315
852
+ ],
853
+ "angle": 0,
854
+ "content": "(b) Relative improvements of WER (for en) and CER (for ja) on three internal datasets, using the in-house 170M BERT model."
855
+ },
856
+ {
857
+ "type": "table",
858
+ "bbox": [
859
+ 0.145,
860
+ 0.32,
861
+ 0.434,
862
+ 0.408
863
+ ],
864
+ "angle": 0,
865
+ "content": "<table><tr><td></td><td>Info (en)</td><td>Info (ja)</td><td>Nav (ja)</td></tr><tr><td>LSTM</td><td>Baseline</td><td>Baseline</td><td>Baseline</td></tr><tr><td>MD</td><td>2.6%</td><td>3.7%</td><td>5.6%</td></tr><tr><td>MWER Only</td><td>5.3%</td><td>11.8%</td><td>11.2%</td></tr><tr><td>MD-MWER</td><td>4.0%</td><td>12.0%</td><td>12.7%</td></tr><tr><td>MD-MWED</td><td>6.6%</td><td>10.4%</td><td>12.2%</td></tr></table>"
866
+ },
867
+ {
868
+ "type": "text",
869
+ "bbox": [
870
+ 0.083,
871
+ 0.415,
872
+ 0.488,
873
+ 0.441
874
+ ],
875
+ "angle": 0,
876
+ "content": "discriminative training with MWER and MWED loss functions (as in Section 2.2.1), respectively."
877
+ },
878
+ {
879
+ "type": "text",
880
+ "bbox": [
881
+ 0.083,
882
+ 0.442,
883
+ 0.488,
884
+ 0.732
885
+ ],
886
+ "angle": 0,
887
+ "content": "Based on these results, we can make three observations. First, discriminative training significantly improves upon nondiscriminative BERT rescoring (MD) across all test sets: \\(6.6\\% / 3.4\\%\\) WER relative improvement on LibriSpeech and \\(4\\% / 8.3\\% / 7.1\\%\\) relative WER reduction on internal datasets. What is particularly striking is that on LibriSpeech, because of discriminative training, both MD-MWER and MD-MWED now outperform the much more computationally expensive PLL approach. Second, the fusion approach of incorporating MLM in discriminative training improves on all test sets. The effect is particularly strong on the LibriSpeech test sets, where MWER Only would actually perform worse than MD with \\(3.2\\% / 1.2\\%\\) relative WER degradations, but where the fusion approach now gives \\(5.3\\% / 1.7\\%\\) relative improvement over MD. Third, to compare the new loss function MWED with the existing MWER, MD-MWER achieves better performance on both LibriSpeech \\((1.3\\%)\\) and \\(1.7\\%\\) relative) and Info (en) \\((2.6\\%)\\) relative) over MD-MWER, but worse than MD-MWER on both ja test sets. This result shows that MWED can be a strong alternative loss in the MWER training, and the final performance can be dataset-specific. One potential explanation for MWED being less effective for ja is that the CER distribution is spikier than the WER distribution, resulting in less stable gradients from the relative entropy in Eq. (11)."
888
+ },
889
+ {
890
+ "type": "title",
891
+ "bbox": [
892
+ 0.084,
893
+ 0.747,
894
+ 0.377,
895
+ 0.762
896
+ ],
897
+ "angle": 0,
898
+ "content": "4.2. Low Latency for Streaming Applications"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.084,
904
+ 0.769,
905
+ 0.488,
906
+ 0.796
907
+ ],
908
+ "angle": 0,
909
+ "content": "A main challenge when applying second-pass rescoring for streaming applications is to keep user-perceived latency low while obtain-"
910
+ },
911
+ {
912
+ "type": "image",
913
+ "bbox": [
914
+ 0.119,
915
+ 0.803,
916
+ 0.455,
917
+ 0.88
918
+ ],
919
+ "angle": 0,
920
+ "content": null
921
+ },
922
+ {
923
+ "type": "image_caption",
924
+ "bbox": [
925
+ 0.084,
926
+ 0.889,
927
+ 0.488,
928
+ 0.93
929
+ ],
930
+ "angle": 0,
931
+ "content": "Fig. 2: Diagram of training the distilled 5M-parameter BERT model with the fusion strategy described in Section 2.2.3); the 170M BERT model is the teacher for calculating PLL scores."
932
+ },
933
+ {
934
+ "type": "text",
935
+ "bbox": [
936
+ 0.509,
937
+ 0.089,
938
+ 0.915,
939
+ 0.116
940
+ ],
941
+ "angle": 0,
942
+ "content": "Table 2: Performance and latency study of our approach using BERT of different parameter sizes on three internal datasets."
943
+ },
944
+ {
945
+ "type": "text",
946
+ "bbox": [
947
+ 0.509,
948
+ 0.123,
949
+ 0.915,
950
+ 0.147
951
+ ],
952
+ "angle": 0,
953
+ "content": "(a) Relative improvements over LSTM (4M) on three internal datasets, using the best setting (MD-MWER/MWED) according to Table 1b for each dataset."
954
+ },
955
+ {
956
+ "type": "table",
957
+ "bbox": [
958
+ 0.595,
959
+ 0.153,
960
+ 0.835,
961
+ 0.215
962
+ ],
963
+ "angle": 0,
964
+ "content": "<table><tr><td>BERT</td><td>Info (en)</td><td>Info (ja)</td><td>Nav (ja)</td></tr><tr><td>170M</td><td>6.6%</td><td>12.0%</td><td>12.7%</td></tr><tr><td>17M</td><td>3.5%</td><td>9.1%</td><td>9.4%</td></tr><tr><td>5M</td><td>3.1%</td><td>7.8%</td><td>7.8%</td></tr></table>"
965
+ },
966
+ {
967
+ "type": "text",
968
+ "bbox": [
969
+ 0.509,
970
+ 0.218,
971
+ 0.915,
972
+ 0.254
973
+ ],
974
+ "angle": 0,
975
+ "content": "(b) Averaged latency (in ms) of each batch using 2 threads on a CPU, with a batch size of 5 hypotheses. SL: input sequence/hypothesis length. Parentheses indicate relative latency compared to LSTM."
976
+ },
977
+ {
978
+ "type": "table",
979
+ "bbox": [
980
+ 0.522,
981
+ 0.26,
982
+ 0.907,
983
+ 0.31
984
+ ],
985
+ "angle": 0,
986
+ "content": "<table><tr><td>SL</td><td>LSTM (4M)</td><td>BERT (5M)</td><td>BERT (17M)</td><td>BERT (170M)</td></tr><tr><td>16</td><td>9.7</td><td>7.6 (78%)</td><td>17.5 (180%)</td><td>180 (1.8k%)</td></tr><tr><td>32</td><td>18.7</td><td>11.0 (59%)</td><td>26.3 (141%)</td><td>270 (1.4k%)</td></tr></table>"
987
+ },
988
+ {
989
+ "type": "text",
990
+ "bbox": [
991
+ 0.508,
992
+ 0.319,
993
+ 0.915,
994
+ 0.451
995
+ ],
996
+ "angle": 0,
997
+ "content": "ing the accuracy gains. We next examine how to further distill the model to reduce its compute footprint and hence, achieve even lower latency. We focus on the internal datasets for this task. As described in Section 3.2, in addition to the 170M BERT model, we have 17M and 5M BERT models distilled [19] from it. To achieve low latency, we can perform MD-MWER or MD-MWED starting from this smaller model, except that to maintain WER gains, we use the 170M BERT model to compute PLL scores in MD training, as well as in the final combined loss. This training process is illustrated in Figure 2."
998
+ },
999
+ {
1000
+ "type": "text",
1001
+ "bbox": [
1002
+ 0.508,
1003
+ 0.452,
1004
+ 0.915,
1005
+ 0.544
1006
+ ],
1007
+ "angle": 0,
1008
+ "content": "Table 2a shows the relative improvements using BERT models with 170M, 17M, and 5M parameters described in Section 3.2 with the best settings for each dataset. Smaller BERT is expected to yield less improvement; nevertheless, the degradation is within a relatively small margin even for the much smaller 5M version that has only \\(\\sim 3\\%\\) the parameters of the 170M model, with still nearly \\(8\\%\\) improvement on two ja datasets and \\(3 + \\%\\) on Info (en)."
1009
+ },
1010
+ {
1011
+ "type": "text",
1012
+ "bbox": [
1013
+ 0.508,
1014
+ 0.545,
1015
+ 0.915,
1016
+ 0.665
1017
+ ],
1018
+ "angle": 0,
1019
+ "content": "Table 2b shows the latency comparison among three BERT models and the NCE-based LSTM of 4M parameters, using the PyTorch benchmarking tool under direct model inference in Python using 2 threads on a CPU. 5M BERT is shown to be faster than LSTM, while 17M BERT is slower but also appears comparable. Overall, Table 2 shows that our proposed approach can be a superior substitute for LSTM scoring in deployed systems. In particular, from Table 2a and 2b, 5M BERT significantly outperforms a similar-sized 4M LSTM, both in WER (by \\(3.1\\% /7.8\\% /7.8\\%\\) ) and in latency (by \\(22\\% /41\\%\\))."
1020
+ },
1021
+ {
1022
+ "type": "title",
1023
+ "bbox": [
1024
+ 0.65,
1025
+ 0.688,
1026
+ 0.776,
1027
+ 0.701
1028
+ ],
1029
+ "angle": 0,
1030
+ "content": "5. CONCLUSION"
1031
+ },
1032
+ {
1033
+ "type": "text",
1034
+ "bbox": [
1035
+ 0.508,
1036
+ 0.715,
1037
+ 0.915,
1038
+ 0.913
1039
+ ],
1040
+ "angle": 0,
1041
+ "content": "We have proposed a method to train a BERT rescoring model with discriminative objective functions. We show that discriminative training can significantly improve BERT rescoring on a variety of datasets: \\(6.6\\% / 3.4\\%\\) relative WER improvement on LibriSpeech and \\(4\\% / 8.3\\% / 7.1\\%\\) relative WER improvement on internal voice assistant datasets. The proposed fusion strategy to incorporate MLM into discriminative training is found to further reduce WER. We also propose a new discriminative loss MWED that is a strong alternative to the standard MWER loss, yielding \\(1.3\\%\\) and \\(1.7\\%\\) relative WER improvement over MWER loss on LibriSpeech and \\(2.6\\%\\) relative improvement on one internal dataset. Lastly, we show how to further distill the model to achieve even lower latency for streaming applications while preserving WER improvements. We achieve \\(3 - 8\\%\\) relative WER improvement and lower latency compared to a baseline LSTM model on internal data."
1042
+ }
1043
+ ],
1044
+ [
1045
+ {
1046
+ "type": "title",
1047
+ "bbox": [
1048
+ 0.225,
1049
+ 0.092,
1050
+ 0.348,
1051
+ 0.105
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "6. REFERENCES"
1055
+ },
1056
+ {
1057
+ "type": "ref_text",
1058
+ "bbox": [
1059
+ 0.093,
1060
+ 0.117,
1061
+ 0.489,
1062
+ 0.197
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "[1] Yingce Xia, Fei Tian, Lijun Wu, Jianxin Lin, Tao Qin, Nenghai Yu, and Tie-Yan Liu, \"Deliberation networks: Sequence generation beyond one-pass decoding,\" in Advances in Neural Information Processing Systems, I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, Eds. 2017, vol. 30, Curran Associates, Inc."
1066
+ },
1067
+ {
1068
+ "type": "ref_text",
1069
+ "bbox": [
1070
+ 0.093,
1071
+ 0.2,
1072
+ 0.489,
1073
+ 0.268
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "[2] Tara N. Sainath, Ruoming Pang, David Rybach, Yanzhang He, Rohit Prabhavalkar, Wei Li, Mirkó Visontai, Qiao Liang, Trevor Strohman, Yonghui Wu, Ian McGraw, and Chung-Cheng Chiu, “Two-Pass End-to-End Speech Recognition,” in Proc. Interspeech, 2019, pp. 2773–2777."
1077
+ },
1078
+ {
1079
+ "type": "ref_text",
1080
+ "bbox": [
1081
+ 0.093,
1082
+ 0.269,
1083
+ 0.489,
1084
+ 0.322
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "[3] Ke Hu, Tara N Sainath, Ruoming Pang, and Rohit Prabhavalkar, “Deliberation model based two-pass end-to-end speech recognition,” in Proc. IEEE ICASSP, 2020, pp. 7799–7803."
1088
+ },
1089
+ {
1090
+ "type": "ref_text",
1091
+ "bbox": [
1092
+ 0.093,
1093
+ 0.326,
1094
+ 0.489,
1095
+ 0.367
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "[4] Ankur Gandhi and Ariya Rastrow, \"Audio-attention discriminative language model for asr rescoring,\" in Proc. IEEE ICASSP, 2020, pp. 7944-7948."
1099
+ },
1100
+ {
1101
+ "type": "ref_text",
1102
+ "bbox": [
1103
+ 0.093,
1104
+ 0.369,
1105
+ 0.489,
1106
+ 0.422
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "[5] Ke Hu, Ruoming Pang, Tara N Sainath, and Trevor Strohman, \"Transformer based deliberation for two-pass speech recognition,\" in Proc. IEEE Spoken Language Technology Workshop, 2021, pp. 68-74."
1110
+ },
1111
+ {
1112
+ "type": "ref_text",
1113
+ "bbox": [
1114
+ 0.093,
1115
+ 0.426,
1116
+ 0.489,
1117
+ 0.48
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "[6] Takaaki Hori, Chiori Hori, Shinji Watanabe, and J. Hershey, \"Minimum word error training of long short-term memory recurrent neural network language models for speech recognition,\" in Proc. IEEE ICASSP, 2016, pp. 5990-5994."
1121
+ },
1122
+ {
1123
+ "type": "ref_text",
1124
+ "bbox": [
1125
+ 0.093,
1126
+ 0.482,
1127
+ 0.489,
1128
+ 0.549
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": "[7] Rohit Prabhavalkar, Tara N Sainath, Yonghui Wu, Patrick Nguyen, Zhifeng Chen, Chung-Cheng Chiu, and Anjuli Kannan, “Minimum word error rate training for attention-based sequence-to-sequence models,” in Proc. IEEE ICASSP, 2018, pp. 4839–4843."
1132
+ },
1133
+ {
1134
+ "type": "ref_text",
1135
+ "bbox": [
1136
+ 0.093,
1137
+ 0.552,
1138
+ 0.489,
1139
+ 0.632
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": "[8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova, “BERT: Pre-training of deep bidirectional transformers for language understanding,” in Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, Minneapolis, Minnesota, June 2019, pp. 4171–4186."
1143
+ },
1144
+ {
1145
+ "type": "ref_text",
1146
+ "bbox": [
1147
+ 0.093,
1148
+ 0.635,
1149
+ 0.489,
1150
+ 0.687
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "[9] Joonbo Shin, Yoonhyung Lee, and Kyomin Jung, \"Effective sentence scoring method using BERT for speech recognition,\" in Asian Conference on Machine Learning. PMLR, 2019, pp. 1081-1093."
1154
+ },
1155
+ {
1156
+ "type": "ref_text",
1157
+ "bbox": [
1158
+ 0.087,
1159
+ 0.691,
1160
+ 0.489,
1161
+ 0.758
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": "[10] Julian Salazar, Davis Liang, Toan Q. Nguyen, and Katrin Kirchhoff, “Masked language model scoring,” in Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020, pp. 2699–2712, Association for Computational Linguistics."
1165
+ },
1166
+ {
1167
+ "type": "ref_text",
1168
+ "bbox": [
1169
+ 0.087,
1170
+ 0.76,
1171
+ 0.489,
1172
+ 0.813
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "[11] Shih-Hsuan Chiu and Berlin Chen, “Innovative BERT-based reranking language models for speech recognition,” in Proc. IEEE Spoken Language Technology Workshop, 2021, pp. 266–271."
1176
+ },
1177
+ {
1178
+ "type": "ref_text",
1179
+ "bbox": [
1180
+ 0.087,
1181
+ 0.817,
1182
+ 0.489,
1183
+ 0.857
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": "[12] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever, \"Language models are unsupervised multitask learners,\" OpenAI Blog 1(8), 2019."
1187
+ },
1188
+ {
1189
+ "type": "ref_text",
1190
+ "bbox": [
1191
+ 0.087,
1192
+ 0.86,
1193
+ 0.489,
1194
+ 0.914
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "[13] Caglar Gulcehre, Orhan Firat, Kelvin Xu, Kyunghyun Cho, Loic Barrault, Huei-Chi Lin, Fethi Bougares, Holger Schwenk, and Yoshua Bengio, \"On using monolingual corpora in neural machine translation,\" arXiv preprint arXiv:1503.03535, 2015."
1198
+ },
1199
+ {
1200
+ "type": "list",
1201
+ "bbox": [
1202
+ 0.087,
1203
+ 0.117,
1204
+ 0.489,
1205
+ 0.914
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": null
1209
+ },
1210
+ {
1211
+ "type": "ref_text",
1212
+ "bbox": [
1213
+ 0.513,
1214
+ 0.093,
1215
+ 0.914,
1216
+ 0.159
1217
+ ],
1218
+ "angle": 0,
1219
+ "content": "[14] Felix Stahlberg, James Cross, and Veselin Stoyanov, \"Simple fusion: Return of the language model,\" in Proceedings of the Third Conference on Machine Translation: Research Papers, Brussels, Belgium, Oct. 2018, pp. 204-211, Association for Computational Linguistics."
1220
+ },
1221
+ {
1222
+ "type": "ref_text",
1223
+ "bbox": [
1224
+ 0.513,
1225
+ 0.163,
1226
+ 0.914,
1227
+ 0.229
1228
+ ],
1229
+ "angle": 0,
1230
+ "content": "[15] Xie Chen, Anton Ragni, Xunying Liu, and Mark JF Gales, \"Investigating bidirectional recurrent neural network language models for speech recognition,\" in Proc. Interspeech. International Speech Communication Association (ISCA), 2017, pp. 269-273."
1231
+ },
1232
+ {
1233
+ "type": "ref_text",
1234
+ "bbox": [
1235
+ 0.513,
1236
+ 0.234,
1237
+ 0.914,
1238
+ 0.287
1239
+ ],
1240
+ "angle": 0,
1241
+ "content": "[16] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur, \"LibriSpeech: an ASR corpus based on public domain audio books,\" in Proc. IEEE ICASSP, 2015, pp. 5206-5210."
1242
+ },
1243
+ {
1244
+ "type": "ref_text",
1245
+ "bbox": [
1246
+ 0.513,
1247
+ 0.291,
1248
+ 0.914,
1249
+ 0.346
1250
+ ],
1251
+ "angle": 0,
1252
+ "content": "[17] William Chan, Navdeep Jaitly, Quoc Le, and Oriol Vinyals, \"Listen, Attend and Spell: A neural network for large vocabulary conversational speech recognition,\" in Proc. IEEE ICASSP, 2016, pp. 4960-4964."
1253
+ },
1254
+ {
1255
+ "type": "ref_text",
1256
+ "bbox": [
1257
+ 0.513,
1258
+ 0.349,
1259
+ 0.914,
1260
+ 0.416
1261
+ ],
1262
+ "angle": 0,
1263
+ "content": "[18] Yanzhang He, Tara N Sainath, Rohit Prabhavalkar, Ian McGraw, Raziel Alvarez, Ding Zhao, David Rybach, Anjuli Kannan, Yonghui Wu, Ruoming Pang, et al., \"Streaming end-to-end speech recognition for mobile devices,\" in Proc. IEEE ICASSP, 2019, pp. 6381-6385."
1264
+ },
1265
+ {
1266
+ "type": "ref_text",
1267
+ "bbox": [
1268
+ 0.513,
1269
+ 0.42,
1270
+ 0.914,
1271
+ 0.461
1272
+ ],
1273
+ "angle": 0,
1274
+ "content": "[19] Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean, “Distilling the knowledge in a neural network,” in NIPS Deep Learning and Representation Learning Workshop, 2015."
1275
+ },
1276
+ {
1277
+ "type": "ref_text",
1278
+ "bbox": [
1279
+ 0.513,
1280
+ 0.464,
1281
+ 0.914,
1282
+ 0.519
1283
+ ],
1284
+ "angle": 0,
1285
+ "content": "[20] Anirudh Raju, Denis Filimonov, Gautam Tiwari, Guitang Lan, and Ariya Rastrow, \"Scalable multi corpora neural language models for ASR,\" in Proc. Interspeech, Gernot Kubin and Zdravko Kacic, Eds. 2019, pp. 3910-3914, ISCA."
1286
+ },
1287
+ {
1288
+ "type": "list",
1289
+ "bbox": [
1290
+ 0.513,
1291
+ 0.093,
1292
+ 0.914,
1293
+ 0.519
1294
+ ],
1295
+ "angle": 0,
1296
+ "content": null
1297
+ }
1298
+ ]
1299
+ ]
2202.01xxx/2202.01094/df2346ee-d8b7-4b8c-9abf-5431f77d269b_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddb0b82a437ae9e0933f5b90a6726026ed68e3e03646421a5be4d948927d1968
3
+ size 786044
2202.01xxx/2202.01094/full.md ADDED
@@ -0,0 +1,229 @@
1
+ # RESCOREBERT: DISCRIMINATIVE SPEECH RECOGNITION RESCORING WITH BERT
2
+
3
+ Liyan Xu $^{1,2}$ Yile Gu $^{1}$ Jari Kolehmainen $^{1}$ Haidar Khan $^{1}$ Ankur Gandhi $^{1}$ Ariya Rastrow $^{1}$ Andreas Stolcke $^{1}$ Ivan Bulyko $^{1}$
4
+
5
+ $^{1}$ Amazon Alexa AI, USA $^{2}$ Emory University, USA
6
+
7
+ # ABSTRACT
8
+
9
+ Second-pass rescoring is an important component in automatic speech recognition (ASR) systems that is used to improve the outputs from a first-pass decoder by implementing a lattice rescoring or $n$ -best re-ranking. While pretraining with a masked language model (MLM) objective has received great success in various natural language understanding (NLU) tasks, it has not gained traction as a rescoring model for ASR. Specifically, training a bidirectional model like BERT on a discriminative objective such as minimum WER (MWER) has not been explored. Here we show how to train a BERT-based rescoring model with MWER loss, to incorporate the improvements of a discriminative loss into fine-tuning of deep bidirectional pretrained models for ASR. Specifically, we propose a fusion strategy that incorporates the MLM into the discriminative training process to effectively distill knowledge from a pretrained model. We further propose an alternative discriminative loss. This approach, which we call RescoreBERT, reduces WER by $6.6\% /3.4\%$ relative on the LibriSpeech clean/other test sets over a BERT baseline without discriminative objective. We also evaluate our method on an internal dataset from a conversational agent and find that it reduces both latency and WER (by 3 to $8\%$ relative) over an LSTM rescoring model.
10
+
11
+ Index Terms— masked language model, BERT, second-pass rescoring, pretrained model, minimum WER training
12
+
13
+ # 1. INTRODUCTION
14
+
15
+ The two-pass paradigm has been widely adopted in state-of-the-art ASR systems [1, 2, 3, 4, 5], where the first pass generates n-best hypotheses, and the second pass reranks them. For the second-pass rescoring models, discriminative training with MWER (minimum WER) objective is typically applied [3, 6, 7, 4] to improve performance, such that the model learns to prefer hypotheses with the lowest WER.
16
+
17
+ Previous work with discriminative training uses causal language models (CLMs), such as LSTMs or Transformer LMs. While pretrained masked language models (MLMs) such as BERT [8] have been highly successful on various natural language understanding (NLU) tasks, they have not been widely applied in second-pass ASR rescoring. Meanwhile, recent studies have shown promising results using BERT in several rescoring studies [9, 10, 11], as BERT is pretrained with large corpora and encodes the full hypothesis context using a deep bidirectional model architecture. In particular, previous work [9] shows that deep bidirectional Transformers, such as BERT, can outperform their unidirectional counterparts (either forward text, backward text, or the two models combined). Another paper [10]
18
+
19
+ ![](images/62b155f0969b45c61b6cb5566be073bd276cd787532528c02448108d29452881.jpg)
20
+ Fig. 1: Illustration of discriminative scoring with BERT on n-best hypotheses. Each hypothesis is individually encoded by BERT and represented by CLS; it is then followed by a feed-forward NN to compute a sentence-level second-pass LM score. The scores are then interpolated with first pass scores for reranking.
21
+
22
+ shows that a pretrained BERT model that is then fine-tuned on LibriSpeech data can outperform BERT trained from scratch on LibriSpeech clean/other test sets by $4.4\% / 3.2\%$ WER relative, demonstrating the effectiveness of pretraining in BERT. They also show that BERT can outperform GPT [12] with comparable model size and pretraining data, which the authors argue is due to the bidirectional nature of BERT.
23
+
24
+ In this work, we propose a method to train BERT-style rescoring models with a discriminative objective, to leverage the aforementioned benefits from both approaches. Typically, pseudo log-likelihood (PLL) [9, 10]—the sum of the negative log-likelihoods of each individual token given the bidirectional context—is used to rescore n-best output to improve WER, a computationally expensive process, particularly for longer sentences. For discriminative training, this issue is exacerbated as the PLL computation needs to be repeated for each hypothesis individually. The previous work [10] solves this issue by distilling the PLL into a single score prediction at the start-of-sentence token (CLS). In this work, illustrated in Figure 1, we extend this approach and use the score from the CLS representation to perform discriminative training, as discussed in Section 2.2.2, with either MWER loss or a novel discriminative training loss dubbed matching word error distribution (MWED), described in Section 2.2.1. Finally, in Section 2.2.3 we propose a fusion strategy that incorporates the MLM into the discriminative training process, giving further improvements.
25
+
26
+ We name the aforementioned approach RescoreBERT, and evaluate it on four datasets covering multiple domains and locales. Results show that discriminative training significantly improves upon non-discriminative BERT rescoring, on all test sets. The new MWED training loss is found to be a strong alternative to MWER. The results also show that the fusion approach for incorporating MLM into discriminative training can further improve WER. Lastly,
27
+
28
+ to achieve lower latency for streaming applications, we develop a method to further distill the model while maintaining WER improvements.
29
+
30
+ # 2. APPROACH
31
+
32
+ # 2.1. BERT Without Discriminative Training
33
+
34
+ In this section, we review previous work on BERT rescoring models that are not trained with discriminative objective.
35
+
36
+ # 2.1.1. Pseudo log-likelihood (PLL)
37
+
38
+ Let $E = (e_1, \dots, e_{|E|})$ be a sequence of tokens. Similar to the log-likelihood of a sequence as is commonly used in CLM scoring [13, 14, 15], the pseudo log-likelihood (PLL) of a sequence using an MLM, first introduced by [9], is defined as:
39
+
40
+ $$
41
+ \operatorname {P L L} (E) = - \sum_ {t = 1} ^ {| E |} \log P \left(e _ {t} \mid E _ {\backslash t}\right) \tag {1}
42
+ $$
43
+
44
+ where $E_{\backslash t} = (\dots, e_{t-1}, [\text{MASK}], e_{t+1}, \dots)$ is the sequence with the token at position $t$ replaced by the [MASK] token used in MLM pretraining. The PLL is thus the sum of the negative log-likelihoods of each token given the bidirectional context, with lower scores indicating more probable sequences.
45
+
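+ To make Eq. (1) concrete, the sketch below scores a sentence with a Hugging Face masked LM. This is an illustrative sketch, not the paper's code: the `bert-base-uncased` checkpoint and the per-token loop are our assumptions, and in practice the $|E|$ masked copies can be batched.
+
+ ```python
+ # Hypothetical sketch of Eq. (1): mask each token in turn and sum the
+ # negative log-likelihood of the original token under the MLM.
+ import torch
+ from transformers import BertForMaskedLM, BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ model = BertForMaskedLM.from_pretrained("bert-base-uncased").eval()
+
+ def pll(sentence: str) -> float:
+     ids = tokenizer(sentence, return_tensors="pt")["input_ids"][0]
+     total = 0.0
+     for t in range(1, len(ids) - 1):         # skip [CLS] and [SEP]
+         masked = ids.clone()
+         masked[t] = tokenizer.mask_token_id  # build E_\t
+         with torch.no_grad():
+             logits = model(masked.unsqueeze(0)).logits[0, t]
+         total -= torch.log_softmax(logits, -1)[ids[t]].item()
+     return total                             # lower = more probable
+ ```
+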
46
+ # 2.1.2. MLM distillation (MD)
47
+
48
+ Although the PLL demonstrates good performance for second-pass rescoring [9], it is not computationally efficient: $|E|$ additional sequences, each masking one position, need to be generated and encoded by BERT, so the PLL computation is on the order of $|E|$ times that of an autoregressive Transformer model of similar size. Following [10], one can "distill" the PLL calculation into a single utterance-level score using the CLS representation, such that the model is able to approximate PLL while eliminating the need for masking $|E|$ times, as well as the large vocabulary softmax in $P(e_t|E_{\backslash t})$, thereby reducing computation significantly. As shown in the equations below, each sentence $E_i$ is individually encoded by BERT and represented by the hidden state of the CLS token in the last Transformer layer, denoted by $g_i$. An additional layer is stacked on top of the CLS hidden state to produce the score $s_i^l$ for $E_i$. The distillation is achieved by training the model to mimic PLL scores using a mean squared error (MSE) regression loss:
49
+
50
+ $$
51
+ g_i = \operatorname{BERT}^{\mathrm{CLS}}(E_i) \tag{2}
52
+ $$
53
+
54
+ $$
55
+ s_i^l = \operatorname{FFNN}(g_i) \tag{3}
56
+ $$
57
+
58
+ $$
59
+ \mathcal{L}_{\mathrm{MD}} = \left| s_i^l - \operatorname{PLL}(E_i) \right|^2 \tag{4}
60
+ $$
61
+
62
+ FFNN denotes the learnable feed-forward neural network, $s_i^l$ is the predicted PLL approximation, and $\mathrm{PLL}(E_i)$ is precomputed offline using Eq. (1). Note that the PLL can be computed by a larger teacher model.
63
+
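+ A minimal PyTorch sketch of Eqs. (2)–(4) follows; the single linear layer standing in for the FFNN and the checkpoint name are illustrative assumptions, not the paper's exact configuration.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ from transformers import BertModel, BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ encoder = BertModel.from_pretrained("bert-base-uncased")
+ ffnn = nn.Linear(encoder.config.hidden_size, 1)    # stands in for Eq. (3)
+
+ def md_loss(hypotheses, pll_targets):
+     """hypotheses: list of strings; pll_targets: precomputed PLLs, Eq. (1)."""
+     batch = tokenizer(hypotheses, return_tensors="pt", padding=True)
+     g = encoder(**batch).last_hidden_state[:, 0]   # CLS states, Eq. (2)
+     s = ffnn(g).squeeze(-1)                        # predicted PLL approximation
+     return nn.functional.mse_loss(s, pll_targets)  # Eq. (4)
+ ```
+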
64
+ # 2.2. BERT With Discriminative Training
65
+
66
+ We now propose methods for training BERT with discriminative objective functions. For any utterance, let $\vec{E} = \{E_1,\dots,E_n\}$ be the n-best hypotheses obtained from beam search in the first-pass decoder. For any $E_{i}\in \vec{E}$ , let $s_i^a$ be its given score from the first pass, and
67
+
68
+ $s_i^l$ be the score from the second pass (same as Eq. (3)), where for both, lower scores indicate more likely hypotheses; let $\epsilon_{i}$ be its number of word errors (edit distance) from the ground truth transcription. Following the common theme of second-pass rescoring approaches, the final score $s_i$ is a linear combination of the first-pass and second-pass scores:
69
+
70
+ $$
71
+ s_i = s_i^a + \beta \cdot s_i^l, \tag{5}
72
+ $$
73
+
74
+ where $\beta$ is the hyperparameter controlling the second-pass contribution. $s_i$ is then used to compute the discriminative loss, as defined next.
75
+
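+ As a concrete illustration of how Eq. (5) is used at evaluation time (the grid range for $\beta$ below is our assumption, not a value from the paper):
+
+ ```python
+ import numpy as np
+
+ def rerank(s_a, s_l, beta):
+     """Pick the hypothesis minimizing Eq. (5); lower scores are better."""
+     return int(np.argmin(np.asarray(s_a) + beta * np.asarray(s_l)))
+
+ # beta itself is chosen by linear search on the dev set (Section 3.3),
+ # e.g. evaluating WER with rerank(...) for beta in np.arange(0.0, 2.0, 0.05).
+ ```
+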
76
+ # 2.2.1. Discriminative loss function
77
+
78
+ We explore two discriminative loss functions: MWER and MWED.
79
+
80
+ MWER (Minimum word error rate): A standard discriminative loss function for ASR rescoring is MWER [7]. The training minimizes the expected number of word errors for the n-best hypotheses:
81
+
82
+ $$
83
+ P_i = \frac{e^{-s_i}}{\sum_{j=1}^{n} e^{-s_j}} \tag{6}
84
+ $$
85
+
86
+ $$
87
+ \bar{\epsilon}_H = \frac{1}{n} \sum_{i=1}^{n} \epsilon_i \tag{7}
88
+ $$
89
+
90
+ $$
91
+ \mathcal{L}_{\mathrm{MWER}} = \sum_{i=1}^{n} P_i \cdot \left(\epsilon_i - \bar{\epsilon}_H\right). \tag{8}
92
+ $$
93
+
94
+ $P_{i}$ is the posterior probability of each hypothesis, normalized over the hypothesis list from the first pass, such that higher probabilities indicate preferred hypotheses. $s_i$ is the final score of the hypothesis as in Eq. (5). The MWER loss $\mathcal{L}_{\mathrm{MWER}}$ represents the expected number of relative word errors, with $\bar{\epsilon}_H$ being the average number of word errors across the n-best list; subtracting it does not change the optimum but helps reduce variance.
95
+
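+ Eqs. (6)–(8) translate directly into a few lines of PyTorch; this is a sketch under the assumption that scores and word-error counts arrive as 1-D float tensors over one utterance's n-best list:
+
+ ```python
+ import torch
+
+ def mwer_loss(s, eps):
+     """s: final scores s_i (lower = better); eps: word errors per hypothesis."""
+     P = torch.softmax(-s, dim=-1)       # Eq. (6): posterior over the n-best
+     eps_bar = eps.mean()                # Eq. (7): average word errors
+     return (P * (eps - eps_bar)).sum()  # Eq. (8): expected relative errors
+ ```
+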
96
+ MWED (Matching word error distribution): MWED is a new loss function proposed here. Its goal is to mimic the distribution of n-best word errors through the predicted scores. As a result, the ranking of the final scores should ideally match the ranking by word errors exactly, which could potentially lead to better score interpolation at evaluation. By contrast, a model trained with the existing MWER loss as in Eq. (8) picks the best hypothesis discriminatively, such that in the ideal case the full probability mass is assigned to the hypothesis with the minimum number of word errors.
97
+
98
+ The MWED loss is defined as follows:
99
+
100
+ $$
101
+ d_i^{\epsilon} = \frac{e^{\epsilon_i}}{\sum_{j=1}^{n} e^{\epsilon_j}} \tag{9}
102
+ $$
103
+
104
+ $$
105
+ d_i^{s} = \frac{e^{s_i / T}}{\sum_{j=1}^{n} e^{s_j / T}} \tag{10}
106
+ $$
107
+
108
+ $$
109
+ \mathcal{L}_{\mathrm{MWED}} = -\sum_{i=1}^{n} d_i^{\epsilon} \log d_i^{s} \tag{11}
110
+ $$
111
+
112
+ $d_{i}^{\epsilon}$ and $d_{i}^{s}$ represent the relative distributions of word errors and predicted scores over the n-best list. $\mathcal{L}_{\mathrm{MWED}}$ is the cross-entropy from scores to word errors, equivalent to optimizing the Kullback-Leibler divergence between the two distributions. Because $s_i$ contains the fixed first-pass score $s_i^a$, we add a hyperparameter $T$ to rescale the distribution mass of $s_i$ and stabilize the match between the two distributions. In practice, we found that $T = \sum_{i=1}^{n} s_i / \sum_{i=1}^{n} \epsilon_i$ can yield good performance.
113
+
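+ A matching sketch of Eqs. (9)–(11), using the temperature heuristic $T = \sum_i s_i / \sum_i \epsilon_i$ reported above:
+
+ ```python
+ import torch
+
+ def mwed_loss(s, eps):
+     """s: final scores s_i; eps: word errors (float tensor)."""
+     T = s.sum() / eps.sum()                     # temperature heuristic
+     d_eps = torch.softmax(eps, dim=-1)          # Eq. (9)
+     log_d_s = torch.log_softmax(s / T, dim=-1)  # Eq. (10)
+     return -(d_eps * log_d_s).sum()             # Eq. (11): cross-entropy
+ ```
+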
114
+ # 2.2.2. Training with discriminative loss only
115
+
116
+ Training BERT naively with a discriminative loss using word-level scores, as done in [4, 6], requires computing Eq. (1) for every hypothesis and is prohibitively expensive during both training and inference. Instead, BERT can be fine-tuned such that the sentence-level score from the CLS representation (as in Eq. (3)) minimizes the discriminative loss $\mathcal{L}_{\mathrm{MWER}}$ or $\mathcal{L}_{\mathrm{MWED}}$ defined earlier.
117
+
118
+ In Section 4, we show results using this approach with $\mathcal{L}_{\mathrm{MWER}}$ , labeled "MWER Only", where we perform MWER training on a pretrained BERT with domain adaptation.
119
+
120
+ # 2.2.3. Training with combined MLM and discriminative loss
121
+
122
+ We propose a fusion strategy to incorporate MLM distillation into discriminative training. It is accomplished by making two modifications to the approach in Section 2.2.2, where only discriminative loss is applied.
123
+
124
+ First, we apply a pretraining step using MD alone on a large-scale text-only corpus, so that the discriminative training can be warm-started from a better initialization point. Unlike MWER training, MD needs only text data and the corresponding PLL scores computed by a teacher model. Therefore, the distillation itself can be trained on much more data than the n-best hypotheses used in MWER training.
125
+
126
+ Second, we introduce the new loss $\mathcal{L}$ to replace $\mathcal{L}_{\mathrm{MWER}}$ or $\mathcal{L}_{\mathrm{MWED}}$ in the discriminative training step:
127
+
128
+ $$
129
+ \mathcal{L} = \mathcal{L}_{\text{Discriminative}} + \lambda \cdot \sum_{i=1}^{n} \mathcal{L}_{\mathrm{MD}}\left(E_i\right), \tag{12}
130
+ $$
131
+
132
+ where $\mathcal{L}_{\text{Discriminative}}$ is the discriminative loss, either $\mathcal{L}_{\text{MWER}}$ or $\mathcal{L}_{\text{MWED}}$. $\mathcal{L}_{\text{MD}}$ is similar to the cross-entropy regularization added to the MWER loss in [6], and is controlled by the hyperparameter $\lambda$. We find that MD pretraining is more important than the additional MD loss term: on top of MD pretraining, adding the MD loss yields less than $0.5\%$ relative improvement across all experiments.
133
+
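+ In code, the fusion objective of Eq. (12) simply adds the hypothesis-summed MD term to either discriminative loss. This sketch reuses `mwer_loss`/`mwed_loss` from above; the variable names are illustrative:
+
+ ```python
+ import torch
+
+ lam = 1e-4  # lambda in Eq. (12); 1e-4 to 1e-3 reportedly perform the same
+
+ def fused_loss(s, eps, s_l, pll_targets, discriminative=mwer_loss):
+     # lambda-weighted sum of L_MD over the n-best hypotheses, Eq. (12)
+     md = torch.nn.functional.mse_loss(s_l, pll_targets, reduction="sum")
+     return discriminative(s, eps) + lam * md
+ ```
+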
134
+ In Section 4, we show the results using this approach with $\mathcal{L}_{\mathrm{MWER}}$ and $\mathcal{L}_{\mathrm{MWED}}$ , which are named MD-MWER and MD-MWED, respectively.
135
+
136
+ # 3. EXPERIMENTS
137
+
138
+ # 3.1. Datasets
139
+
140
+ We evaluated our approach on four datasets spanning multiple domains and locales to test its general applicability: one public dataset, LibriSpeech [16], and three internal datasets based on a conversational agent (one for the Information (Info) domain in English (en), and two for the Info and Navigation (Nav) domains in Japanese (ja)).
141
+
142
+ For LibriSpeech, an LAS model [17] is adopted as the first-pass decoder, and we use the same decoded 100-best hypotheses of the dev and test sets, along with their first-pass scores, as used by [9, 10]. Since no dedicated training set is provided for MWER, we combine the decoded hypotheses from both dev-clean and dev-other as the MWER training set, and randomly hold out $10\%$ of the utterances as the MWER dev set. The resulting training/dev set has 5011/556 utterances, with up to 100 hypotheses per utterance. For MLM distillation, we sample 4 million utterances from the in-domain text corpus provided by LibriSpeech as the training set, similar to [10].
143
+
144
+ All internal datasets consist of de-identified live user interactions with a conversational agent, decoded by an RNN-T model [18] for en or a hybrid HMM model for ja. For Info (en), $\sim 100 / 5$ hours of
145
+
146
+ utterances are used as the MWER training/dev set; the test set has $\sim 2$ hours of long-tail Info utterances. For Info and Nav in ja, we use a single MWER training/dev set that consists of $\sim 220 / 10$ hours of utterances in multiple domains, including the Info and Nav domains. The test set has $\sim 1 / 3$ hours of utterances for Info/Nav, respectively. For MD, 4 million in-domain utterances sampled from user interactions are used as the training set.
147
+
148
+ # 3.2. Implementation
149
+
150
+ For LibriSpeech, we use the uncased $\mathrm{BERT}_{\mathrm{Base}}$ for our experiments to enable direct comparison with previous work. For internal datasets, we use an in-house multilingual BERT of $\sim 170\mathrm{M}$ parameters (excluding embeddings) with 16 layers and a hidden size of 1024, supporting both the en and ja locales. In addition, our final MWER setting also includes two smaller BERT models of $\sim 17\mathrm{M} / 5\mathrm{M}$ parameters distilled [19] from the 170M BERT model, both with only 4 layers and hidden sizes of 768/320, respectively. All BERT models are implemented in PyTorch and pretrained on public data. We limit the maximum sequence length to 128 for LibriSpeech and 32 for the others; longer utterances are truncated. The $\lambda$ parameter in Eq. (12) is set to $10^{-4}$; we found that any $\lambda$ between $10^{-4}$ and $10^{-3}$ generally yields the same performance.
151
+
152
+ Before any training, we first perform domain adaptation for BERT, as BERT contains general world knowledge from pretraining but not necessarily task-specific knowledge. Therefore, we take the pretrained BERT and further train it with the MLM objective on the in-domain corpus. For LibriSpeech, we train 400K steps on the provided text corpus, similar to [10]. For internal datasets, we train 200K steps on the in-domain transcriptions for each of the en and ja locales. Each step has an effective batch size of 256 utterances in both cases.
153
+
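+ Domain adaptation of this kind is a standard continued-MLM run; a hedged sketch with the Hugging Face Trainer follows. The file name, masking probability, and batch-size placement are our assumptions, chosen only to echo the settings above:
+
+ ```python
+ from datasets import load_dataset
+ from transformers import (AutoModelForMaskedLM, AutoTokenizer,
+                           DataCollatorForLanguageModeling, Trainer,
+                           TrainingArguments)
+
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
+ # "in_domain.txt" is a hypothetical file of in-domain transcriptions.
+ corpus = load_dataset("text", data_files="in_domain.txt")["train"]
+ corpus = corpus.map(lambda b: tokenizer(b["text"], truncation=True,
+                                         max_length=128),
+                     batched=True, remove_columns=["text"])
+
+ trainer = Trainer(
+     model=model,
+     args=TrainingArguments("mlm_adapt", max_steps=400_000,
+                            per_device_train_batch_size=256),
+     train_dataset=corpus,
+     data_collator=DataCollatorForLanguageModeling(tokenizer,
+                                                   mlm_probability=0.15),
+ )
+ trainer.train()
+ ```
+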
154
+ # 3.3. Baseline and Evaluation Protocol
155
+
156
+ For LibriSpeech, we use the results of MLM distillation (MD) as the baseline; this can be seen as our re-implementation of the "sentence-level fine-tuning" setup from [10] and has the same low-latency scoring as our MWER setting. We also provide the results of the high-latency PLL scores for comparison. WER is used as the evaluation metric, and the optimal interpolation weight $\beta$ in Eq. (5) is found by linear search on the dev set.
157
+
158
+ For internal datasets, we use the LM scoring from a 2-layer LSTM trained with noise contrastive estimation (NCE) [20] as the baseline, a setup often employed in industrial settings for streaming applications. The LSTM is trained on the same data used for domain adaptation of BERT. New scores from BERT replace the existing LSTM scores, and the optimal interpolation weight is likewise found by search on the dev set. We report the relative improvements in WER for en, and in CER (character error rate) for ja.
159
+
160
+ # 4. RESULTS AND ANALYSIS
161
+
162
+ # 4.1. Comparing Different BERT Rescoring Approaches
163
+
164
+ Tables 1(a) and (b) show the evaluation results on both LibriSpeech and internal datasets. Here, PLL denotes the approach in Section 2.1.1 which is computationally expensive; MD denotes the approach in Section 2.1.2 that distills the PLL score; MWER Only denotes the approach described in Section 2.2.2 that trains BERT with the MWER objective only; MD-MWER and MD-MWED denote the approaches described in Section 2.2.3 that incorporate MLM into
165
+
166
+ Table 1: Evaluation results on the test partitions of all datasets. Details of the baseline and evaluation protocol are described in Section 3.3. The "MWED Only" case is not included, as its relative difference from "MWER Only" is within $1\%$ on all tests.
167
+
168
+ (a) WER on the two test sets of LibriSpeech using $\mathrm{BERT_{Base}}$ . Numbers inside parentheses are relative improvements compared to the baseline.
169
+
170
+ <table><tr><td></td><td>Test-Clean</td><td>Test-Other</td></tr><tr><td>First-Pass</td><td>7.26</td><td>20.37</td></tr><tr><td>PLL</td><td>4.54</td><td>16.08</td></tr><tr><td>Baseline (MD)</td><td>4.67</td><td>16.15</td></tr><tr><td>MWER Only</td><td>4.82 (-3.2%)</td><td>16.35 (-1.2%)</td></tr><tr><td>MD-MWER</td><td>4.42 (5.3%)</td><td>15.87 (1.7%)</td></tr><tr><td>MD-MWED</td><td>4.36 (6.6%)</td><td>15.60 (3.4%)</td></tr></table>
171
+
172
+ (b) Relative improvements of WER (for en) and CER (for ja) on three internal datasets, using the in-house 170M BERT model.
173
+
174
+ <table><tr><td></td><td>Info (en)</td><td>Info (ja)</td><td>Nav (ja)</td></tr><tr><td>LSTM</td><td>Baseline</td><td>Baseline</td><td>Baseline</td></tr><tr><td>MD</td><td>2.6%</td><td>3.7%</td><td>5.6%</td></tr><tr><td>MWER Only</td><td>5.3%</td><td>11.8%</td><td>11.2%</td></tr><tr><td>MD-MWER</td><td>4.0%</td><td>12.0%</td><td>12.7%</td></tr><tr><td>MD-MWED</td><td>6.6%</td><td>10.4%</td><td>12.2%</td></tr></table>
175
+
176
+ discriminative training with MWER and MWED loss functions (as in Section 2.2.1), respectively.
177
+
178
+ Based on these results, we can make three observations. First, discriminative training significantly improves upon non-discriminative BERT rescoring (MD) across all test sets: $6.6\% / 3.4\%$ relative WER improvement on LibriSpeech and $4\% / 8.3\% / 7.1\%$ relative WER reduction on the internal datasets. What is particularly striking is that on LibriSpeech, thanks to discriminative training, both MD-MWER and MD-MWED now outperform the much more computationally expensive PLL approach. Second, the fusion approach of incorporating MLM into discriminative training improves results on all test sets. The effect is particularly strong on the LibriSpeech test sets, where MWER Only actually performs worse than MD, with $3.2\% / 1.2\%$ relative WER degradations, but where the fusion approach gives $5.3\% / 1.7\%$ relative improvement over MD. Third, comparing the new MWED loss with the existing MWER loss, MD-MWED achieves better performance than MD-MWER on both LibriSpeech ($1.3\%$ and $1.7\%$ relative) and Info (en) ($2.6\%$ relative), but worse on both ja test sets. This result shows that MWED can be a strong alternative to the MWER loss in discriminative training, and that the final performance can be dataset-specific. One potential explanation for MWED being less effective for ja is that the CER distribution is spikier than the WER distribution, resulting in less stable gradients from the relative entropy in Eq. (11).
179
+
180
+ # 4.2. Low Latency for Streaming Applications
181
+
182
+ A main challenge when applying second-pass rescoring in streaming applications is to keep user-perceived latency low while obtaining the accuracy gains.
183
+
184
+ ![](images/42560f3c33ab629bea68d19699b694aeb7ddebb030409ee06071661f8cbf8ad6.jpg)
185
+ Fig. 2: Diagram of training the distilled 5M-parameter BERT model with the fusion strategy described in Section 2.2.3; the 170M BERT model is the teacher for calculating PLL scores.
186
+
187
+ Table 2: Performance and latency study of our approach using BERT of different parameter sizes on three internal datasets.
188
+
189
+ (a) Relative improvements over LSTM (4M) on three internal datasets, using the best setting (MD-MWER/MWED) according to Table 1b for each dataset.
190
+
191
+ <table><tr><td>BERT</td><td>Info (en)</td><td>Info (ja)</td><td>Nav (ja)</td></tr><tr><td>170M</td><td>6.6%</td><td>12.0%</td><td>12.7%</td></tr><tr><td>17M</td><td>3.5%</td><td>9.1%</td><td>9.4%</td></tr><tr><td>5M</td><td>3.1%</td><td>7.8%</td><td>7.8%</td></tr></table>
192
+
193
+ (b) Averaged latency (in ms) of each batch using 2 threads on a CPU, with a batch size of 5 hypotheses. SL: input sequence/hypothesis length. Parentheses indicate relative latency compared to LSTM.
194
+
195
+ <table><tr><td>SL</td><td>LSTM (4M)</td><td>BERT (5M)</td><td>BERT (17M)</td><td>BERT (170M)</td></tr><tr><td>16</td><td>9.7</td><td>7.6 (78%)</td><td>17.5 (180%)</td><td>180 (1.8k%)</td></tr><tr><td>32</td><td>18.7</td><td>11.0 (59%)</td><td>26.3 (141%)</td><td>270 (1.4k%)</td></tr></table>
196
+
197
+ We next examine how to further distill the model to reduce its compute footprint and hence achieve even lower latency. We focus on the internal datasets for this task. As described in Section 3.2, in addition to the 170M BERT model, we have 17M and 5M BERT models distilled [19] from it. To achieve low latency, we can perform MD-MWER or MD-MWED starting from one of these smaller models, except that, to maintain WER gains, we use the 170M BERT model to compute PLL scores in MD training, as well as in the final combined loss. This training process is illustrated in Figure 2.
198
+
199
+ Table 2a shows the relative improvements using the BERT models with 170M, 17M, and 5M parameters described in Section 3.2, with the best settings for each dataset. Smaller BERT models are expected to yield less improvement; nevertheless, the degradation is within a relatively small margin even for the much smaller 5M version, which has only $\sim 3\%$ of the parameters of the 170M model, with still nearly $8\%$ improvement on the two ja datasets and over $3\%$ on Info (en).
200
+
201
+ Table 2b shows the latency comparison among the three BERT models and the NCE-based LSTM of 4M parameters, using the PyTorch benchmarking tool under direct model inference in Python with 2 threads on a CPU. The 5M BERT is shown to be faster than the LSTM, while the 17M BERT is slower but still comparable. Overall, Table 2 shows that our proposed approach can be a superior substitute for LSTM scoring in deployed systems. In particular, from Tables 2a and 2b, the 5M BERT significantly outperforms the similar-sized 4M LSTM, both in WER (by $3.1\% /7.8\% /7.8\%$ ) and in latency (by $22\% /41\%$ ).
202
+
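+ For reference, a sketch of an equivalent measurement with PyTorch's benchmarking utility (the checkpoint and placeholder text are assumptions; the thread count, batch size, and sequence length mirror Table 2b):
+
+ ```python
+ import torch
+ from torch.utils import benchmark
+ from transformers import BertModel, BertTokenizer
+
+ torch.set_num_threads(2)                 # 2 CPU threads, as in Table 2b
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ model = BertModel.from_pretrained("bert-base-uncased").eval()
+ # A batch of 5 hypotheses padded to sequence length 32.
+ batch = tokenizer(["a placeholder hypothesis"] * 5, return_tensors="pt",
+                   padding="max_length", max_length=32)
+
+ timer = benchmark.Timer(stmt="model(**batch)",
+                         globals={"model": model, "batch": batch})
+ print(timer.timeit(100))                 # mean per-batch wall time
+ ```
+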
203
+ # 5. CONCLUSION
204
+
205
+ We have proposed a method to train a BERT rescoring model with discriminative objective functions. We show that discriminative training can significantly improve BERT rescoring on a variety of datasets: $6.6\% / 3.4\%$ relative WER improvement on LibriSpeech and $4\% / 8.3\% / 7.1\%$ relative WER improvement on internal voice assistant datasets. The proposed fusion strategy to incorporate MLM into discriminative training is found to further reduce WER. We also propose a new discriminative loss MWED that is a strong alternative to the standard MWER loss, yielding $1.3\%$ and $1.7\%$ relative WER improvement over MWER loss on LibriSpeech and $2.6\%$ relative improvement on one internal dataset. Lastly, we show how to further distill the model to achieve even lower latency for streaming applications while preserving WER improvements. We achieve $3 - 8\%$ relative WER improvement and lower latency compared to a baseline LSTM model on internal data.
206
+
207
+ # 6. REFERENCES
208
+
209
+ [1] Yingce Xia, Fei Tian, Lijun Wu, Jianxin Lin, Tao Qin, Nenghai Yu, and Tie-Yan Liu, "Deliberation networks: Sequence generation beyond one-pass decoding," in Advances in Neural Information Processing Systems, I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, Eds. 2017, vol. 30, Curran Associates, Inc.
210
+ [2] Tara N. Sainath, Ruoming Pang, David Rybach, Yanzhang He, Rohit Prabhavalkar, Wei Li, Mirkó Visontai, Qiao Liang, Trevor Strohman, Yonghui Wu, Ian McGraw, and Chung-Cheng Chiu, “Two-Pass End-to-End Speech Recognition,” in Proc. Interspeech, 2019, pp. 2773–2777.
211
+ [3] Ke Hu, Tara N Sainath, Ruoming Pang, and Rohit Prabhavalkar, “Deliberation model based two-pass end-to-end speech recognition,” in Proc. IEEE ICASSP, 2020, pp. 7799–7803.
212
+ [4] Ankur Gandhe and Ariya Rastrow, "Audio-attention discriminative language model for ASR rescoring," in Proc. IEEE ICASSP, 2020, pp. 7944–7948.
213
+ [5] Ke Hu, Ruoming Pang, Tara N Sainath, and Trevor Strohman, "Transformer based deliberation for two-pass speech recognition," in Proc. IEEE Spoken Language Technology Workshop, 2021, pp. 68-74.
214
+ [6] Takaaki Hori, Chiori Hori, Shinji Watanabe, and John R. Hershey, "Minimum word error training of long short-term memory recurrent neural network language models for speech recognition," in Proc. IEEE ICASSP, 2016, pp. 5990–5994.
215
+ [7] Rohit Prabhavalkar, Tara N Sainath, Yonghui Wu, Patrick Nguyen, Zhifeng Chen, Chung-Cheng Chiu, and Anjuli Kannan, “Minimum word error rate training for attention-based sequence-to-sequence models,” in Proc. IEEE ICASSP, 2018, pp. 4839–4843.
216
+ [8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova, “BERT: Pre-training of deep bidirectional transformers for language understanding,” in Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, Minneapolis, Minnesota, June 2019, pp. 4171–4186.
217
+ [9] Joonbo Shin, Yoonhyung Lee, and Kyomin Jung, "Effective sentence scoring method using BERT for speech recognition," in Asian Conference on Machine Learning. PMLR, 2019, pp. 1081-1093.
218
+ [10] Julian Salazar, Davis Liang, Toan Q. Nguyen, and Katrin Kirchhoff, “Masked language model scoring,” in Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020, pp. 2699–2712, Association for Computational Linguistics.
219
+ [11] Shih-Hsuan Chiu and Berlin Chen, “Innovative BERT-based reranking language models for speech recognition,” in Proc. IEEE Spoken Language Technology Workshop, 2021, pp. 266–271.
220
+ [12] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever, "Language models are unsupervised multitask learners," OpenAI Blog 1(8), 2019.
221
+ [13] Caglar Gulcehre, Orhan Firat, Kelvin Xu, Kyunghyun Cho, Loic Barrault, Huei-Chi Lin, Fethi Bougares, Holger Schwenk, and Yoshua Bengio, "On using monolingual corpora in neural machine translation," arXiv preprint arXiv:1503.03535, 2015.
222
+
223
+ [14] Felix Stahlberg, James Cross, and Veselin Stoyanov, "Simple fusion: Return of the language model," in Proceedings of the Third Conference on Machine Translation: Research Papers, Brussels, Belgium, Oct. 2018, pp. 204-211, Association for Computational Linguistics.
224
+ [15] Xie Chen, Anton Ragni, Xunying Liu, and Mark JF Gales, "Investigating bidirectional recurrent neural network language models for speech recognition," in Proc. Interspeech. International Speech Communication Association (ISCA), 2017, pp. 269-273.
225
+ [16] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur, "LibriSpeech: an ASR corpus based on public domain audio books," in Proc. IEEE ICASSP, 2015, pp. 5206-5210.
226
+ [17] William Chan, Navdeep Jaitly, Quoc Le, and Oriol Vinyals, "Listen, Attend and Spell: A neural network for large vocabulary conversational speech recognition," in Proc. IEEE ICASSP, 2016, pp. 4960-4964.
227
+ [18] Yanzhang He, Tara N Sainath, Rohit Prabhavalkar, Ian McGraw, Raziel Alvarez, Ding Zhao, David Rybach, Anjuli Kannan, Yonghui Wu, Ruoming Pang, et al., "Streaming end-to-end speech recognition for mobile devices," in Proc. IEEE ICASSP, 2019, pp. 6381-6385.
228
+ [19] Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean, “Distilling the knowledge in a neural network,” in NIPS Deep Learning and Representation Learning Workshop, 2015.
229
+ [20] Anirudh Raju, Denis Filimonov, Gautam Tiwari, Guitang Lan, and Ariya Rastrow, "Scalable multi corpora neural language models for ASR," in Proc. Interspeech, Gernot Kubin and Zdravko Kacic, Eds. 2019, pp. 3910-3914, ISCA.
2202.01xxx/2202.01094/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b03b262c70a6d972881cf4fda0f8db8b2ed4c9c99f68e8962644c0c09725804b
3
+ size 149545
2202.01xxx/2202.01094/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_content_list.json ADDED
@@ -0,0 +1,1203 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "A Survey on Retrieval-Augmented Text Generation",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 231,
8
+ 89,
9
+ 766,
10
+ 111
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Huayang Li $^{1,2}$ ,* Yixuan Su $^{1,2}$ ,* Deng Cai $^{1,2}$ ,* Yan Wang $^{3,4}$ ,* Lemao Liu $^{5,6}$",
17
+ "bbox": [
18
+ 174,
19
+ 123,
20
+ 825,
21
+ 141
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{\\diamond}$ Nara Institute of Science and Technology $\\spadesuit$ University of Cambridge",
28
+ "bbox": [
29
+ 206,
30
+ 141,
31
+ 796,
32
+ 158
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "$\\diamond$ The Chinese University of Hong Kong $\\clubsuit$ Tencent AI Lab",
39
+ "bbox": [
40
+ 253,
41
+ 158,
42
+ 747,
43
+ 174
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "li.huayang.lh6@is.naist.jp, ys484@cam.ac.uk",
50
+ "bbox": [
51
+ 240,
52
+ 175,
53
+ 761,
54
+ 192
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "thisisjcykcd@gmail.com, brandenwang@tencent.com",
61
+ "bbox": [
62
+ 216,
63
+ 192,
64
+ 786,
65
+ 208
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "lemaoliu@gmail.com",
72
+ "bbox": [
73
+ 391,
74
+ 210,
75
+ 611,
76
+ 224
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Abstract",
83
+ "text_level": 1,
84
+ "bbox": [
85
+ 260,
86
+ 252,
87
+ 339,
88
+ 268
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Recently, retrieval-augmented text generation attracted increasing attention of the computational linguistics community. Compared with conventional generation models, retrieval-augmented text generation has remarkable advantages and particularly has achieved state-of-the-art performance in many NLP tasks. This paper aims to conduct a survey about retrieval-augmented text generation. It firstly highlights the generic paradigm of retrieval-augmented generation, and then it reviews notable approaches according to different tasks including dialogue response generation, machine translation, and other generation tasks. Finally, it points out some promising directions on top of recent methods to facilitate future research.",
95
+ "bbox": [
96
+ 141,
97
+ 282,
98
+ 460,
99
+ 512
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "1 Introduction",
106
+ "text_level": 1,
107
+ "bbox": [
108
+ 114,
109
+ 527,
110
+ 260,
111
+ 542
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "Retrieval-augmented text generation, as a new text generation paradigm that fuses emerging deep learning technology and traditional retrieval technology, has achieved state-of-the-art (SOTA) performance in many NLP tasks and attracted the attention of the computational linguistics community (Weston et al., 2018; Dinan et al., 2018; Cai et al., 2021). Compared with generation-based counterpart, this new paradigm has some remarkable advantages: 1) The knowledge is not necessary to be implicitly stored in model parameters, but is explicitly acquired in a plug-and-play manner, leading to great scalability; 2) Instead of generating from scratch, the paradigm generating text from some retrieved human-written reference, which potentially alleviates the difficulty of text generation.",
118
+ "bbox": [
119
+ 112,
120
+ 556,
121
+ 489,
122
+ 813
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "This paper aims to review many representative approaches for retrieval-augmented text generation tasks including dialogue response generation (Weston et al., 2018), machine translation (Gu et al., 2018) and others (Hashimoto et al., 2018). We",
129
+ "bbox": [
130
+ 112,
131
+ 814,
132
+ 489,
133
+ 894
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "firstly present the generic paradigm of retrieval-augmented generation as well as three key components under this paradigm, which are retrieval sources, retrieval metrics and generation models.",
140
+ "bbox": [
141
+ 507,
142
+ 253,
143
+ 884,
144
+ 316
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Then, we introduce notable methods about retrieval-augmented generation, which are organized with respect to different tasks. Specifically, on the dialogue response generation task, exemplar/template retrieval as an intermediate step has been shown beneficial to informative response generation (Weston et al., 2018; Wu et al., 2019; Cai et al., 2019a,b). In addition, there has been growing interest in knowledge-grounded generation exploring different forms of knowledge such as knowledge bases and external documents (Dinan et al., 2018; Zhou et al., 2018; Lian et al., 2019; Li et al., 2019; Qin et al., 2019; Wu et al., 2021; Zhang et al., 2021). On the machine translation task, we summarize the early work on how the retrieved sentences (called translation memory) are used to improve statistical machine translation (SMT) (Koehn et al., 2003) models (Simard and Isabelle, 2009; Koehn and Senellart, 2010) and in particular, we intensively highlight several popular methods to integrating translation memory to NMT models (Gu et al., 2018; Zhang et al., 2018; Xu et al., 2020; He et al., 2021). We also review the applications of retrieval-augmented generation in other generation tasks such as abstractive summarization (Peng et al., 2019), code generation (Hashimoto et al., 2018), paraphrase (Kazemnejad et al., 2020; Su et al., 2021b), and knowledge-intensive generation (Lewis et al., 2020b). Finally, we also point out some promising directions on retrieval-augmented generation to push forward the future research.",
151
+ "bbox": [
152
+ 507,
153
+ 317,
154
+ 884,
155
+ 818
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "2 Retrieval-Augmented Paradigm",
162
+ "text_level": 1,
163
+ "bbox": [
164
+ 507,
165
+ 828,
166
+ 818,
167
+ 845
168
+ ],
169
+ "page_idx": 0
170
+ },
171
+ {
172
+ "type": "text",
173
+ "text": "In this section, we first give a general formulation of retrieval-augmented text generation. Then, we discuss three major components of the retrieval-augmented generation paradigm, including the re",
174
+ "bbox": [
175
+ 507,
176
+ 854,
177
+ 884,
178
+ 919
179
+ ],
180
+ "page_idx": 0
181
+ },
182
+ {
183
+ "type": "aside_text",
184
+ "text": "arXiv:2202.01110v2 [cs.CL] 13 Feb 2022",
185
+ "bbox": [
186
+ 21,
187
+ 309,
188
+ 60,
189
+ 725
190
+ ],
191
+ "page_idx": 0
192
+ },
193
+ {
194
+ "type": "page_footnote",
195
+ "text": "*All authors contributed equally.",
196
+ "bbox": [
197
+ 141,
198
+ 904,
199
+ 344,
200
+ 917
201
+ ],
202
+ "page_idx": 0
203
+ },
204
+ {
205
+ "type": "image",
206
+ "img_path": "images/62d7703b981a995bd5df016d341b6f833767fda6e3c4c9faae126038f7ecd4c4.jpg",
207
+ "image_caption": [
208
+ "Figure 1: The overview of this survey."
209
+ ],
210
+ "image_footnote": [],
211
+ "bbox": [
212
+ 115,
213
+ 80,
214
+ 884,
215
+ 256
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "trieval source, retrieval metric and integration methods.",
222
+ "bbox": [
223
+ 112,
224
+ 306,
225
+ 489,
226
+ 335
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "2.1 Formulation",
233
+ "text_level": 1,
234
+ "bbox": [
235
+ 112,
236
+ 347,
237
+ 260,
238
+ 362
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "Most text generation tasks can be formulated as a mapping from input sequence $\\pmb{x}$ to output sequence $\\pmb{y} : \\pmb{y} = f(\\pmb{x})$ . For instance, $\\pmb{x}$ and $\\pmb{y}$ could be the dialogue history and the corresponding response for dialogue response generation, the text in the source language and the translation in the target language for machine translation, and so on.",
245
+ "bbox": [
246
+ 112,
247
+ 368,
248
+ 487,
249
+ 480
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "Recently, some researchers suggest to endow models the capability to access external memory via some information retrieval techniques, so that they can acquire more information in the generation process (Gu et al., 2018; Weston et al., 2018; Cai et al., 2019b). The retrieval-augmented generation can be further formulated as:",
256
+ "bbox": [
257
+ 112,
258
+ 481,
259
+ 485,
260
+ 592
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "equation",
266
+ "text": "\n$$\n\\boldsymbol {y} = f (\\boldsymbol {x}, z) \\tag {1}\n$$\n",
267
+ "text_format": "latex",
268
+ "bbox": [
269
+ 250,
270
+ 602,
271
+ 485,
272
+ 620
273
+ ],
274
+ "page_idx": 1
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "where $z = \\{\\langle \\boldsymbol{x}^r, \\boldsymbol{y}^r \\rangle\\}$ is a set of relevant instances retrieved from the original training set or external datasets. The main idea of this paradigm is that $\\boldsymbol{y}^r$ may benefit the response generation, if $\\boldsymbol{x}^r$ (or $\\boldsymbol{y}^r$ ) is similar (or relevant) to the input $\\boldsymbol{x}$ . It is worth noting that $\\boldsymbol{x}^r = \\emptyset$ when unsupervised retrieval sources are used. In general, the retrieval memory can be retrieved from three kinds of sources: the training corpus, external datasets in the same format with the training corpus, and large-scale unsupervised corpus (§2.2). Metrics that evaluate the relevance between text are varied as well, in §2.3 we divided them into three categories: sparse-vector retrieval, dense-vector retrieval, and training-based retrieval. Finally, how to integrate the retrieval memory to the generation model is also significant, we also introduce some popular integration approaches in §2.4.",
279
+ "bbox": [
280
+ 112,
281
+ 629,
282
+ 489,
283
+ 919
284
+ ],
285
+ "page_idx": 1
286
+ },
287
+ {
288
+ "type": "text",
289
+ "text": "2.2 Retrieval Sources",
290
+ "text_level": 1,
291
+ "bbox": [
292
+ 507,
293
+ 306,
294
+ 694,
295
+ 319
296
+ ],
297
+ "page_idx": 1
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "Training Corpus Most previous studies search the external memory from its training corpus (Song et al., 2016; Gu et al., 2018; Weston et al., 2018). In the inference time, retrieved examples with high relevant scores could be regarded as extra references and reduce model's uncertainty in generation. The main motivation of those works is to store knowledge not only in the model parameters but also in an explicit and accessible form, making the model be able to re-access it during inference.",
302
+ "bbox": [
303
+ 507,
304
+ 329,
305
+ 884,
306
+ 491
307
+ ],
308
+ "page_idx": 1
309
+ },
310
+ {
311
+ "type": "text",
312
+ "text": "External Data Some researchers also propose to retrieval relevant samples from external datasets (Su et al., 2021c; Xiao et al., 2021). In these studies, the retrieval pool is different with the training corpus, which can further provide additional information that are not contained in the training corpus. This is especially beneficial for applications such as domain adaptation and knowledge update. For example, Khandelwal et al. (2020a); Zheng et al. (2021a) employ the in-domain dataset as the external memory to achieve fast domain adaptation for machine translation.",
313
+ "bbox": [
314
+ 507,
315
+ 502,
316
+ 884,
317
+ 696
318
+ ],
319
+ "page_idx": 1
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "Unsupervised Data One limitation for previous two sources is that the datasets have to be supervised datasets consisting of aligned input-output pairs. For machine translation, Cai et al. (2021) propose a cross-lingual retriever to directly retrieve target sentence from unsupervised corpus (i.e., monolingual corpus in the target language). The main idea is aligning source-side sentences and the corresponding target-side translations in a dense vector space, i.e., aligning $\\mathbf{x}$ and $\\mathbf{y}^r$ when $\\mathbf{x}^r$ is absent. As a result, the retriever directly connects the dots between the source-side input and target-side translations, enabling monolingual data in the target",
324
+ "bbox": [
325
+ 507,
326
+ 709,
327
+ 885,
328
+ 919
329
+ ],
330
+ "page_idx": 1
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "language to be used alone as memories.",
335
+ "bbox": [
336
+ 112,
337
+ 84,
338
+ 410,
339
+ 99
340
+ ],
341
+ "page_idx": 2
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "2.3 Retrieval Metrics",
346
+ "text_level": 1,
347
+ "bbox": [
348
+ 112,
349
+ 111,
350
+ 297,
351
+ 124
352
+ ],
353
+ "page_idx": 2
354
+ },
355
+ {
356
+ "type": "text",
357
+ "text": "Sparse-vector Retrieval Given an input sequence $\\pmb{x}$ and a retrieval corpus, retrieval model aims to retrieve a set of relevant examples $z = \\{\\langle \\pmb{x}^r, \\pmb{y}^r \\rangle\\}$ from the corpus. When a supervised corpus is used, $\\{\\langle \\pmb{x}^r, \\pmb{y}^r \\rangle\\}$ is retrieved by measuring the similarity between $\\pmb{x}$ and $\\pmb{x}^r$ . For similarity measurement, sparse-vector retrieval methods such as TF-IDF and BM25 (Robertson and Zaragoza, 2009) are widely used. They match keywords efficiently with an inverted index.",
358
+ "bbox": [
359
+ 112,
360
+ 131,
361
+ 489,
362
+ 292
363
+ ],
364
+ "page_idx": 2
365
+ },
366
+ {
367
+ "type": "text",
368
+ "text": "Dense-vector Retrieval However, these methods prefer examples with similar surfaces, and may fail to retrieve examples that are only semantically relevant. To alleviate above problem, some studies (Cao and Xiong, 2018) attempt to retrieve in dense-vector space instead of the lexical overlap. Recent work (Lee et al., 2019) makes use of pretrained language models, which encodes the text to low-dimensional dense vectors via BERT-based encoders. The retrieval score are computed via inner products between vectors.",
369
+ "bbox": [
370
+ 112,
371
+ 300,
372
+ 489,
373
+ 476
374
+ ],
375
+ "page_idx": 2
376
+ },
377
+ {
378
+ "type": "text",
379
+ "text": "Task-specific Retrieval Similarity-based retrieval is based on a simple heuristic. That is, the more $x^r$ resembles with $x$ , the more likely $x^r$ and $y^r$ will help the generation. However, the most similar one by universal textual similarity does not necessarily serve the best for downstream models. Ideally, the retrieval metric would be learned from the data in a task-dependent way: we wish to consider a memory only if it can indeed boost the quality of final generation. To this end, Cai et al. (2021) propose to unify the memory retriever and its downstream generation model into a learnable whole. Such memory retrieval is end-to-end optimized for task-specific objectives.",
380
+ "bbox": [
381
+ 112,
382
+ 486,
383
+ 489,
384
+ 712
385
+ ],
386
+ "page_idx": 2
387
+ },
388
+ {
389
+ "type": "text",
390
+ "text": "2.4 Integration",
391
+ "text_level": 1,
392
+ "bbox": [
393
+ 112,
394
+ 721,
395
+ 250,
396
+ 736
397
+ ],
398
+ "page_idx": 2
399
+ },
400
+ {
401
+ "type": "text",
402
+ "text": "Data Augmentation There are several ways to integrate the retrieved external memory in generation. One straightforward way is data augmentation, which constructs some augmented inputs by concatenating spans from $\\{\\langle x^r,y^r\\rangle \\}$ with the original input $x$ . By training on the augmented inputs, a generation model implicitly leans how to integrate the retrieved information. Despite the simplicity, this kind of methods works efficiently in lots of tasks (Song et al., 2016; Weston et al., 2018; Bulte and Tezcan, 2019).",
403
+ "bbox": [
404
+ 112,
405
+ 741,
406
+ 489,
407
+ 917
408
+ ],
409
+ "page_idx": 2
410
+ },
411
+ {
412
+ "type": "text",
413
+ "text": "Attention Mechanisms Another integration method is based on attention mechanisms (Bahdanau et al., 2014). The main idea of this fashion is adopting additional encoders (in various architectures) to encode retrieved target sentences, and integrate them through attention (Cao and Xiong, 2018; Gu et al., 2018; Bapna and First, 2019). Since the attention mechanism is becoming (Bahdanau et al., 2014; Vaswani et al., 2017) a key module in lots of NLP models, integrating retrieved memory through attention becomes a very nature and efficient way.",
414
+ "bbox": [
415
+ 507,
416
+ 84,
417
+ 884,
418
+ 277
419
+ ],
420
+ "page_idx": 2
421
+ },
422
+ {
423
+ "type": "text",
424
+ "text": "Skeleton Extraction In the previous two methods, the downstream generation model learns how to filter out irrelevant or even harmful information from the retrieved examples implicitly. There also exist some works that try to explicitly extract useful information, i.e., skeleton extraction, from the retrieved memory (Cai et al., 2019a; Wu et al., 2019; Cai et al., 2019b). For example, one skeleton should be a part of a whole utterance with irrelevant content masked, and the generation model only integrate this skeleton in the generation process.",
425
+ "bbox": [
426
+ 507,
427
+ 286,
428
+ 884,
429
+ 464
430
+ ],
431
+ "page_idx": 2
432
+ },
433
+ {
434
+ "type": "text",
435
+ "text": "3 Dialogue Response Generation",
436
+ "text_level": 1,
437
+ "bbox": [
438
+ 507,
439
+ 475,
440
+ 808,
441
+ 491
442
+ ],
443
+ "page_idx": 2
444
+ },
445
+ {
446
+ "type": "text",
447
+ "text": "Background Dialogue systems can be grouped into two categories: chit-chat systems and task-oriented systems. While task-oriented dialogue systems are designed to accomplish specific user tasks such as air tickets booking, chit-chat dialogue systems aim at giving a meaningful and fluent response for any dialogue history in the open domain. Dialogue response generation in chit-chat dialogue system is challenging partly due to the diversity of possible responses to a single dialogue history (i.e., the one-to-many problem). The dialogue history alone cannot decide a meaningful and specific response. Also, external knowledge that is not present in the dialogue history are often necessary for avoiding safe but boring responses. We focus on recent efforts tackling the challenges to develop chit-chat dialogue systems.",
448
+ "bbox": [
449
+ 507,
450
+ 500,
451
+ 884,
452
+ 772
453
+ ],
454
+ "page_idx": 2
455
+ },
456
+ {
457
+ "type": "text",
458
+ "text": "Most modern chit-chat dialogue systems can be categorized into two classes, namely, retrieval-based models and generation-based models. The retrieval-based models (Ji et al., 2014; Hu et al., 2014) directly copy an existing response from curated dialogue corpora (i.e., the retrieval pool) when receiving a response request. The retrieved responses are often informative and grammatical as they are collected from real-world conversa",
459
+ "bbox": [
460
+ 507,
461
+ 774,
462
+ 884,
463
+ 919
464
+ ],
465
+ "page_idx": 2
466
+ },
467
+ {
468
+ "type": "text",
469
+ "text": "tions and possibly post-edited by a human. However, such systems perform poorly when a given dialogue history is substantially different from those in the retrieval pool. On the other hand, the generation-based models (Shang et al., 2015; Vinyals and Le, 2015; Li et al., 2016a) generate a new utterance from scratch. Those generation-based models have better generalization capacity when handling unseen dialogue contexts. Nevertheless, the generated utterances are inclined to be dull and non-informative (e.g., \"I don't know\", \"I think so\", \"Me too\" etc.) (Li et al., 2016a).",
470
+ "bbox": [
471
+ 115,
472
+ 84,
473
+ 485,
474
+ 275
475
+ ],
476
+ "page_idx": 3
477
+ },
478
+ {
479
+ "type": "text",
480
+ "text": "Shallow Integration As discussed, retrieval-based models may give informative but inappropriate responses while generation-based models often do the opposite. It is desirable to combine the best of both worlds. Early work (Qiu et al., 2017) attempts to re-rank the output from both models. For a deep integration, Song et al. (2016) and Yang et al. (2019) extend the standard SEQ2SEQ encoder-decoder model (Bahdanau et al., 2014) with an extra encoder for encoding the retrieval result. The output of the extra encoder, along with the output from the original encoder for dialogue history, is used to feed the decoder. Weston et al. (2018) use a single encoder that takes the concatenation of the original dialogue history and the retrieved as input. Wu et al. (2019) note that the retrieved information should be used in awareness of the context difference, and further proposed to construct an edit vector by explicitly encoding the lexical differences between the input dialogue history and the retrieved dialogue history. Pandey et al. (2018) further propose to weight different training instances by context similarity.",
481
+ "bbox": [
482
+ 115,
483
+ 293,
484
+ 485,
485
+ 659
486
+ ],
487
+ "page_idx": 3
488
+ },
489
+ {
490
+ "type": "text",
491
+ "text": "Deep Integration To prevent the inflow of erroneous information, Cai et al. (2019a) propose a general framework that first extracts a skeleton from the retrieved response and then generates the response based on the extracted skeleton. This framework is also adopted for stylistic response generation (Su et al., 2021c). Gupta et al. (2021) suggest to use the semantic structure of an exemplar response, instead of the tokens of the exemplar response, to guide generation. Despite their differences, a common issue is that the generation model easily learns to ignore the retrieved response entirely and collapses to a vanilla seq2seq model. This happens with improper training instances. Due to the one-to-many nature, it hap",
492
+ "bbox": [
493
+ 115,
494
+ 678,
495
+ 485,
496
+ 917
497
+ ],
498
+ "page_idx": 3
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "pens frequently that a retrieved response (extracted skeleton) is suitable for responding to the query, but inconsistent with the current target response.",
503
+ "bbox": [
504
+ 512,
505
+ 85,
506
+ 880,
507
+ 131
508
+ ],
509
+ "page_idx": 3
510
+ },
511
+ {
512
+ "type": "text",
513
+ "text": "Earlier studies (Weston et al., 2018; Wu et al., 2019; Cai et al., 2019a) alleviate the above problems by putting hard constraints on the data (e.g., discarding data with low similarity of the retrieved response and the target response), which, however, greatly reduces the amount of usable data. Cai et al. (2019b) employ a random mechanism for generating the skeletons used for training, which extract skeletons from the corresponding responses with some deliberate disturbance. Paranjape et al. (2021) propose to model the retriever after the posterior distribution of retrieval given the input and the target output and train it jointly with the standard retriever and the generator by maximizing the evidence lower bound (ELBo) in expectation over retrieval.",
514
+ "bbox": [
515
+ 512,
516
+ 134,
517
+ 880,
518
+ 388
519
+ ],
520
+ "page_idx": 3
521
+ },
522
+ {
523
+ "type": "text",
524
+ "text": "Knowledge-Enhanced Generation The aforementioned work demonstrates that retrieval-based dialogue systems can be used for building better generation-based models. In general, this is done by conditioning the generation on some retrieved responses. More traditionally, to infuse the response with external knowledge, the retrieval pool is not necessarily a dialogue corpus. In fact, knowledge-grounded dialogue response generation exploring different forms of knowledge such as knowledge bases and external documents (Dinan et al., 2018; Zhou et al., 2018; Lian et al., 2019; Li et al., 2019; Qin et al., 2019; Wu et al., 2021; Zhang et al., 2021; Komeili et al., 2021) has been actively explored.",
525
+ "bbox": [
526
+ 512,
527
+ 406,
528
+ 880,
529
+ 646
530
+ ],
531
+ "page_idx": 3
532
+ },
533
+ {
534
+ "type": "text",
535
+ "text": "Limitations We note that there are three major limitations in existing work for dialogue response generation. First, current methods only use one retrieved response for generation. It can be more beneficial to combine multiple retrieval responses. However, this can be difficult due to the one-to-many nature of dialogue response generation. Second, current methods use universal relevance score for retrieval. It can be more effective if we can use more customized retrieval metric especially for controlled dialogue response generation (e.g., persona, emotion, etc). Third, the retrieval pool of existing methods is limited to dialogue corpora (context-response pairs) or documents. It might be useful to enlarge the retrieval pool by including more corpora in other domains or in other modali",
536
+ "bbox": [
537
+ 512,
538
+ 661,
539
+ 880,
540
+ 917
541
+ ],
542
+ "page_idx": 3
543
+ },
544
+ {
545
+ "type": "text",
546
+ "text": "ties. As discussed, there leaves plenty of possible directions to explore in the future.",
547
+ "bbox": [
548
+ 112,
549
+ 84,
550
+ 487,
551
+ 116
552
+ ],
553
+ "page_idx": 4
554
+ },
555
+ {
556
+ "type": "text",
557
+ "text": "4 Machine Translation",
558
+ "text_level": 1,
559
+ "bbox": [
560
+ 112,
561
+ 131,
562
+ 329,
563
+ 148
564
+ ],
565
+ "page_idx": 4
566
+ },
567
+ {
568
+ "type": "text",
569
+ "text": "Retrieval augmented translation originates from human translation scenarios (Somers, 2003). When translating $\\hat{\\pmb{y}}$ from an input source sentence $\\pmb{x}$ , a human translator typically involves a search engine to retrieve similar sentences $\\{\\langle \\pmb{x}^r,\\pmb{y}^r\\rangle\\}$ from a bilingual database. Such a technique called translation memory is helpful to improve the translation quality and efficiency for human translators (Dillon and Fraser, 2006). As the development of machine translation techniques, there is a surge of interests in improving machine translation models with translation memory. In the rest of this section, we will review translation memory for both statistical machine translation (SMT) and neural machine translation (NMT).",
570
+ "bbox": [
571
+ 112,
572
+ 160,
573
+ 489,
574
+ 401
575
+ ],
576
+ "page_idx": 4
577
+ },
578
+ {
579
+ "type": "text",
580
+ "text": "4.1 Translation Memory in SMT",
581
+ "text_level": 1,
582
+ "bbox": [
583
+ 112,
584
+ 416,
585
+ 386,
586
+ 432
587
+ ],
588
+ "page_idx": 4
589
+ },
590
+ {
591
+ "type": "text",
592
+ "text": "Generally, SMT includes three key components in a pipeline manner such as phrase table extraction, parameter tuning and decoding (Koehn et al., 2003; Chiang, 2007). As a result, many efforts have been made to make use of translation memory (TM) on top of each component.",
593
+ "bbox": [
594
+ 112,
595
+ 439,
596
+ 489,
597
+ 536
598
+ ],
599
+ "page_idx": 4
600
+ },
601
+ {
602
+ "type": "text",
603
+ "text": "Constrained Decoding with TM Constrained decoding is the most straightforward way to integrating TM into SMT (Smith and Clark, 2009; Koehn and Senellart, 2010; Zhechev and Van Genabith, 2010; Ma et al., 2011). Its basic idea is to reuse the useful segments in $\\pmb{y}^{r}$ while translate other segments by SMT. Specifically, the approach consists of three steps: 1) identify the unmatched segments in both $\\pmb{x}^{r}$ and $\\pmb{x}$ through the edit-distance algorithm; 2) identify the unmatched segments in $\\pmb{y}^{r}$ , each of which is aligned to one unmatched segment in $\\pmb{x}^{r}$ by a word alignment algorithm; 3) decode each unmatched segment in $\\pmb{x}$ by SMT and then use the result to replace its corresponding unmatched segment in $\\pmb{y}^{r}$ . Li et al. (2016b) further extend this approach from sentence level to phrase level. The advantage in constrained decoding is that it does not require to change the translation model (including phrase table and parameters) and can be applied in a plug-and-play way. This approach is successful when $\\pmb{x}$ is highly similar to $\\pmb{x}^{r}$ ; otherwise its performance is degraded largely, because it explicitly isolates TM",
604
+ "bbox": [
605
+ 112,
606
+ 548,
607
+ 489,
608
+ 920
609
+ ],
610
+ "page_idx": 4
611
+ },
612
+ {
613
+ "type": "text",
614
+ "text": "matching and SMT decoding and reuses the results in $\\pmb{x}^r$ or not in a deterministic way.",
615
+ "bbox": [
616
+ 507,
617
+ 84,
618
+ 882,
619
+ 116
620
+ ],
621
+ "page_idx": 4
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "Phrase Table Aggregation with TM There are also notable efforts to augment the phrase table for SMT by extracting translation rules from the retrieved bilingual sentences $\\{\\langle x^r,y^r\\rangle \\}$ . Then they re-tune the parameters for the SMT model which makes use of translation knowledge from $\\{\\langle x^r,y^r\\rangle \\}$ in a implicit way when translating $x$ . For example, Biçici and Dymetman (2008); Simard and Isabelle (2009) directly combine the extracted translation rules into the phrase table in a shallow combination way. They introduce an additional feature to indicate that whether translation rule is from $\\{\\langle x^r,y^r\\rangle \\}$ or not and then train all feature weights with MERT (Och, 2003). One characteristic of these work is that a translation rule extracted from $\\{\\langle x^r,y^r\\rangle \\}$ which can not exactly match any segments in $x$ is useless even if it may contain some useful words in its target side. To remedy this observation, Wang et al. (2013, 2014) resort to a deep combination way to using the extracted translation rules. For each rule in the phrase table, it designs a generative model to reward the rules which are similar to those extracted from $\\{\\langle x^r,y^r\\rangle \\}$ . Then this generative model is used as a feature in the log-linear based SMT model whose weight is tuned together with other features by MERT. In addition, Li et al. (2014) employ a similar way to reward the rules but it relies on a discriminative model which is easy to integrate potential features from $\\{\\langle x^r,y^r\\rangle \\}$ .",
626
+ "bbox": [
627
+ 507,
628
+ 130,
629
+ 885,
630
+ 615
631
+ ],
632
+ "page_idx": 4
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "Parameter Tuning with TM Unlike the above two research lines, Liu et al. (2012, 2014) make use of translation memory only for tuning parameters. To be specific, when translating an input sentence $\\mathbf{x}$ , they first retrieve many similar bilingual sentences $\\{\\langle \\mathbf{x}^r, \\mathbf{y}^r \\rangle\\}$ , and then tune the parameters on top of the retrieved sentences as well as a given development dataset in a sentence-wise manner, i.e., they perform an independent tuning run for each input sentence. To improve the efficiency of each tuning step, they propose a local update on top of $\\{\\langle \\mathbf{x}^r, \\mathbf{y}^r \\rangle\\}$ starting from a baseline model.",
637
+ "bbox": [
638
+ 507,
639
+ 627,
640
+ 884,
641
+ 820
642
+ ],
643
+ "page_idx": 4
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "Despite the successes of translation memory in SMT, there are still some limitations in the above three kinds of methods. Firstly, all these methods employ a fuzzy match score for retrieval, which is highly dependent on surface word matching and thus cannot recall examples that are similar in",
648
+ "bbox": [
649
+ 507,
650
+ 822,
651
+ 885,
652
+ 920
653
+ ],
654
+ "page_idx": 4
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "semantics but different in surface form. Secondly, these methods integrate the retrieved examples into a single module of SMT in ways that cannot make full use of the knowledge in the retrieved examples. For example, the integration in the first two kinds (constrained decoding and phrase table aggregation) is heuristic and not optimized towards translation quality, while the parameter tuning method fine-tunes only a few parameters of the log-linear SMT model, which are not enough to preserve sufficient knowledge from the retrieved examples. Thirdly, since SMT performs in a pipeline manner, it is intractable to jointly optimize the retrieval metric and the SMT model. Consequently, all these methods adopt an off-the-shelf metric for retrieval, leading to suboptimal performance.",
659
+ "bbox": [
660
+ 115,
661
+ 82,
662
+ 490,
663
+ 338
664
+ ],
665
+ "page_idx": 5
666
+ },
667
+ {
668
+ "type": "text",
669
+ "text": "4.2 Translation Memory in NMT",
670
+ "text_level": 1,
671
+ "bbox": [
672
+ 112,
673
+ 354,
674
+ 386,
675
+ 370
676
+ ],
677
+ "page_idx": 5
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "Translation memory has been widely explored in Neural Machine Translation (NMT). Depending on when retrieval is involved, we can categorize previous works into two classes: 1) the NMT model learns how to cooperate with the retrieval model in the training phase; 2) the NMT model is only made aware of the retrieved data in the inference phase.",
682
+ "bbox": [
683
+ 112,
684
+ 376,
685
+ 487,
686
+ 489
687
+ ],
688
+ "page_idx": 5
689
+ },
690
+ {
691
+ "type": "text",
692
+ "text": "Inference Phase The key point of the literature in this line is to reward some target words based on the words in $\\pmb{y}^r$ during the inference process. A decision can thus be made based on both the distribution of the generation model and the additional reward from the retrieval model. Some previous works propose to reward target words based on the sentence-level similarity between $\\pmb{x}$ and $\\pmb{x}^r$ , and the word alignment between $\\pmb{x}^r$ and $\\pmb{y}^r$ . Given the input sentence $\\pmb{x}$ , Zhang et al. (2018) assign higher rewards to target words in $\\hat{\\pmb{y}}$ when they appear in $\\pmb{y}^r$ and the aligned source words occur in both $\\pmb{x}^r$ and $\\pmb{x}$ . He et al. (2019) follow a similar framework and additionally consider the position information of those target words when rewarding. These works reward the target words in an explicit way; in contrast, the one-sentence-one-model approach (Li et al., 2016c; Turchi et al., 2017) rewards target words implicitly. For each test input $\\pmb{x}$ , this approach first fine-tunes the translation model on the retrieved memory $\\{\\langle \\pmb{x}^r, \\pmb{y}^r \\rangle\\}$ and then translates $\\pmb{x}$ .",
693
+ "bbox": [
694
+ 115,
695
+ 500,
696
+ 489,
697
+ 837
698
+ ],
699
+ "page_idx": 5
700
+ },
701
+ {
702
+ "type": "text",
703
+ "text": "Others try to reward target words based on a token-level similarity score. Most works in this line are based on dense retrieval (Khandelwal et al., 2020a), e.g., with faiss. Khandelwal et al. (2020a) build a key-value datastore, where the key $h(\\boldsymbol{x}^r, \\boldsymbol{y}_{<t}^r)$ is the",
704
+ "bbox": [
705
+ 112,
706
+ 839,
707
+ 489,
708
+ 920
709
+ ],
710
+ "page_idx": 5
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "hidden state at each time step when translating $\\pmb{y}^r$ from $\\pmb{x}^r$ , and the value is its ground-truth target word $\\pmb{y}_t^r$ . At inference time, they can therefore use $h(\\pmb{x}, \\hat{\\pmb{y}}_{<t})$ as the query and reward target words with similar hidden representations in the datastore. Although this method achieves a significant performance gain, one drawback is its high latency. To address this issue, Meng et al. (2021) use some heuristics, e.g., pre-filtering, to avoid searching the entire datastore. In previous works the reward score is obtained from non-parametric approaches; in contrast, Zheng et al. (2021a) propose a lightweight network to learn the reward score. Since dense retrieval has the potential for cross-lingual retrieval, Zheng et al. (2021b) use a similar approach to achieve unsupervised domain adaptation, where the main change is to create the datastore based on synthetic source sentences and real target sentences.",
715
+ "bbox": [
716
+ 507,
717
+ 82,
718
+ 882,
719
+ 388
720
+ ],
721
+ "page_idx": 5
722
+ },
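+
+ The datastore-and-reward mechanism above can be illustrated with a small numpy sketch; the datastore layout, the distance kernel and the interpolation weight `lam` are illustrative assumptions rather than the authors' exact implementation:
+
+ ```python
+ # Sketch of kNN-MT scoring: keys are cached decoder states, values are
+ # the gold next tokens; at inference, the k nearest keys to the current
+ # decoder state induce a distribution that is mixed with the NMT model.
+ import numpy as np
+
+ def knn_distribution(query, keys, values, vocab_size, k=8, temp=10.0):
+     d = ((keys - query) ** 2).sum(-1)       # squared L2 to every key
+     nn = np.argsort(d)[:k]                  # indices of the k nearest keys
+     w = np.exp(-d[nn] / temp)               # closer neighbors weigh more
+     p = np.zeros(vocab_size)
+     for tok, weight in zip(values[nn], w):  # scatter weights onto tokens
+         p[tok] += weight
+     return p / p.sum()
+
+ def interpolate(p_model, p_knn, lam=0.5):
+     # final next-token distribution mixes the model and the datastore
+     return (1 - lam) * p_model + lam * p_knn
+ ```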
723
+ {
724
+ "type": "text",
725
+ "text": "Training Phase Different from the above model-agnostic approaches, previous works in this line aim to train the generation model to learn how to cooperate with the retrieval model. It is also worth noting that most works in this line adopt sentence-level retrieval when integrating the retrieved information into the training process. To achieve this goal, Bulte and Tezcan (2019) and Hossain et al. (2020) propose a data augmentation method to integrate the retrieved information, where $\\mathbf{x}$ is concatenated with $\\mathbf{y}^r$ before being fed into the model. Following the data augmentation approach, Xu et al. (2020) propose further matching methods to determine which retrieved examples are better to include in the source input.",
726
+ "bbox": [
727
+ 507,
728
+ 401,
729
+ 882,
730
+ 643
731
+ ],
732
+ "page_idx": 5
733
+ },
734
+ {
735
+ "type": "text",
736
+ "text": "There also exist some works that propose new architectures to integrate the retrieved information. Under the RNN-based framework, Cao and Xiong (2018) and Gu et al. (2018) use gating and attention mechanisms to incorporate the retrieved target sentences. Since the Transformer (Vaswani et al., 2017) became the backbone of NMT, some works also use additional Transformer encoders to encode retrieved target sentences and integrate them through an attention mechanism (Bapna and Firat, 2019; Cao et al., 2019). Xia et al. (2019) represent the retrieved target sentences in a different data structure, i.e., a graph structure, and integrate it through an attention mechanism. He et al. (2021) propose a lightweight method to encode the retrieved target sentences and leverage the alignment information to filter out irrelevant information.",
737
+ "bbox": [
738
+ 507,
739
+ 646,
740
+ 884,
741
+ 917
742
+ ],
743
+ "page_idx": 5
744
+ },
745
+ {
746
+ "type": "text",
747
+ "text": "Different from previous works that rely on bilingual memories, Cai et al. (2021) propose a framework that can retrieve the most similar target sentence from a monolingual dataset, using a source sentence as the query.",
748
+ "bbox": [
749
+ 112,
750
+ 84,
751
+ 487,
752
+ 165
753
+ ],
754
+ "page_idx": 6
755
+ },
756
+ {
757
+ "type": "text",
758
+ "text": "Limitations In the SMT section, we showed some limitations of the retrieval-augmented approaches. Some limitations also exist in the NMT line of work. First, the information used for deriving reward scores is limited. The similarity between an input and the retrieved examples is the primary feature used to derive reward scores; however, other information, e.g., the frequencies of words and the context, may also be beneficial for integrating the translation memory. Second, it remains an open question when the retrieved information should be used and when it should not. In the inference phase, approaches tend to integrate the translation memory excessively, e.g., at each time step, which not only reduces translation efficiency but may also dampen the fluency of the generated results.",
759
+ "bbox": [
760
+ 115,
761
+ 174,
762
+ 489,
763
+ 432
764
+ ],
765
+ "page_idx": 6
766
+ },
767
+ {
768
+ "type": "text",
769
+ "text": "5 Other Tasks",
770
+ "text_level": 1,
771
+ "bbox": [
772
+ 112,
773
+ 444,
774
+ 253,
775
+ 458
776
+ ],
777
+ "page_idx": 6
778
+ },
779
+ {
780
+ "type": "text",
781
+ "text": "In addition to dialogue systems and machine translation, retrieval-augmented generation techniques have been shown to be beneficial in many other tasks. In the following, we highlight several key tasks that apply retrieval-augmented generation approaches. $^{1}$",
782
+ "bbox": [
783
+ 112,
784
+ 470,
785
+ 487,
786
+ 551
787
+ ],
788
+ "page_idx": 6
789
+ },
790
+ {
791
+ "type": "text",
792
+ "text": "Language Modelling It has been shown that properly leveraging information from a retrieval memory can improve the performance of large pre-trained language models. To build a more accurate language model, Khandelwal et al. (2020b) propose to incorporate a soft memory module into the system. Specifically, an index is built by caching the hidden states of the training corpus. The language model then accesses the index via k-NN search and achieves greatly improved performance. As another example, Guu et al. (2020) propose a new paradigm that applies the retrieval-augmented technique to the pre-training of a generative language model. During learning, they train a neural selector that dynamically samples a relevant text to guide the reconstruction of a corrupted input sequence. In this way, the pre-trained model delivers better results by explicitly grounding on the retrieval memory. Lewis et al. (2020a) combine language model pre-training with a paraphrasing",
793
+ "bbox": [
794
+ 115,
795
+ 561,
796
+ 489,
797
+ 883
798
+ ],
799
+ "page_idx": 6
800
+ },
801
+ {
802
+ "type": "text",
803
+ "text": "approach. During learning, an input sequence to the model is first corrupted. In the meantime, a set of multi-lingual texts is retrieved, based on which the model learns to reconstruct the original input sequence. Recently, Borgeaud et al. (2021) propose RETRO, a large pre-trained language model enhanced with retrieved documents, and obtain performance comparable to GPT-3 with $25 \\times$ fewer parameters.",
804
+ "bbox": [
805
+ 507,
806
+ 84,
807
+ 884,
808
+ 229
809
+ ],
810
+ "page_idx": 6
811
+ },
812
+ {
813
+ "type": "text",
814
+ "text": "Summarization Text summarization is another research area that benefits from retrieval-augmented text generation. Peng et al. (2019) propose an adaptive decoding framework which first retrieves an exemplar document given the source document. The summary of the source document is then derived through an adaptive generation process based on the retrieved template. Different from Peng et al. (2019), Cao et al. (2018) and Hossain et al. (2020) introduce an intermediate re-ranking stage into the generation pipeline. Specifically, before generating the document summary, the retrieved documents are first re-ranked based on their similarity scores with respect to the source document. The document summary is then produced by rewriting the selected templates.",
815
+ "bbox": [
816
+ 507,
817
+ 239,
818
+ 884,
819
+ 512
820
+ ],
821
+ "page_idx": 6
822
+ },
823
+ {
824
+ "type": "text",
825
+ "text": "Paraphrase Generation To address the lack of quality as well as diversity in the generation of paraphrases, Kazemnejad et al. (2020) propose a generation framework which first retrieves a sentence that is similar to the input sentence. Based on the retrieved sentence, a neural editor then produces the resulting paraphrased sentence. Chen et al. (2019) investigate a different aspect of paraphrasing, i.e., how to control the linguistic syntax displayed in the generated text. To achieve this goal, Chen et al. (2019) propose to first extract a sentential exemplar that serves as the syntax template. A neural model then generates the paraphrase with the desired linguistic syntax following the retrieved exemplar.",
826
+ "bbox": [
827
+ 507,
828
+ 523,
829
+ 882,
830
+ 747
831
+ ],
832
+ "page_idx": 6
833
+ },
834
+ {
835
+ "type": "text",
836
+ "text": "Text Style Transfer To improve the quality of generated text, Li et al. (2018) propose a retrieval-augmented framework which first retrieves texts that are similar to the input based on lexical-level similarity. The retrieved tokens that are irrelevant to the source are then deleted, and the output is derived from the edited template. Xiao et al. (2021) also adopt this framework, incorporating retrieval information from two sources (i.e., sparse and dense memories) and obtaining an improved",
837
+ "bbox": [
838
+ 507,
839
+ 758,
840
+ 884,
841
+ 919
842
+ ],
843
+ "page_idx": 6
844
+ },
845
+ {
846
+ "type": "page_footnote",
847
+ "text": "<sup>1</sup>Here, we focus on tasks other than question answering. We refer readers interested in QA to Chen and Yih (2020).",
848
+ "bbox": [
849
+ 112,
850
+ 891,
851
+ 487,
852
+ 917
853
+ ],
854
+ "page_idx": 6
855
+ },
856
+ {
857
+ "type": "text",
858
+ "text": "model performance.",
859
+ "bbox": [
860
+ 117,
861
+ 86,
862
+ 263,
863
+ 99
864
+ ],
865
+ "page_idx": 7
866
+ },
867
+ {
868
+ "type": "text",
869
+ "text": "Data-to-Text Generation Recently, retrieval-augmented generation has been adapted to the task of data-to-text generation. To bridge the gap between structured data and natural language text, Su et al. (2021a) propose a novel retrieval-augmented framework. Specifically, given the source data, a set of candidate texts is first retrieved from a large unlabelled corpus. Then, a neural selector is applied to measure the similarities between the source data and the candidate texts and to extract a set of more fine-grained prototypes from the candidates. Lastly, a generation model takes the prototypes as input to produce the text that describes the given structured data.",
870
+ "bbox": [
871
+ 115,
872
+ 115,
873
+ 485,
874
+ 338
875
+ ],
876
+ "page_idx": 7
877
+ },
878
+ {
879
+ "type": "text",
880
+ "text": "While retrieval-augmented generation has been widely explored in the NLP community, we suggest that future research could extend this approach to tasks that involve data from multiple modalities. For instance, with recent advancements in image-text retrieval (Jia et al., 2021; Radford et al., 2021), the structural gap between images and texts is largely bridged. Some early studies (Zhang et al., 2020) have shown that information retrieved from images can improve the performance of neural machine translation models. Naturally, such methods could be extended to other multi-modal tasks, such as image captioning (Karpathy and Li, 2015). A similar idea could also be applied to tasks beyond images, such as speech-to-text transcription (Gales and Young, 2007).",
881
+ "bbox": [
882
+ 115,
883
+ 342,
884
+ 485,
885
+ 599
886
+ ],
887
+ "page_idx": 7
888
+ },
889
+ {
890
+ "type": "text",
891
+ "text": "6 Future Directions",
892
+ "text_level": 1,
893
+ "bbox": [
894
+ 117,
895
+ 618,
896
+ 299,
897
+ 632
898
+ ],
899
+ "page_idx": 7
900
+ },
901
+ {
902
+ "type": "text",
903
+ "text": "Despite the current success of retrieval-augmented text generation, there is still a long way to go, as discussed in the previous sections. We highlight some directions to facilitate future research as follows:",
904
+ "bbox": [
905
+ 117,
906
+ 646,
907
+ 485,
908
+ 725
909
+ ],
910
+ "page_idx": 7
911
+ },
912
+ {
913
+ "type": "text",
914
+ "text": "Retrieval Sensitivity The performance of retrieval-augmented text generation is very sensitive to the retrieval quality, i.e., the similarity between the query and the retrieved examples. Currently, retrieval-augmented text generation models perform well when the retrieved examples are very similar to the query; however, they can be even worse than generation models without retrieval when the retrieved examples are less similar. Therefore, it is important to explore new methods that reduce this sensitivity to retrieval similarity.",
915
+ "bbox": [
916
+ 115,
917
+ 741,
918
+ 485,
919
+ 917
920
+ ],
921
+ "page_idx": 7
922
+ },
923
+ {
924
+ "type": "text",
925
+ "text": "Retrieval Efficiency Generally, if one enlarges the retrieval memory to some extent, it becomes more likely to retrieve an example that is very similar to the query. Unfortunately, the downside is that the overall inference of retrieval-augmented generation models becomes less efficient due to the considerable retrieval overhead. In this sense, it is urgent to consider methods that trade off retrieval memory size against retrieval efficiency, for example, data compression for the retrieval memory.",
926
+ "bbox": [
927
+ 512,
928
+ 84,
929
+ 880,
930
+ 244
931
+ ],
932
+ "page_idx": 7
933
+ },
934
+ {
935
+ "type": "text",
936
+ "text": "Local vs. Global Optimization Theoretically, it seems promising to jointly learn retrieval metrics and generation models. However, in practice, there is an essential gap in the retrieval metric between the training and inference phases. In the training phase, the loss is locally back-propagated to only a few retrieved examples, while in the inference phase the metric is applied globally over all examples in the memory. It would be interesting to narrow this gap when learning a better metric for generation tasks.",
937
+ "bbox": [
938
+ 512,
939
+ 271,
940
+ 880,
941
+ 447
942
+ ],
943
+ "page_idx": 7
944
+ },
945
+ {
946
+ "type": "text",
947
+ "text": "Multi-Modalities With recent advancements in image-text retrieval, directly associating images with relevant text becomes possible. This urges researchers to investigate the possibility of retrieval-based text generation in tasks that involve data from different modalities. One typical task is image captioning. Beyond images, other tasks like speech-to-text transcription could potentially benefit from retrieval-based generation methods as well.",
948
+ "bbox": [
949
+ 512,
950
+ 474,
951
+ 880,
952
+ 618
953
+ ],
954
+ "page_idx": 7
955
+ },
956
+ {
957
+ "type": "text",
958
+ "text": "Diverse & Controllable Retrieval Most existing approaches adopt a universal metric for retrieval, such as the lexical similarity of sentences. Future work should explore how to use customized metrics for retrieval, which can be beneficial for more controlled text generation. For example, instances with particular emotions and styles may be more desirable in personalized dialogue generation, parallel data containing specific terminologies is more helpful in machine translation, and so on. On the other hand, using a universal metric for retrieval may lead to a lack of diversity in the retrieval results. Collecting a diverse set of retrieval results can improve the coverage of useful information. Thus, considering multiple different metrics for retrieval may lead to higher-quality generation in the future.",
959
+ "bbox": [
960
+ 512,
961
+ 645,
962
+ 880,
963
+ 917
964
+ ],
965
+ "page_idx": 7
966
+ },
967
+ {
968
+ "type": "text",
969
+ "text": "7 Conclusion",
970
+ "text_level": 1,
971
+ "bbox": [
972
+ 114,
973
+ 83,
974
+ 247,
975
+ 99
976
+ ],
977
+ "page_idx": 8
978
+ },
979
+ {
980
+ "type": "text",
981
+ "text": "In this paper, we surveyed recent approaches to retrieval-augmented text generation. We reviewed and summarized the development of the different components of retrieval-augmented text generation, including retrieval metrics, retrieval sources, and integration paradigms. We gave in-depth discussions of retrieval-augmented text generation in different applications, including dialogue response generation, machine translation, and other generation tasks. We also pointed out some future directions for retrieval-augmented text generation.",
982
+ "bbox": [
983
+ 112,
984
+ 110,
985
+ 490,
986
+ 287
987
+ ],
988
+ "page_idx": 8
989
+ },
990
+ {
991
+ "type": "text",
992
+ "text": "References",
993
+ "text_level": 1,
994
+ "bbox": [
995
+ 114,
996
+ 313,
997
+ 213,
998
+ 329
999
+ ],
1000
+ "page_idx": 8
1001
+ },
1002
+ {
1003
+ "type": "list",
1004
+ "sub_type": "ref_text",
1005
+ "list_items": [
1006
+ "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.",
1007
+ "Ankur Bapna and Orhan Firat. 2019. Non-parametric adaptation for neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1921-1931.",
1008
+ "Ergun Biçici and Marc Dymetman. 2008. Dynamic translation memory: Using statistical machine translation to improve translation memory fuzzy matches. In International Conference on Intelligent Text Processing and Computational Linguistics, pages 454-465. Springer.",
1009
+ "Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George van den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, Diego de Las Casas, Aurelia Guy, Jacob Menick, Roman Ring, Tom Hennigan, Saffron Huang, Loren Maggiore, Chris Jones, Albin Cassirer, Andy Brock, Michela Paganini, Geoffrey Irving, Oriol Vinyals, Simon Osindero, Karen Simonyan, Jack W. Rae, Erich Elsen, and Laurent Sifre. 2021. Improving language models by retrieving from trillions of tokens. CoRR, abs/2112.04426.",
1010
+ "Bram Bulte and Arda Tezcan. 2019. Neural fuzzy repair: Integrating fuzzy matches into neural machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1800-1809.",
1011
+ "Deng Cai, Yan Wang, Wei Bi, Zhaopeng Tu, Xiaojiang Liu, Wai Lam, and Shuming Shi. 2019a. Skeleton-to-response: Dialogue generation guided by retrieval memory. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1219-1228."
1012
+ ],
1013
+ "bbox": [
1014
+ 115,
1015
+ 337,
1016
+ 489,
1017
+ 919
1018
+ ],
1019
+ "page_idx": 8
1020
+ },
1021
+ {
1022
+ "type": "list",
1023
+ "sub_type": "ref_text",
1024
+ "list_items": [
1025
+ "Deng Cai, Yan Wang, Wei Bi, Zhaopeng Tu, Xiaojiang Liu, and Shuming Shi. 2019b. Retrieval-guided dialogue response generation via a matching-to-generation framework. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1866-1875.",
1026
+ "Deng Cai, Yan Wang, Huayang Li, Wai Lam, and Lemao Liu. 2021. Neural machine translation with monolingual translation memory. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 7307-7318, Online. Association for Computational Linguistics.",
1027
+ "Qian Cao, Shaohui Kuang, and Deyi Xiong. 2019. Learning to reuse translations: Guiding neural machine translation with examples. arXiv preprint arXiv:1911.10732.",
1028
+ "Qian Cao and Deyi Xiong. 2018. Encoding gated translation memory into neural machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3042-3047.",
1029
+ "Ziqiang Cao, Wenjie Li, Sujian Li, and Furu Wei. 2018. Retrieve, rerank and rewrite: Soft template based neural summarization. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018, Melbourne, Australia, July 15-20, 2018, Volume 1: Long Papers, pages 152-161. Association for Computational Linguistics.",
1030
+ "Danqi Chen and Wen-tau Yih. 2020. Open-domain question answering. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 34-37, Online. Association for Computational Linguistics.",
1031
+ "Mingda Chen, Qingming Tang, Sam Wiseman, and Kevin Gimpel. 2019. Controllable paraphrase generation with a syntactic exemplar. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 5972-5984. Association for Computational Linguistics.",
1032
+ "David Chiang. 2007. Hierarchical phrase-based translation. computational linguistics, 33(2):201-228.",
1033
+ "Sarah Dillon and Janet Fraser. 2006. Translators and tm: An investigation of translators' perceptions of translation memory adoption. Machine Translation, 20(2):67-79.",
1034
+ "Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2018. Wizard of wikipedia: Knowledge-powered conversational agents. arXiv preprint arXiv:1811.01241."
1035
+ ],
1036
+ "bbox": [
1037
+ 510,
1038
+ 85,
1039
+ 884,
1040
+ 917
1041
+ ],
1042
+ "page_idx": 8
1043
+ },
1044
+ {
1045
+ "type": "list",
1046
+ "sub_type": "ref_text",
1047
+ "list_items": [
1048
+ "Mark J. F. Gales and Steve J. Young. 2007. The application of hidden markov models in speech recognition. Found. Trends Signal Process., 1(3):195-304.",
1049
+ "Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor OK Li. 2018. Search engine guided neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.",
1050
+ "Prakhar Gupta, Jeffrey Bigham, Yulia Tsvetkov, and Amy Pavel. 2021. Controlling dialogue generation with semantic exemplars. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3018-3029, Online. Association for Computational Linguistics.",
1051
+ "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. 2020. REALM: retrieval-augmented language model pre-training. CoRR, abs/2002.08909.",
1052
+ "Tatsunori B Hashimoto, Kelvin Guu, Yonatan Oren, and Percy S Liang. 2018. A retrieve-and-edit framework for predicting structured outputs. In Advances in Neural Information Processing Systems, pages 10052-10062.",
1053
+ "Qiuxiang He, Guoping Huang, Qu Cui, Li Li, and Lemao Liu. 2021. Fast and accurate neural machine translation with translation memory. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3170-3180.",
1054
+ "Qiuxiang He, Guoping Huang, Lemao Liu, and Li Li. 2019. Word position aware translation memory for neural machine translation. In CCF International Conference on Natural Language Processing and Chinese Computing, pages 367-379. Springer.",
1055
+ "Nabil Hossain, Marjan Ghazvininejad, and Luke Zettlemoyer. 2020. Simple and effective retrieve-edit-rerank text generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2532-2538.",
1056
+ "Baotian Hu, Zhengdong Lu, Hang Li, and Qingcai Chen. 2014. Convolutional neural network architectures for matching natural language sentences. In NIPS, pages 2042-2050.",
1057
+ "Zongcheng Ji, Zhengdong Lu, and Hang Li. 2014. An information retrieval approach to short text conversation. arXiv preprint arXiv:1408.6988.",
1058
+ "Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 4904-4916. PMLR."
1059
+ ],
1060
+ "bbox": [
1061
+ 115,
1062
+ 85,
1063
+ 489,
1064
+ 917
1065
+ ],
1066
+ "page_idx": 9
1067
+ },
1068
+ {
1069
+ "type": "list",
1070
+ "sub_type": "ref_text",
1071
+ "list_items": [
1072
+ "Andrej Karpathy and Fei-Fei Li. 2015. Deep visual-semantic alignments for generating image descriptions. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2015, Boston, MA, USA, June 7-12, 2015, pages 3128-3137. IEEE Computer Society.",
1073
+ "Amirhossein Kazemnejad, Mohammadreza Salehi, and Mahdieh Soleymani Baghshah. 2020. Paraphrase generation by learning how to edit from samples. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6010-6021, Online. Association for Computational Linguistics.",
1074
+ "Urvashi Khandelwal, Angela Fan, Dan Jurafsky, Luke Zettlemoyer, and Mike Lewis. 2020a. Nearest neighbor machine translation. arXiv preprint arXiv:2010.00710.",
1075
+ "Urvashi Khandelwal, Omer Levy, Dan Jurafsky, Luke Zettlemoyer, and Mike Lewis. 2020b. Generalization through memorization: Nearest neighbor language models. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.",
1076
+ "Philipp Koehn, Franz J. Och, and Daniel Marcu. 2003. Statistical phrase-based translation. In Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics, pages 127-133.",
1077
+ "Philipp Koehn and Jean Senellart. 2010. Convergence of translation memory and statistical machine translation. In Proceedings of AMTA Workshop on MT Research and the Translation Industry, pages 21-31.",
1078
+ "Mojtaba Komeili, Kurt Shuster, and Jason Weston. 2021. Internet-augmented dialogue generation. arXiv preprint arXiv:2107.07566.",
1079
+ "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300.",
1080
+ "Mike Lewis, Marjan Ghazvininejad, Gargi Ghosh, Armen Aghajanyan, Sida Wang, and Luke Zettlemoyer. 2020a. Pre-training via paraphrasing. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.",
1081
+ "Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020b. Retrieval-augmented generation for knowledge-intensive nlp tasks. arXiv preprint arXiv:2005.11401.",
1082
+ "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016a. A diversity-promoting objective function for neural conversation models. In *NAACL*, pages 110-119."
1083
+ ],
1084
+ "bbox": [
1085
+ 510,
1086
+ 85,
1087
+ 882,
1088
+ 917
1089
+ ],
1090
+ "page_idx": 9
1091
+ },
1092
+ {
1093
+ "type": "list",
1094
+ "sub_type": "ref_text",
1095
+ "list_items": [
1096
+ "Juncen Li, Robin Jia, He He, and Percy Liang. 2018. Delete, retrieve, generate: a simple approach to sentiment and style transfer. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 1865-1874. Association for Computational Linguistics.",
1097
+ "Liangyou Li, Andy Way, and Qun Liu. 2014. A discriminative framework of integrating translation memory features into smt. In Proceedings of the 11th Conference of the Association for Machine Translation in the Americas, volume 1, pages 249-260.",
1098
+ "Liangyou Li, Andy Way, and Qun Liu. 2016b. Phrase-level combination of smt and tm using constrained word lattice. Association for Computational Linguistics (ACL).",
1099
+ "Xiaoqing Li, Jiajun Zhang, and Chengqing Zong. 2016c. One sentence one model for neural machine translation. arXiv preprint arXiv:1609.06490.",
1100
+ "Zekang Li, Cheng Niu, Fandong Meng, Yang Feng, Qian Li, and Jie Zhou. 2019. Incremental transformer with deliberation decoder for document grounded conversations. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 12-21.",
1101
+ "Rongzhong Lian, Min Xie, Fan Wang, Jinhua Peng, and Hua Wu. 2019. Learning to select knowledge for response generation in dialog systems. arXiv preprint arXiv:1902.04911.",
1102
+ "Lemao Liu, Hailong Cao, Taro Watanabe, Tiejun Zhao, Mo Yu, and Conghui Zhu. 2012. Locally training the log-linear model for smt. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 402-411.",
1103
+ "Lemao Liu, Tiejun Zhao, Taro Watanabe, Hailong Cao, and Conghui Zhu. 2014. Discriminative training for log-linear based smt: Global or local methods. ACM Transactions on Asian Language Information Processing (TALIP), 13(4):1-25.",
1104
+ "Yanjun Ma, Yifan He, Andy Way, and Josef van Genabith. 2011. Consistent translation using discriminative learning-a translation memory-inspired approach. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 1239-1248.",
1105
+ "Yuxian Meng, Xiaoya Li, Xiayu Zheng, Fei Wu, Xiaofei Sun, Tianwei Zhang, and Jiwei Li. 2021. Fast nearest neighbor machine translation. arXiv preprint arXiv:2105.14528.",
1106
+ "Franz Josef Och. 2003. Minimum error rate training in statistical machine translation. In Proceedings of the"
1107
+ ],
1108
+ "bbox": [
1109
+ 115,
1110
+ 85,
1111
+ 485,
1112
+ 917
1113
+ ],
1114
+ "page_idx": 10
1115
+ },
1116
+ {
1117
+ "type": "list",
1118
+ "sub_type": "ref_text",
1119
+ "list_items": [
1120
+ "41st Annual Meeting of the Association for Computational Linguistics, pages 160-167, Sapporo, Japan. Association for Computational Linguistics.",
1121
+ "Gaurav Pandey, Danish Contractor, Vineet Kumar, and Sachindra Joshi. 2018. Exemplar encoder-decoder for neural conversation generation. In ACL, pages 1329-1338.",
1122
+ "Ashwin Paranjape, Omar Khattab, Christopher Potts, Matei Zaharia, and Christopher D Manning. 2021. Hindsight: Posterior-guided training of retrievers for improved open-ended generation. arXiv preprint arXiv:2110.07752.",
1123
+ "Hao Peng, Ankur P. Parikh, Manaal Faruqui, Bhuwan Dhingra, and Dipanjan Das. 2019. Text generation with exemplar-based adaptive decoding. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.",
1124
+ "Lianhui Qin, Michel Galley, Chris Brockett, Xiaodong Liu, Xiang Gao, William B Dolan, Yejin Choi, and Jianfeng Gao. 2019. Conversing by reading: Contentful neural conversation with on-demand machine reading. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5427-5436.",
1125
+ "Minghui Qiu, Feng-Lin Li, Siyu Wang, Xing Gao, Yan Chen, Weipeng Zhao, Haiqing Chen, Jun Huang, and Wei Chu. 2017. Alime chat: A sequence to sequence and rerank based chatbot engine. In ACL, pages 498-503.",
1126
+ "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 8748-8763. PMLR.",
1127
+ "Stephen Robertson and Hugo Zaragoza. 2009. The probabilistic relevance framework: BM25 and beyond. Now Publishers Inc.",
1128
+ "Lifeng Shang, Zhengdong Lu, and Hang Li. 2015. Neural responding machine for short-text conversation. In ACL, pages 1577-1586.",
1129
+ "Michel Simard and Pierre Isabelle. 2009. Phrase-based machine translation in a computer-assisted translation environment. Proceedings of the Twelfth Machine Translation Summit (MT Summit XII), pages 120-127.",
1130
+ "James Smith and Stephen Clark. 2009. Ebmt for smt: a new ebmt-smt hybrid. In Proceedings of the 3rd International Workshop on Example-Based Machine Translation, pages 3-10. CiteSeer."
1131
+ ],
1132
+ "bbox": [
1133
+ 510,
1134
+ 85,
1135
+ 880,
1136
+ 917
1137
+ ],
1138
+ "page_idx": 10
1139
+ },
1140
+ {
1141
+ "type": "list",
1142
+ "sub_type": "ref_text",
1143
+ "list_items": [
1144
+ "Harold Somers. 2003. Translation memory systems. *Benjamins Translation Library*, 35:31-48.",
1145
+ "Yiping Song, Rui Yan, Xiang Li, Dongyan Zhao, and Ming Zhang. 2016. Two are better than one: An ensemble of retrieval-and generation-based dialog systems. arXiv preprint arXiv:1610.07149.",
1146
+ "Yixuan Su, Zaiqiao Meng, Simon Baker, and Nigel Collier. 2021a. Few-shot table-to-text generation with prototype memory. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, Virtual Event / Punta Cana, Dominican Republic, 16-20 November, 2021, pages 910-917. Association for Computational Linguistics.",
1147
+ "Yixuan Su, David Vandyke, Simon Baker, Yan Wang, and Nigel Collier. 2021b. Keep the primary, rewrite the secondary: A two-stage approach for paraphrase generation. In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 560-569, Online. Association for Computational Linguistics.",
1148
+ "Yixuan Su, Yan Wang, Deng Cai, Simon Baker, Anna Korhonen, and Nigel Collier. 2021c. PROTOTYPE-TO-STYLE: dialogue generation with style-aware editing on retrieval memory. IEEE ACM Trans. Audio Speech Lang. Process., 29:2152-2161.",
1149
+ "Marco Turchi, Matteo Negri, M Farajian, and Marcello Federico. 2017. Continuous learning from human post-edits for neural machine translation.",
1150
+ "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.",
1151
+ "Oriol Vinyals and Quoc Le. 2015. A neural conversational model. In ICML (Deep Learning Workshop).",
1152
+ "Kun Wang, Chengqing Zong, and Keh-Yih Su. 2013. Integrating translation memory into phrase-based machine translation during decoding. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11-21.",
1153
+ "Kun Wang, Chengqing Zong, and Keh-Yih Su. 2014. Dynamically integrating cross-domain translation memory into phrase-based machine translation during decoding. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 398-408.",
1154
+ "Jason Weston, Emily Dinan, and Alexander Miller. 2018. Retrieve and refine: Improved sequence generation models for dialogue. In Proceedings of the 2018 EMNLP Workshop SCAI: The 2nd International Workshop on Search-Oriented Conversational AI, pages 87-92."
1155
+ ],
1156
+ "bbox": [
1157
+ 115,
1158
+ 85,
1159
+ 487,
1160
+ 917
1161
+ ],
1162
+ "page_idx": 11
1163
+ },
1164
+ {
1165
+ "type": "list",
1166
+ "sub_type": "ref_text",
1167
+ "list_items": [
1168
+ "Yu Wu, Furu Wei, Shaohan Huang, Yunli Wang, Zhoujun Li, and Ming Zhou. 2019. Response generation by context-aware prototype editing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 7281-7288.",
1169
+ "Zeqiu Wu, Michel Galley, Chris Brockett, Yizhe Zhang, Xiang Gao, Chris Quirk, Rik Koncel-Kedziorski, Jianfeng Gao, Hannaneh Hajishirzi, Mari Ostendorf, et al. 2021. A controllable model of grounded response generation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 14085-14093.",
1170
+ "Mengzhou Xia, Guoping Huang, Lemao Liu, and Shuming Shi. 2019. Graph based translation memory for neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 7297-7304.",
1171
+ "Fei Xiao, Liang Pang, Yanyan Lan, Yan Wang, Huawei Shen, and Xueqi Cheng. 2021. Transductive learning for unsupervised text style transfer. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, EMNLP 2021, Virtual Event / Punta Cana, Dominican Republic, 7-11 November, 2021, pages 2510-2521. Association for Computational Linguistics.",
1172
+ "Jitao Xu, Josep M Crego, and Jean Senellart. 2020. Boosting neural machine translation with similar translations. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1580-1590.",
1173
+ "Liu Yang, Junjie Hu, Minghui Qiu, Chen Qu, Jianfeng Gao, W Bruce Croft, Xiaodong Liu, Yelong Shen, and Jingjing Liu. 2019. A hybrid retrieval-generation neural conversation model. In Proceedings of the 28th ACM international conference on information and knowledge management, pages 1341-1350.",
1174
+ "Jingyi Zhang, Masao Utiyama, Eiichiro Sumita, Graham Neubig, and Satoshi Nakamura. 2018. Guiding neural machine translation with retrieved translation pieces. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1325-1335.",
1175
+ "Yizhe Zhang, Siqi Sun, Xiang Gao, Yuwei Fang, Chris Brockett, Michel Galley, Jianfeng Gao, and Bill Dolan. 2021. Joint retrieval and generation training for grounded text generation. arXiv preprint arXiv:2105.06597.",
1176
+ "Zhuosheng Zhang, Kehai Chen, Rui Wang, Masao Utiyama, Eiichiro Sumita, Zuchao Li, and Hai Zhao. 2020. Neural machine translation with universal visual representation. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net."
1177
+ ],
1178
+ "bbox": [
1179
+ 510,
1180
+ 85,
1181
+ 882,
1182
+ 917
1183
+ ],
1184
+ "page_idx": 11
1185
+ },
1186
+ {
1187
+ "type": "list",
1188
+ "sub_type": "ref_text",
1189
+ "list_items": [
1190
+ "Ventsislav Zhechev and Josef Van Genabith. 2010. Seeding statistical machine translation with translation memory output through tree-based structural alignment. In Proceedings of the 4th Workshop on Syntax and Structure in Statistical Translation, pages 43-51.",
1191
+ "Xin Zheng, Zhirui Zhang, Junliang Guo, Shujian Huang, Boxing Chen, Weihua Luo, and Jiajun Chen. 2021a. Adaptive nearest neighbor machine translation. arXiv preprint arXiv:2105.13022.",
1192
+ "Xin Zheng, Zhirui Zhang, Shujian Huang, Boxing Chen, Jun Xie, Weihua Luo, and Jiajun Chen. 2021b. Non-parametric unsupervised domain adaptation for neural machine translation. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 4234-4241.",
1193
+ "Kangyan Zhou, Shrimai Prabhumoye, and Alan W Black. 2018. A dataset for document grounded conversations. arXiv preprint arXiv:1809.07358."
1194
+ ],
1195
+ "bbox": [
1196
+ 115,
1197
+ 85,
1198
+ 489,
1199
+ 365
1200
+ ],
1201
+ "page_idx": 12
1202
+ }
1203
+ ]
2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01110/e0f9d575-e95f-42fa-a038-5b4a519e5d7b_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6e3d63dd0d0cff6960d054f1348d15687d09c71fd812800702fa611e7652381
3
+ size 327285
2202.01xxx/2202.01110/full.md ADDED
@@ -0,0 +1,267 @@
1
+ # A Survey on Retrieval-Augmented Text Generation
2
+
3
+ Huayang Li $^{1,2}$ ,* Yixuan Su $^{1,2}$ ,* Deng Cai $^{1,2}$ ,* Yan Wang $^{3,4}$ ,* Lemao Liu $^{5,6}$
4
+
5
+ $^{\diamond}$ Nara Institute of Science and Technology $\spadesuit$ University of Cambridge
6
+
7
+ $\diamond$ The Chinese University of Hong Kong $\clubsuit$ Tencent AI Lab
8
+
9
+ li.huayang.lh6@is.naist.jp, ys484@cam.ac.uk
10
+
11
+ thisisjcykcd@gmail.com, brandenwang@tencent.com
12
+
13
+ lemaoliu@gmail.com
14
+
15
+ # Abstract
16
+
17
+ Recently, retrieval-augmented text generation has attracted increasing attention from the computational linguistics community. Compared with conventional generation models, retrieval-augmented text generation has remarkable advantages and, in particular, has achieved state-of-the-art performance in many NLP tasks. This paper aims to conduct a survey of retrieval-augmented text generation. It first highlights the generic paradigm of retrieval-augmented generation, and then reviews notable approaches according to different tasks, including dialogue response generation, machine translation, and other generation tasks. Finally, it points out some promising directions on top of recent methods to facilitate future research.
18
+
19
+ # 1 Introduction
20
+
21
+ Retrieval-augmented text generation, as a new text generation paradigm that fuses emerging deep learning technology and traditional retrieval technology, has achieved state-of-the-art (SOTA) performance in many NLP tasks and attracted the attention of the computational linguistics community (Weston et al., 2018; Dinan et al., 2018; Cai et al., 2021). Compared with its generation-based counterpart, this new paradigm has some remarkable advantages: 1) knowledge does not have to be implicitly stored in model parameters, but can be explicitly acquired in a plug-and-play manner, leading to great scalability; 2) instead of generating from scratch, the paradigm generates text from retrieved human-written references, which potentially alleviates the difficulty of text generation.
22
+
23
+ This paper aims to review many representative approaches for retrieval-augmented text generation tasks, including dialogue response generation (Weston et al., 2018), machine translation (Gu et al., 2018) and others (Hashimoto et al., 2018).
24
+
25
+ We first present the generic paradigm of retrieval-augmented generation as well as three key components under this paradigm: retrieval sources, retrieval metrics and generation models.
26
+
27
+ Then, we introduce notable methods for retrieval-augmented generation, organized with respect to different tasks. Specifically, for the dialogue response generation task, exemplar/template retrieval as an intermediate step has been shown to be beneficial to informative response generation (Weston et al., 2018; Wu et al., 2019; Cai et al., 2019a,b). In addition, there has been growing interest in knowledge-grounded generation exploring different forms of knowledge such as knowledge bases and external documents (Dinan et al., 2018; Zhou et al., 2018; Lian et al., 2019; Li et al., 2019; Qin et al., 2019; Wu et al., 2021; Zhang et al., 2021). For the machine translation task, we summarize the early work on how retrieved sentences (called translation memory) are used to improve statistical machine translation (SMT) (Koehn et al., 2003) models (Simard and Isabelle, 2009; Koehn and Senellart, 2010), and in particular we highlight several popular methods for integrating translation memory into NMT models (Gu et al., 2018; Zhang et al., 2018; Xu et al., 2020; He et al., 2021). We also review the applications of retrieval-augmented generation in other generation tasks such as abstractive summarization (Peng et al., 2019), code generation (Hashimoto et al., 2018), paraphrase generation (Kazemnejad et al., 2020; Su et al., 2021b), and knowledge-intensive generation (Lewis et al., 2020b). Finally, we point out some promising directions for retrieval-augmented generation to push forward future research.
28
+
29
+ # 2 Retrieval-Augmented Paradigm
30
+
31
+ In this section, we first give a general formulation of retrieval-augmented text generation. Then, we discuss three major components of the retrieval-augmented generation paradigm, including the
32
+
33
+ ![](images/62d7703b981a995bd5df016d341b6f833767fda6e3c4c9faae126038f7ecd4c4.jpg)
34
+ Figure 1: The overview of this survey.
35
+
36
+ retrieval source, retrieval metric and integration methods.
37
+
38
+ # 2.1 Formulation
39
+
40
+ Most text generation tasks can be formulated as a mapping from input sequence $\pmb{x}$ to output sequence $\pmb{y} : \pmb{y} = f(\pmb{x})$ . For instance, $\pmb{x}$ and $\pmb{y}$ could be the dialogue history and the corresponding response for dialogue response generation, the text in the source language and the translation in the target language for machine translation, and so on.
41
+
42
+ Recently, some researchers suggest endowing models with the capability to access external memory via information retrieval techniques, so that they can acquire more information during the generation process (Gu et al., 2018; Weston et al., 2018; Cai et al., 2019b). Retrieval-augmented generation can then be formulated as:
43
+
44
+ $$
45
+ \boldsymbol {y} = f (\boldsymbol {x}, z) \tag {1}
46
+ $$
47
+
48
+ where $z = \{\langle \boldsymbol{x}^r, \boldsymbol{y}^r \rangle\}$ is a set of relevant instances retrieved from the original training set or external datasets. The main idea of this paradigm is that $\boldsymbol{y}^r$ may benefit the generation if $\boldsymbol{x}^r$ (or $\boldsymbol{y}^r$ ) is similar (or relevant) to the input $\boldsymbol{x}$ . It is worth noting that $\boldsymbol{x}^r = \emptyset$ when unsupervised retrieval sources are used. In general, the retrieval memory can come from three kinds of sources: the training corpus, external datasets in the same format as the training corpus, and large-scale unsupervised corpora (§2.2). Metrics that evaluate the relevance between texts are varied as well; in §2.3 we divide them into three categories: sparse-vector retrieval, dense-vector retrieval, and training-based retrieval. Finally, how to integrate the retrieval memory into the generation model is also significant, and we introduce some popular integration approaches in §2.4.
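+
+ As a minimal, illustrative sketch of Eq. (1), with hypothetical `retrieve_fn` and `generate_fn` standing in for a concrete retriever and generation model:
+
+ ```python
+ # y = f(x, z): retrieve relevant instances for the input, then condition
+ # generation on both the input and the retrieved memory.
+ def retrieval_augmented_generate(x, memory, retrieve_fn, generate_fn, k=3):
+     z = retrieve_fn(x, memory, k)   # z = {<x_r, y_r>}, top-k relevant pairs
+     return generate_fn(x, z)        # y = f(x, z)
+ ```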
49
+
50
+ # 2.2 Retrieval Sources
51
+
52
+ Training Corpus Most previous studies retrieve the external memory from the training corpus (Song et al., 2016; Gu et al., 2018; Weston et al., 2018). At inference time, retrieved examples with high relevance scores can be regarded as extra references and reduce the model's uncertainty in generation. The main motivation of these works is to store knowledge not only in the model parameters but also in an explicit and accessible form, making the model able to re-access it during inference.
53
+
54
+ External Data Some researchers also propose to retrieve relevant samples from external datasets (Su et al., 2021c; Xiao et al., 2021). In these studies, the retrieval pool is different from the training corpus and can thus provide additional information that is not contained in the training corpus. This is especially beneficial for applications such as domain adaptation and knowledge updating. For example, Khandelwal et al. (2020a); Zheng et al. (2021a) employ an in-domain dataset as the external memory to achieve fast domain adaptation for machine translation.
55
+
56
+ Unsupervised Data One limitation of the previous two sources is that the datasets have to be supervised, consisting of aligned input-output pairs. For machine translation, Cai et al. (2021) propose a cross-lingual retriever to directly retrieve target sentences from an unsupervised corpus (i.e., a monolingual corpus in the target language). The main idea is to align source-side sentences and the corresponding target-side translations in a dense vector space, i.e., aligning $\mathbf{x}$ and $\mathbf{y}^r$ when $\mathbf{x}^r$ is absent. As a result, the retriever directly connects the dots between the source-side input and target-side translations, enabling monolingual data in the target language to be used alone as memories.
57
+
58
+
59
+
60
+ # 2.3 Retrieval Metrics
61
+
62
+ Sparse-vector Retrieval Given an input sequence $\pmb{x}$ and a retrieval corpus, the retrieval model aims to retrieve a set of relevant examples $z = \{\langle \pmb{x}^r, \pmb{y}^r \rangle\}$ from the corpus. When a supervised corpus is used, $\{\langle \pmb{x}^r, \pmb{y}^r \rangle\}$ is retrieved by measuring the similarity between $\pmb{x}$ and $\pmb{x}^r$ . For similarity measurement, sparse-vector retrieval methods such as TF-IDF and BM25 (Robertson and Zaragoza, 2009) are widely used; they match keywords efficiently with an inverted index.
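+
+ As a small runnable sketch of sparse-vector retrieval (shown here with scikit-learn's TF-IDF purely for illustration; BM25 follows the same shape):
+
+ ```python
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.metrics.pairwise import linear_kernel
+
+ corpus = ["how are you today", "what is your name", "nice to meet you"]
+ vectorizer = TfidfVectorizer()
+ doc_vectors = vectorizer.fit_transform(corpus)        # sparse index
+
+ query_vector = vectorizer.transform(["how are you"])
+ scores = linear_kernel(query_vector, doc_vectors)[0]  # similarity scores
+ print(corpus[scores.argmax()])                        # best-matching example
+ ```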
63
+
64
+ Dense-vector Retrieval However, these methods prefer examples with similar surface forms and may fail to retrieve examples that are only semantically relevant. To alleviate the above problem, some studies (Cao and Xiong, 2018) attempt to retrieve in a dense-vector space instead of relying on lexical overlap. Recent work (Lee et al., 2019) makes use of pretrained language models, which encode the text into low-dimensional dense vectors via BERT-based encoders. The retrieval scores are computed via inner products between vectors.
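+
+ A corresponding sketch of dense-vector retrieval, where `encode` is a hypothetical BERT-style sentence encoder returning an array of shape (n, d):
+
+ ```python
+ import numpy as np
+
+ def dense_retrieve(query, corpus, encode, k=3):
+     doc_vecs = encode(corpus)     # (n, d) dense index
+     q_vec = encode([query])[0]    # (d,) query vector
+     scores = doc_vecs @ q_vec     # inner products as retrieval scores
+     topk = np.argsort(-scores)[:k]
+     return [(corpus[i], float(scores[i])) for i in topk]
+ ```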
65
+
66
+ Task-specific Retrieval Similarity-based retrieval is based on a simple heuristic: the more $x^r$ resembles $x$ , the more likely $x^r$ and $y^r$ will help the generation. However, the most similar example under a universal textual similarity does not necessarily serve the downstream model best. Ideally, the retrieval metric would be learned from the data in a task-dependent way: we wish to consider a memory only if it can indeed boost the quality of the final generation. To this end, Cai et al. (2021) propose to unify the memory retriever and its downstream generation model into a learnable whole, so that memory retrieval is end-to-end optimized for task-specific objectives.
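+
+ A conceptual PyTorch-style sketch of such end-to-end learnable retrieval; `retriever.score` and `generator.log_prob` are illustrative assumptions, and this sketches the general idea rather than Cai et al. (2021)'s exact model:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def joint_loss(x, y, memories, retriever, generator, k=4):
+     sims = retriever.score(x, memories)           # learnable retrieval scores
+     topk = sims.topk(k)
+     log_pz = F.log_softmax(topk.values, dim=-1)   # log p(z | x) over top-k
+     # marginal likelihood sum_z p(z|x) p(y|x,z); gradients reach the retriever
+     log_py = torch.stack([generator.log_prob(x, memories[i], y)
+                           for i in topk.indices])
+     return -torch.logsumexp(log_py + log_pz, dim=0)
+ ```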
67
+
68
+ # 2.4 Integration
69
+
70
+ Data Augmentation There are several ways to integrate the retrieved external memory into generation. One straightforward way is data augmentation, which constructs augmented inputs by concatenating spans from $\{\langle x^r,y^r\rangle \}$ with the original input $x$ . By training on the augmented inputs, a generation model implicitly learns how to integrate the retrieved information. Despite its simplicity, this kind of method works effectively in lots of tasks (Song et al., 2016; Weston et al., 2018; Bulte and Tezcan, 2019).
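+
+ A tiny sketch of this integration, where `SEP` is an assumed separator token rather than a fixed convention from the cited works:
+
+ ```python
+ SEP = "<sep>"
+
+ def augment_input(x_tokens, retrieved_pairs):
+     # concatenate each retrieved target y_r after the input, so the model
+     # implicitly learns what to copy and what to ignore during training
+     augmented = list(x_tokens)
+     for _, y_r in retrieved_pairs:
+         augmented.append(SEP)
+         augmented.extend(y_r)
+     return augmented
+ ```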
71
+
72
+ Attention Mechanisms Another integration method is based on attention mechanisms (Bahdanau et al., 2014). The main idea of this fashion is to adopt additional encoders (of various architectures) to encode the retrieved target sentences and integrate them through attention (Cao and Xiong, 2018; Gu et al., 2018; Bapna and Firat, 2019). Since the attention mechanism (Bahdanau et al., 2014; Vaswani et al., 2017) has become a key module in lots of NLP models, integrating the retrieved memory through attention is a very natural and efficient way.
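+
+ A conceptual sketch of attention-based integration (shapes and module names are illustrative assumptions):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def attend_memory(decoder_state, memory_states):
+     # decoder_state: (d,); memory_states: (m, d) from the extra encoder
+     scores = memory_states @ decoder_state          # (m,) attention logits
+     weights = F.softmax(scores, dim=-1)
+     context = weights @ memory_states               # (d,) memory summary
+     return torch.cat([decoder_state, context], -1)  # fused representation
+ ```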
73
+
74
+ Skeleton Extraction In the previous two methods, the downstream generation model implicitly learns how to filter out irrelevant or even harmful information from the retrieved examples. There also exist some works that try to explicitly extract the useful information, i.e., a skeleton, from the retrieved memory (Cai et al., 2019a; Wu et al., 2019; Cai et al., 2019b). For example, a skeleton can be a part of a whole utterance with the irrelevant content masked, and the generation model only integrates this skeleton in the generation process.
75
+
76
+ # 3 Dialogue Response Generation
77
+
78
+ Background Dialogue systems can be grouped into two categories: chit-chat systems and task-oriented systems. While task-oriented dialogue systems are designed to accomplish specific user tasks such as booking air tickets, chit-chat dialogue systems aim at giving a meaningful and fluent response for any dialogue history in the open domain. Dialogue response generation in chit-chat systems is challenging partly due to the diversity of possible responses to a single dialogue history (i.e., the one-to-many problem): the dialogue history alone cannot determine a meaningful and specific response. Also, external knowledge that is not present in the dialogue history is often necessary for avoiding safe but boring responses. We focus on recent efforts tackling these challenges to develop chit-chat dialogue systems.
79
+
80
+ Most modern chit-chat dialogue systems can be categorized into two classes, namely, retrieval-based models and generation-based models. The retrieval-based models (Ji et al., 2014; Hu et al., 2014) directly copy an existing response from curated dialogue corpora (i.e., the retrieval pool) when receiving a response request. The retrieved responses are often informative and grammatical as they are collected from real-world conversations and possibly post-edited by a human. However, such systems perform poorly when a given dialogue history is substantially different from those in the retrieval pool. On the other hand, the generation-based models (Shang et al., 2015; Vinyals and Le, 2015; Li et al., 2016a) generate a new utterance from scratch. These generation-based models have better generalization capacity when handling unseen dialogue contexts. Nevertheless, the generated utterances are inclined to be dull and non-informative (e.g., "I don't know", "I think so", "Me too", etc.) (Li et al., 2016a).
83
+
84
+ Shallow Integration As discussed, retrieval-based models may give informative but inappropriate responses, while generation-based models often do the opposite. It is desirable to combine the best of both worlds. Early work (Qiu et al., 2017) attempts to re-rank the outputs of both models. For a deeper integration, Song et al. (2016) and Yang et al. (2019) extend the standard SEQ2SEQ encoder-decoder model (Bahdanau et al., 2014) with an extra encoder for the retrieval result; the output of the extra encoder, along with the output of the original encoder for the dialogue history, is fed to the decoder. Weston et al. (2018) use a single encoder that takes the concatenation of the original dialogue history and the retrieved response as input. Wu et al. (2019) note that the retrieved information should be used in awareness of the context difference, and further propose to construct an edit vector by explicitly encoding the lexical differences between the input dialogue history and the retrieved dialogue history (a minimal sketch follows). Pandey et al. (2018) further propose to weight different training instances by context similarity.
85
+
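+ The lexical differences underlying the edit vector can be computed as two word sets; how they are embedded and pooled into the actual edit vector is left to the model, so the following only sketches the first step:
+
+ ```python
+ def lexical_diff(context, retrieved_context):
+     """Insertion words occur only in the current context; deletion words
+     occur only in the retrieved one (cf. Wu et al., 2019)."""
+     cur, ret = set(context), set(retrieved_context)
+     return sorted(cur - ret), sorted(ret - cur)
+
+ print(lexical_diff("i like jazz".split(), "i like rock".split()))
+ # (['jazz'], ['rock'])
+ ```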
86
+ Deep Integration To prevent the inflow of erroneous information, Cai et al. (2019a) propose a general framework that first extracts a skeleton from the retrieved response and then generates the response based on the extracted skeleton. This framework is also adopted for stylistic response generation (Su et al., 2021c). Gupta et al. (2021) suggest using the semantic structure of an exemplar response, instead of its tokens, to guide generation. Despite their differences, a common issue is that the generation model easily learns to ignore the retrieved response entirely and collapses into a vanilla SEQ2SEQ model. This happens with improper training instances: due to the one-to-many nature, it happens frequently that a retrieved response (or extracted skeleton) is suitable for responding to the query, yet inconsistent with the current target response.
89
+
90
+ Earlier studies (Weston et al., 2018; Wu et al., 2019; Cai et al., 2019a) alleviate the above problem by putting hard constraints on the data (e.g., discarding data where the retrieved response and the target response have low similarity), which, however, greatly reduces the amount of usable data. Cai et al. (2019b) employ a random mechanism for generating the skeletons used for training, which extracts skeletons from the corresponding responses with some deliberate disturbance. Paranjape et al. (2021) propose to model the retriever after the posterior distribution of retrieval given both the input and the target output, and train it jointly with the standard retriever and the generator by maximizing the evidence lower bound (ELBO) in expectation over retrieval.
91
+
92
+ Knowledge-Enhanced Generation The aforementioned work demonstrates that retrieval-based dialogue systems can be used for building better generation-based models. In general, this is done by conditioning the generation on some retrieved responses. More generally, the retrieval pool used to infuse the response with external knowledge is not necessarily a dialogue corpus. In fact, knowledge-grounded dialogue response generation, which exploits different forms of knowledge such as knowledge bases and external documents, has been actively explored (Dinan et al., 2018; Zhou et al., 2018; Lian et al., 2019; Li et al., 2019; Qin et al., 2019; Wu et al., 2021; Zhang et al., 2021; Komeili et al., 2021).
93
+
94
+ Limitations We note three major limitations in existing work on dialogue response generation. First, current methods only use one retrieved response for generation; it could be more beneficial to combine multiple retrieved responses, although this is difficult due to the one-to-many nature of dialogue response generation. Second, current methods use a universal relevance score for retrieval; a more customized retrieval metric could be more effective, especially for controlled dialogue response generation (e.g., persona, emotion, etc.). Third, the retrieval pool of existing methods is limited to dialogue corpora (context-response pairs) or documents; it might be useful to enlarge the retrieval pool by including corpora from other domains or even other modalities. As discussed, plenty of directions remain to be explored in the future.
97
+
98
+ # 4 Machine Translation
99
+
100
+ Retrieval augmented translation originates from human translation scenarios (Somers, 2003). When translating an input source sentence $\pmb{x}$ into $\hat{\pmb{y}}$ , a human translator typically uses a search engine to retrieve similar sentence pairs $\{\langle \pmb{x}^r,\pmb{y}^r\rangle\}$ from a bilingual database. This technique, called translation memory (TM), helps human translators improve both translation quality and efficiency (Dillon and Fraser, 2006). With the development of machine translation techniques, there has been a surge of interest in improving machine translation models with translation memory. In the rest of this section, we review translation memory for both statistical machine translation (SMT) and neural machine translation (NMT).
101
+
102
+ # 4.1 Translation Memory in SMT
103
+
104
+ Generally, SMT consists of three key components organized as a pipeline: phrase table extraction, parameter tuning, and decoding (Koehn et al., 2003; Chiang, 2007). As a result, many efforts have been made to exploit translation memory (TM) on top of each component.
105
+
106
+ Constrained Decoding with TM Constrained decoding is the most straightforward way to integrate TM into SMT (Smith and Clark, 2009; Koehn and Senellart, 2010; Zhechev and Van Genabith, 2010; Ma et al., 2011). Its basic idea is to reuse the useful segments in $\pmb{y}^{r}$ while translating the other segments with SMT. Specifically, the approach consists of three steps: 1) identify the unmatched segments in both $\pmb{x}^{r}$ and $\pmb{x}$ through the edit-distance algorithm; 2) identify the unmatched segments in $\pmb{y}^{r}$ , each of which is aligned to one unmatched segment in $\pmb{x}^{r}$ by a word alignment algorithm; 3) decode each unmatched segment in $\pmb{x}$ with SMT and use the result to replace its corresponding unmatched segment in $\pmb{y}^{r}$ (step 1 is illustrated right after this paragraph). Li et al. (2016b) further extend this approach from the sentence level to the phrase level. The advantage of constrained decoding is that it does not require changing the translation model (including the phrase table and parameters) and can be applied in a plug-and-play way. This approach is successful when $\pmb{x}$ is highly similar to $\pmb{x}^{r}$ ; otherwise its performance degrades considerably, because it strictly separates TM matching from SMT decoding and decides in a deterministic way whether to reuse the retrieved segments.
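+ Step 1 can be illustrated with Python's standard difflib, which implements a longest-matching-block alignment rather than the exact edit-distance algorithm of the cited works, but recovers the same kind of unmatched segments:
+
+ ```python
+ from difflib import SequenceMatcher
+
+ def unmatched_segments(x_r, x):
+     """Return the word segments of x that have no counterpart in x^r."""
+     sm = SequenceMatcher(a=x_r, b=x)
+     return [x[j1:j2] for tag, i1, i2, j1, j2 in sm.get_opcodes()
+             if tag != "equal" and j1 != j2]
+
+ x_r = "please book a flight to paris".split()
+ x = "please book a train to berlin".split()
+ print(unmatched_segments(x_r, x))  # [['train'], ['berlin']]
+ ```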
109
+
110
+ Phrase Table Aggregation with TM There are also notable efforts to augment the SMT phrase table by extracting translation rules from the retrieved bilingual sentences $\{\langle x^r,y^r\rangle \}$ and then re-tuning the parameters of the SMT model, which thus makes use of translation knowledge from $\{\langle x^r,y^r\rangle \}$ in an implicit way when translating $x$ . For example, Biçici and Dymetman (2008) and Simard and Isabelle (2009) directly combine the extracted translation rules into the phrase table in a shallow way: they introduce an additional feature indicating whether a translation rule comes from $\{\langle x^r,y^r\rangle \}$ and then train all feature weights with MERT (Och, 2003). One shortcoming of these works is that a translation rule extracted from $\{\langle x^r,y^r\rangle \}$ that cannot exactly match any segment in $x$ is useless, even if its target side contains useful words. To remedy this, Wang et al. (2013, 2014) resort to a deep combination of the extracted translation rules: for each rule in the phrase table, they design a generative model that rewards rules similar to those extracted from $\{\langle x^r,y^r\rangle \}$ . This generative model is then used as a feature in the log-linear SMT model, whose weight is tuned together with the other features by MERT. In addition, Li et al. (2014) reward the rules in a similar way but rely on a discriminative model, which makes it easy to integrate additional features from $\{\langle x^r,y^r\rangle \}$ .
111
+
112
+ Parameter Tuning with TM Unlike the above two research lines, Liu et al. (2012, 2014) make use of translation memory only for tuning parameters. Specifically, when translating an input sentence $\pmb{x}$ , they first retrieve similar bilingual sentences $\{\langle \pmb{x}^r, \pmb{y}^r \rangle\}$ , and then tune the parameters on the retrieved sentences as well as a given development dataset in a sentence-wise manner, i.e., they perform independent tuning for each input sentence. To improve the efficiency of each tuning step, they propose a local update on top of $\{\langle \pmb{x}^r, \pmb{y}^r \rangle\}$ starting from a baseline model.
113
+
114
+ Despite the successes of translation memory in SMT, the above three kinds of methods still have some limitations. Firstly, all of them employ the fuzzy match score for retrieval, which depends heavily on surface word matching and thus cannot recall examples that are semantically similar but different in surface form (the standard fuzzy match score is sketched below). Secondly, these methods integrate the retrieved examples into a single SMT module in ways that cannot make full use of the knowledge in the retrieved examples: the integration in the first two lines (constrained decoding and phrase table aggregation) is heuristic and not optimized towards translation quality, while the parameter tuning method fine-tunes only a few parameters of the log-linear SMT model, which is not enough to absorb sufficient knowledge from the retrieved examples. Thirdly, since SMT operates as a pipeline, it is intractable to jointly optimize the retrieval metric and the SMT model. Consequently, all these methods adopt an off-the-shelf metric for retrieval, leading to suboptimal performance.
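+ For reference, a common form of the fuzzy match score is one minus the word-level edit distance normalized by the longer sentence length; a self-contained sketch:
+
+ ```python
+ def edit_distance(a, b):
+     """Word-level Levenshtein distance via dynamic programming."""
+     dp = list(range(len(b) + 1))
+     for i, ai in enumerate(a, 1):
+         prev, dp[0] = dp[0], i
+         for j, bj in enumerate(b, 1):
+             prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ai != bj))
+     return dp[-1]
+
+ def fuzzy_match(x, x_r):
+     """Score in [0, 1]; 1 means the two sentences are identical."""
+     return 1 - edit_distance(x, x_r) / max(len(x), len(x_r))
+
+ print(fuzzy_match("the cat sat".split(), "the cat slept".split()))  # ~0.667
+ ```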
117
+
118
+ # 4.2 Translation Memory in NMT
119
+
120
+ Translation memory has been widely explored in neural machine translation (NMT). Depending on when retrieval is involved, we can categorize previous works into two classes: 1) the NMT model learns how to cooperate with the retrieval model in the training phase; 2) the NMT model becomes aware of the retrieved data only in the inference phase.
121
+
122
+ Inference Phase The key idea of this line of work is to reward certain target words, based on the words in $\pmb{y}^r$ , during inference; a decision can then be made based on both the distribution of the generation model and the additional reward from the retrieval model. Some previous works reward target words based on the sentence-level similarity between $\pmb{x}$ and $\pmb{x}^r$ and the word alignment between $\pmb{x}^r$ and $\pmb{y}^r$ . Given the input sentence $\pmb{x}$ , Zhang et al. (2018) assign higher rewards to target words in $\hat{\pmb{y}}$ that appear in $\pmb{y}^r$ and whose aligned source words occur in both $\pmb{x}^r$ and $\pmb{x}$ . He et al. (2019) follow a similar framework and additionally consider the positions of those target words when rewarding. These works reward target words explicitly; in contrast, the one-sentence-one-model approach (Li et al., 2016c; Turchi et al., 2017) rewards target words implicitly: for each test input $\pmb{x}$ , it first fine-tunes the translation model on the retrieved memory $\{\langle \pmb{x}^r, \pmb{y}^r \rangle\}$ and then translates $\pmb{x}$ .
123
+
124
+ Others reward target words based on token-level similarity scores. Most works in this line rely on dense retrieval (Khandelwal et al., 2020a), e.g., with faiss. Khandelwal et al. (2020a) build a key-value datastore, where the key $h(\pmb{x}^r, \pmb{y}_{<t}^r)$ is the hidden state at each time step when translating $\pmb{y}^r$ from $\pmb{x}^r$ , and the value is the corresponding ground-truth target word $\pmb{y}_t^r$ . At inference time, they use $h(\pmb{x}, \hat{\pmb{y}}_{<t})$ as the query and reward target words whose hidden representations in the datastore are similar (a minimal sketch of this interpolation follows). Although this method achieves significant performance gains, one drawback is its high latency. To address this issue, Meng et al. (2021) use heuristics, e.g., pre-filtering, to avoid searching the entire datastore. Whereas previous works obtain the reward score with non-parametric approaches, Zheng et al. (2021a) propose a lightweight network to learn it. Since dense retrieval has the potential for cross-lingual retrieval, Zheng et al. (2021b) use a similar approach to achieve unsupervised domain adaptation, where the main change is to create the datastore from synthetic source sentences and real target sentences.
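+ The reward-and-interpolate step can be sketched as follows (a simplified, brute-force version; real systems use approximate search such as faiss, and the hyperparameters here are illustrative):
+
+ ```python
+ import numpy as np
+
+ def knn_interpolate(model_probs, query, keys, values, vocab_size,
+                     k=4, temp=10.0, lam=0.5):
+     """Mix the NMT distribution with a kNN distribution over the datastore
+     (keys: cached hidden states, values: the matching target-word ids)."""
+     d = np.sum((keys - query) ** 2, axis=-1)       # L2 distance to every key
+     nn = np.argsort(d)[:k]                         # k nearest neighbours
+     w = np.exp(-d[nn] / temp)                      # distance-based weights
+     knn_probs = np.zeros(vocab_size)
+     np.add.at(knn_probs, values[nn], w / w.sum())  # aggregate per target word
+     return lam * knn_probs + (1 - lam) * model_probs
+ ```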
127
+
128
+ Training Phase Different from those model-agnostic approaches, works in this line aim to train the generation model to learn how to cooperate with the retrieval model. It is also worth noting that most of these works adopt sentence-level retrieval when integrating the retrieval information into the training process. Bulte and Tezcan (2019) and Hossain et al. (2020) propose a data augmentation method to integrate the retrieved information, where $\pmb{x}$ is concatenated with $\pmb{y}^r$ before being fed into the model. Following this data augmentation approach, Xu et al. (2020) propose further matching methods to determine which retrieved examples are best to include in the source.
129
+
130
+ There also exist works that propose new architectures to integrate the retrieval information. Under the RNN-based framework, Cao and Xiong (2018) and Gu et al. (2018) use gating and attention mechanisms to incorporate the retrieved target sentences. After the Transformer (Vaswani et al., 2017) became the backbone of NMT, some works use additional Transformer encoders to encode the retrieved target sentences and integrate them through the attention mechanism (Bapna and Firat, 2019; Cao et al., 2019). Xia et al. (2019) represent the retrieved target sentences in a different data structure, i.e., a graph, and integrate it through attention. He et al. (2021) propose a lightweight method to encode the retrieved target sentences and leverage alignment information to filter out irrelevant content. Different from previous works that rely on bilingual memories, Cai et al. (2021) propose a framework that retrieves the most similar target sentences from a monolingual dataset, using the source sentence as the query.
133
+
134
+ Limitations In the SMT section, we discussed some limitations of retrieval augmented approaches; some limitations also exist in the NMT line of work. First, the information used for deriving reward scores is limited: the similarity between the input and the retrieved examples is the primary feature, yet other information, e.g., the frequencies of words and contexts, may also be beneficial for integrating the translation memory. Second, it remains an open question when the retrieved information should be used and when it should not. In the inference phase, current approaches tend to integrate the translation memory excessively, e.g., at each time step, which not only reduces translation efficiency but may also dampen the fluency of the generated results.
135
+
136
+ # 5 Other Tasks
137
+
138
+ In addition to dialogue systems and machine translation, retrieval-augmented generation techniques have been shown to be beneficial in many other tasks. In the following, we highlight several key tasks that apply retrieval-augmented generation approaches. $^{1}$
139
+
140
+ Language Modelling It has been shown that properly leveraging information from retrieval memory can improve the performance of large pre-trained language models. To build a more accurate language model, Khandelwal et al. (2020b) incorporate a soft memory module into the system: an index is built by caching the hidden states of the training corpus, and the language model accesses the index via k-NN search, yielding greatly improved performance. As another example, Guu et al. (2020) propose a new paradigm that applies the retrieval-augmented technique to the pre-training of a generative language model. During learning, they train a neural selector that dynamically samples a relevant text to guide the reconstruction of a corrupted input sequence; in this way, the pre-trained model delivers better results by explicitly grounding on the retrieval memory. Lewis et al. (2020a) combine language model pre-training with a paraphrasing approach: an input sequence is first corrupted, and a set of multi-lingual texts is retrieved, based on which the model learns to reconstruct the original input sequence. Recently, Borgeaud et al. (2021) propose RETRO, a large pre-trained language model enhanced with retrieved documents, and obtain performance comparable to GPT-3 with $25 \times$ fewer parameters.
143
+
144
+ Summarization Text summarization is another area that benefits from retrieval-augmented text generation. Peng et al. (2019) propose an adaptive decoding framework which first retrieves an exemplar document given the source document; the summary of the source document is then derived through an adaptive generation process based on the retrieved template. Differently, Cao et al. (2018) and Hossain et al. (2020) introduce an intermediate re-ranking stage into the generation pipeline: before generating the summary, the retrieved documents are first re-ranked based on their similarity scores with respect to the source document, and the summary is then produced by rewriting the selected templates.
145
+
146
+ Paraphrase Generation To address the lack of quality and diversity in paraphrase generation, Kazemnejad et al. (2020) propose a generation framework which first retrieves a sentence that is similar to the input sentence; based on the retrieved sentence, a neural editor then produces the paraphrased result. Chen et al. (2019) investigate a different aspect of paraphrasing, i.e., how to control the linguistic syntax of the generated text. To this end, they propose to first extract a sentential exemplar that serves as the syntax template; a neural model then generates the paraphrase with the desired syntax following the retrieved exemplar.
147
+
148
+ Text Style Transfer To improve the quality of generated text, Li et al. (2018) propose a retrieval-augmented framework which first retrieves texts that are similar to the input based on lexical-level similarity; the retrieved tokens that are irrelevant to the source are then deleted, and the output is derived from the edited template. Xiao et al. (2021) also adopt this framework, incorporating retrieval information from two sources (i.e., sparse and dense memories), and obtain improved model performance.
151
+
152
+ Data-to-Text Generation Recently, retrieval-augmented generation has been adapted to the task of data-to-text generation. To bridge the gap between structured data and natural language text, Su et al. (2021a) propose a novel retrieval-augmented framework. Specifically, given the source data, a set of candidate texts is first retrieved from a large unlabelled corpus. Then, a neural selector measures the similarities between the source data and the candidate texts and extracts a set of finer-grained prototypes from the candidates. Lastly, a generation model takes the prototypes as input to produce the text that describes the given structured data.
153
+
154
+ While retrieval-augmented generation has been widely explored in the NLP community, we suggest that future research extend this approach to tasks that involve data from multiple modalities. For instance, with recent advances in image-text retrieval (Jia et al., 2021; Radford et al., 2021), the structural gap between images and texts has largely been bridged. Early studies (Zhang et al., 2020) have shown that information retrieved from images can improve the performance of neural machine translation models. Naturally, such methods could be extended to other multi-modal tasks, such as image captioning (Karpathy and Li, 2015). A similar idea could also be applied to tasks beyond images, such as speech-to-text transcription (Gales and Young, 2007).
155
+
156
+ # 6 Future Directions
157
+
158
+ Despite the current success of retrieval augmented text generation, there is still a long way to go, as discussed in the previous sections. We highlight some directions to facilitate future research:
159
+
160
+ Retrieval Sensitivity The performance of retrieval augmented text generation is very sensitive to the retrieval quality, i.e., the similarity between the query and the retrieved examples. Current models perform well when the retrieved examples are very similar to the query, but can be even worse than generation models without retrieval when the retrieved examples are less similar. It is therefore important to develop new methods that remain robust when only dissimilar examples can be retrieved; a simple gating baseline is sketched below.
161
+
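+ A trivial but often reasonable baseline (our sketch; `retrieve`, `generate`, and `sim` are placeholder callables, and `tau` is a tuned threshold):
+
+ ```python
+ def gated_generate(x, retrieve, generate, sim, tau=0.6):
+     """Condition on the retrieved example only when it is similar enough;
+     otherwise fall back to plain generation so bad memory cannot hurt."""
+     memory = retrieve(x)
+     if memory is None or sim(x, memory) < tau:
+         return generate(x)
+     return generate(x, memory=memory)
+ ```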
162
+ Retrieval Efficiency Generally, if one enlarges the retrieval memory enough, it becomes possible to retrieve an example that is very similar to the query. Unfortunately, the downside is that the overall inference of retrieval augmented generation models becomes less efficient due to the considerable retrieval overhead. It is therefore important to find methods that trade off retrieval memory size against retrieval efficiency, for example, data compression for the retrieval memory.
163
+
164
+ Local vs. Global Optimization Theoretically, it seems promising to jointly learn retrieval metrics and generation models. In practice, however, there is an essential gap in the retrieval metric between the training and inference phases: in the training phase, the loss is back-propagated locally to only a few retrieved examples, while in the inference phase the metric is applied globally to all examples in the memory. It would be interesting to narrow this gap when learning a better metric for generation tasks.
165
+
166
+ Multi-Modalities With recent advances in image-text retrieval, directly associating images with relevant text has become possible. This urges researchers to investigate retrieval-based text generation in tasks that involve data from different modalities. One typical task is image captioning. Beyond images, other tasks such as speech-to-text transcription could potentially benefit from retrieval-based generation methods as well.
167
+
168
+ Diverse & Controllable Retrieval Most existing approaches adopt a universal metric for retrieval, such as the lexical similarity of sentences. Future work should explore customized metrics for retrieval, which can be beneficial for more controlled text generation: for example, instances with particular emotions and styles may be more desirable in personalized dialogue generation, and parallel data containing specific terminologies is more helpful in machine translation. On the other hand, a universal retrieval metric may lead to a lack of diversity in the retrieval results, whereas collecting a diverse set of retrieval results can improve the coverage of useful information. Thus, considering multiple different metrics for retrieval may lead to higher-quality generation in the future.
169
+
170
+ # 7 Conclusion
171
+
172
+ In this paper, we surveyed recent approaches to retrieval-augmented text generation. We reviewed and summarized the development of its different components, including retrieval metrics, retrieval sources, and integration paradigms. We gave in-depth discussions of retrieval-augmented text generation in different applications, including dialogue response generation, machine translation, and other generation tasks. We also pointed out some future directions for retrieval-augmented text generation.
173
+
174
+ # References
175
+
176
+ Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.
177
+ Ankur Bapna and Orhan Firat. 2019. Non-parametric adaptation for neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1921-1931.
178
+ Ergun Biçici and Marc Dymetman. 2008. Dynamic translation memory: Using statistical machine translation to improve translation memory fuzzy matches. In International Conference on Intelligent Text Processing and Computational Linguistics, pages 454-465. Springer.
179
+ Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George van den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, Diego de Las Casas, Aurelia Guy, Jacob Menick, Roman Ring, Tom Hennigan, Saffron Huang, Loren Maggiore, Chris Jones, Albin Cassirer, Andy Brock, Michela Paganini, Geoffrey Irving, Oriol Vinyals, Simon Osindero, Karen Simonyan, Jack W. Rae, Erich Elsen, and Laurent Sifre. 2021. Improving language models by retrieving from trillions of tokens. CoRR, abs/2112.04426.
180
+ Bram Bulte and Arda Tezcan. 2019. Neural fuzzy repair: Integrating fuzzy matches into neural machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1800-1809.
181
+ Deng Cai, Yan Wang, Wei Bi, Zhaopeng Tu, Xiaojiang Liu, Wai Lam, and Shuming Shi. 2019a. Skeleton-to-response: Dialogue generation guided by retrieval memory. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1219-1228.
182
+
183
+ Deng Cai, Yan Wang, Wei Bi, Zhaopeng Tu, Xiaojiang Liu, and Shuming Shi. 2019b. Retrieval-guided dialogue response generation via a matching-to-generation framework. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1866-1875.
184
+ Deng Cai, Yan Wang, Huayang Li, Wai Lam, and Lemao Liu. 2021. Neural machine translation with monolingual translation memory. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 7307-7318, Online. Association for Computational Linguistics.
185
+ Qian Cao, Shaohui Kuang, and Deyi Xiong. 2019. Learning to reuse translations: Guiding neural machine translation with examples. arXiv preprint arXiv:1911.10732.
186
+ Qian Cao and Deyi Xiong. 2018. Encoding gated translation memory into neural machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3042-3047.
187
+ Ziqiang Cao, Wenjie Li, Sujian Li, and Furu Wei. 2018. Retrieve, rerank and rewrite: Soft template based neural summarization. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018, Melbourne, Australia, July 15-20, 2018, Volume 1: Long Papers, pages 152-161. Association for Computational Linguistics.
188
+ Danqi Chen and Wen-tau Yih. 2020. Open-domain question answering. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 34-37, Online. Association for Computational Linguistics.
189
+ Mingda Chen, Qingming Tang, Sam Wiseman, and Kevin Gimpel. 2019. Controllable paraphrase generation with a syntactic exemplar. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 5972-5984. Association for Computational Linguistics.
190
+ David Chiang. 2007. Hierarchical phrase-based translation. Computational Linguistics, 33(2):201-228.
191
+ Sarah Dillon and Janet Fraser. 2006. Translators and tm: An investigation of translators' perceptions of translation memory adoption. Machine Translation, 20(2):67-79.
192
+ Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2018. Wizard of wikipedia: Knowledge-powered conversational agents. arXiv preprint arXiv:1811.01241.
193
+
194
+ Mark J. F. Gales and Steve J. Young. 2007. The application of hidden markov models in speech recognition. Found. Trends Signal Process., 1(3):195-304.
195
+ Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor OK Li. 2018. Search engine guided neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.
196
+ Prakhar Gupta, Jeffrey Bigham, Yulia Tsvetkov, and Amy Pavel. 2021. Controlling dialogue generation with semantic exemplars. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3018-3029, Online. Association for Computational Linguistics.
197
+ Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. 2020. REALM: retrieval-augmented language model pre-training. CoRR, abs/2002.08909.
198
+ Tatsunori B Hashimoto, Kelvin Guu, Yonatan Oren, and Percy S Liang. 2018. A retrieve-and-edit framework for predicting structured outputs. In Advances in Neural Information Processing Systems, pages 10052-10062.
199
+ Qiuxiang He, Guoping Huang, Qu Cui, Li Li, and Lemao Liu. 2021. Fast and accurate neural machine translation with translation memory. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3170-3180.
200
+ Qiuxiang He, Guoping Huang, Lemao Liu, and Li Li. 2019. Word position aware translation memory for neural machine translation. In CCF International Conference on Natural Language Processing and Chinese Computing, pages 367-379. Springer.
201
+ Nabil Hossain, Marjan Ghazvininejad, and Luke Zettlemoyer. 2020. Simple and effective retrieve-edit-rerank text generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2532-2538.
202
+ Baotian Hu, Zhengdong Lu, Hang Li, and Qingcai Chen. 2014. Convolutional neural network architectures for matching natural language sentences. In NIPS, pages 2042-2050.
203
+ Zongcheng Ji, Zhengdong Lu, and Hang Li. 2014. An information retrieval approach to short text conversation. arXiv preprint arXiv:1408.6988.
204
+ Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 4904-4916. PMLR.
205
+
206
+ Andrej Karpathy and Fei-Fei Li. 2015. Deep visual-semantic alignments for generating image descriptions. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2015, Boston, MA, USA, June 7-12, 2015, pages 3128-3137. IEEE Computer Society.
207
+ Amirhossein Kazemnejad, Mohammadreza Salehi, and Mahdieh Soleymani Baghshah. 2020. Paraphrase generation by learning how to edit from samples. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6010-6021, Online. Association for Computational Linguistics.
208
+ Urvashi Khandelwal, Angela Fan, Dan Jurafsky, Luke Zettlemoyer, and Mike Lewis. 2020a. Nearest neighbor machine translation. arXiv preprint arXiv:2010.00710.
209
+ Urvashi Khandelwal, Omer Levy, Dan Jurafsky, Luke Zettlemoyer, and Mike Lewis. 2020b. Generalization through memorization: Nearest neighbor language models. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
210
+ Philipp Koehn, Franz J. Och, and Daniel Marcu. 2003. Statistical phrase-based translation. In Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics, pages 127-133.
211
+ Philipp Koehn and Jean Senellart. 2010. Convergence of translation memory and statistical machine translation. In Proceedings of AMTA Workshop on MT Research and the Translation Industry, pages 21-31.
212
+ Mojtaba Komeili, Kurt Shuster, and Jason Weston. 2021. Internet-augmented dialogue generation. arXiv preprint arXiv:2107.07566.
213
+ Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300.
214
+ Mike Lewis, Marjan Ghazvininejad, Gargi Ghosh, Armen Aghajanyan, Sida Wang, and Luke Zettlemoyer. 2020a. Pre-training via paraphrasing. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.
215
+ Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020b. Retrieval-augmented generation for knowledge-intensive nlp tasks. arXiv preprint arXiv:2005.11401.
216
+ Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016a. A diversity-promoting objective function for neural conversation models. In *NAACL*, pages 110-119.
217
+
218
+ Juncen Li, Robin Jia, He He, and Percy Liang. 2018. Delete, retrieve, generate: a simple approach to sentiment and style transfer. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 1865-1874. Association for Computational Linguistics.
219
+ Liangyou Li, Andy Way, and Qun Liu. 2014. A discriminative framework of integrating translation memory features into smt. In Proceedings of the 11th Conference of the Association for Machine Translation in the Americas, volume 1, pages 249-260.
220
+ Liangyou Li, Andy Way, and Qun Liu. 2016b. Phrase-level combination of smt and tm using constrained word lattice. Association for Computational Linguistics (ACL).
221
+ Xiaoqing Li, Jiajun Zhang, and Chengqing Zong. 2016c. One sentence one model for neural machine translation. arXiv preprint arXiv:1609.06490.
222
+ Zekang Li, Cheng Niu, Fandong Meng, Yang Feng, Qian Li, and Jie Zhou. 2019. Incremental transformer with deliberation decoder for document grounded conversations. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 12-21.
223
+ Rongzhong Lian, Min Xie, Fan Wang, Jinhua Peng, and Hua Wu. 2019. Learning to select knowledge for response generation in dialog systems. arXiv preprint arXiv:1902.04911.
224
+ Lemao Liu, Hailong Cao, Taro Watanabe, Tiejun Zhao, Mo Yu, and Conghui Zhu. 2012. Locally training the log-linear model for smt. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 402-411.
225
+ Lemao Liu, Tiejun Zhao, Taro Watanabe, Hailong Cao, and Conghui Zhu. 2014. Discriminative training for log-linear based smt: Global or local methods. ACM Transactions on Asian Language Information Processing (TALIP), 13(4):1-25.
226
+ Yanjun Ma, Yifan He, Andy Way, and Josef van Genabith. 2011. Consistent translation using discriminative learning-a translation memory-inspired approach. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 1239-1248.
227
+ Yuxian Meng, Xiaoya Li, Xiayu Zheng, Fei Wu, Xiaofei Sun, Tianwei Zhang, and Jiwei Li. 2021. Fast nearest neighbor machine translation. arXiv preprint arXiv:2105.14528.
228
+ Franz Josef Och. 2003. Minimum error rate training in statistical machine translation. In Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics, pages 160-167, Sapporo, Japan. Association for Computational Linguistics.
231
+ Gaurav Pandey, Danish Contractor, Vineet Kumar, and Sachindra Joshi. 2018. Exemplar encoder-decoder for neural conversation generation. In ACL, pages 1329-1338.
232
+ Ashwin Paranjape, Omar Khattab, Christopher Potts, Matei Zaharia, and Christopher D Manning. 2021. Hindsight: Posterior-guided training of retrievers for improved open-ended generation. arXiv preprint arXiv:2110.07752.
233
+ Hao Peng, Ankur P. Parikh, Manaal Faruqui, Bhuwan Dhingra, and Dipanjan Das. 2019. Text generation with exemplar-based adaptive decoding. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.
234
+ Lianhui Qin, Michel Galley, Chris Brockett, Xiaodong Liu, Xiang Gao, William B Dolan, Yejin Choi, and Jianfeng Gao. 2019. Conversing by reading: Contentful neural conversation with on-demand machine reading. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5427-5436.
235
+ Minghui Qiu, Feng-Lin Li, Siyu Wang, Xing Gao, Yan Chen, Weipeng Zhao, Haiqing Chen, Jun Huang, and Wei Chu. 2017. Alime chat: A sequence to sequence and rerank based chatbot engine. In ACL, pages 498-503.
236
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 8748-8763. PMLR.
237
+ Stephen Robertson and Hugo Zaragoza. 2009. The probabilistic relevance framework: BM25 and beyond. Now Publishers Inc.
238
+ Lifeng Shang, Zhengdong Lu, and Hang Li. 2015. Neural responding machine for short-text conversation. In ACL, pages 1577-1586.
239
+ Michel Simard and Pierre Isabelle. 2009. Phrase-based machine translation in a computer-assisted translation environment. Proceedings of the Twelfth Machine Translation Summit (MT Summit XII), pages 120-127.
240
+ James Smith and Stephen Clark. 2009. Ebmt for smt: a new ebmt-smt hybrid. In Proceedings of the 3rd International Workshop on Example-Based Machine Translation, pages 3-10. CiteSeer.
241
+
242
+ Harold Somers. 2003. Translation memory systems. *Benjamins Translation Library*, 35:31-48.
243
+ Yiping Song, Rui Yan, Xiang Li, Dongyan Zhao, and Ming Zhang. 2016. Two are better than one: An ensemble of retrieval-and generation-based dialog systems. arXiv preprint arXiv:1610.07149.
244
+ Yixuan Su, Zaiqiao Meng, Simon Baker, and Nigel Collier. 2021a. Few-shot table-to-text generation with prototype memory. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, Virtual Event / Punta Cana, Dominican Republic, 16-20 November, 2021, pages 910-917. Association for Computational Linguistics.
245
+ Yixuan Su, David Vandyke, Simon Baker, Yan Wang, and Nigel Collier. 2021b. Keep the primary, rewrite the secondary: A two-stage approach for paraphrase generation. In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 560-569, Online. Association for Computational Linguistics.
246
+ Yixuan Su, Yan Wang, Deng Cai, Simon Baker, Anna Korhonen, and Nigel Collier. 2021c. PROTOTYPE-TO-STYLE: dialogue generation with style-aware editing on retrieval memory. IEEE ACM Trans. Audio Speech Lang. Process., 29:2152-2161.
247
+ Marco Turchi, Matteo Negri, M Farajian, and Marcello Federico. 2017. Continuous learning from human post-edits for neural machine translation.
248
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.
249
+ Oriol Vinyals and Quoc Le. 2015. A neural conversational model. In ICML (Deep Learning Workshop).
250
+ Kun Wang, Chengqing Zong, and Keh-Yih Su. 2013. Integrating translation memory into phrase-based machine translation during decoding. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11-21.
251
+ Kun Wang, Chengqing Zong, and Keh-Yih Su. 2014. Dynamically integrating cross-domain translation memory into phrase-based machine translation during decoding. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 398-408.
252
+ Jason Weston, Emily Dinan, and Alexander Miller. 2018. Retrieve and refine: Improved sequence generation models for dialogue. In Proceedings of the 2018 EMNLP Workshop SCAI: The 2nd International Workshop on Search-Oriented Conversational AI, pages 87-92.
253
+
254
+ Yu Wu, Furu Wei, Shaohan Huang, Yunli Wang, Zhoujun Li, and Ming Zhou. 2019. Response generation by context-aware prototype editing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 7281-7288.
255
+ Zeqiu Wu, Michel Galley, Chris Brockett, Yizhe Zhang, Xiang Gao, Chris Quirk, Rik Koncel-Kedziorski, Jianfeng Gao, Hannaneh Hajishirzi, Mari Ostendorf, et al. 2021. A controllable model of grounded response generation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 14085-14093.
256
+ Mengzhou Xia, Guoping Huang, Lemao Liu, and Shuming Shi. 2019. Graph based translation memory for neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 7297-7304.
257
+ Fei Xiao, Liang Pang, Yanyan Lan, Yan Wang, Huawei Shen, and Xueqi Cheng. 2021. Transductive learning for unsupervised text style transfer. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, EMNLP 2021, Virtual Event / Punta Cana, Dominican Republic, 7-11 November, 2021, pages 2510-2521. Association for Computational Linguistics.
258
+ Jitao Xu, Josep M Crego, and Jean Senellart. 2020. Boosting neural machine translation with similar translations. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1580-1590.
259
+ Liu Yang, Junjie Hu, Minghui Qiu, Chen Qu, Jianfeng Gao, W Bruce Croft, Xiaodong Liu, Yelong Shen, and Jingjing Liu. 2019. A hybrid retrieval-generation neural conversation model. In Proceedings of the 28th ACM international conference on information and knowledge management, pages 1341-1350.
260
+ Jingyi Zhang, Masao Utiyama, Eiichiro Sumita, Graham Neubig, and Satoshi Nakamura. 2018. Guiding neural machine translation with retrieved translation pieces. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1325-1335.
261
+ Yizhe Zhang, Siqi Sun, Xiang Gao, Yuwei Fang, Chris Brockett, Michel Galley, Jianfeng Gao, and Bill Dolan. 2021. Joint retrieval and generation training for grounded text generation. arXiv preprint arXiv:2105.06597.
262
+ Zhuosheng Zhang, Kehai Chen, Rui Wang, Masao Utiyama, Eiichiro Sumita, Zuchao Li, and Hai Zhao. 2020. Neural machine translation with universal visual representation. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
263
+
264
+ Ventsislav Zhechev and Josef Van Genabith. 2010. Seeding statistical machine translation with translation memory output through tree-based structural alignment. In Proceedings of the 4th Workshop on Syntax and Structure in Statistical Translation, pages 43-51.
265
+ Xin Zheng, Zhirui Zhang, Junliang Guo, Shujian Huang, Boxing Chen, Weihua Luo, and Jiajun Chen. 2021a. Adaptive nearest neighbor machine translation. arXiv preprint arXiv:2105.13022.
266
+ Xin Zheng, Zhirui Zhang, Shujian Huang, Boxing Chen, Jun Xie, Weihua Luo, and Jiajun Chen. 2021b. Non-parametric unsupervised domain adaptation for neural machine translation. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 4234-4241.
267
+ Kangyan Zhou, Shrimai Prabhumoye, and Alan W Black. 2018. A dataset for document grounded conversations. arXiv preprint arXiv:1809.07358.
2202.01xxx/2202.01110/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:accdaef7b2b2053af797556469a493ee9a3d2ffccb2234e76757fc6ecd791445
3
+ size 69008
2202.01xxx/2202.01110/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01113/6750c7cd-1832-4e06-91d6-3a748ed5fecf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f898509feb08adbd6cdd4491298f445b2837e1cb498703f044a8f0897095578
3
+ size 456107
2202.01xxx/2202.01113/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01113/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2bc6dfb11e5d922d72e6490ae10c98f884f65171955c597f3507dfa5530c6ce
3
+ size 1715483
2202.01xxx/2202.01113/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01136/64512bb3-257e-447f-9d41-4659d42602f0_content_list.json ADDED
The diff for this file is too large to render. See raw diff