Chelsea707 committed (verified)
Commit: bd8f5aa
1 Parent(s): cc71c4f

Re-upload MinerU batch 642cc026-8ab8-41c0-9852-07c4aceab89a

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. data/2020/2005_05xxx/2005.05906/c019bbe9-7a77-4c4f-b34a-5a2cde979abc_content_list.json +0 -0
  2. data/2020/2005_05xxx/2005.05906/c019bbe9-7a77-4c4f-b34a-5a2cde979abc_model.json +0 -0
  3. data/2020/2005_05xxx/2005.05906/full.md +0 -0
  4. data/2020/2005_05xxx/2005.05906/images.zip +1 -1
  5. data/2020/2005_05xxx/2005.05906/layout.json +0 -0
  6. data/2020/2005_05xxx/2005.05951/4c83963c-c41a-461b-bfbf-afc9f2f3a746_content_list.json +0 -0
  7. data/2020/2005_05xxx/2005.05951/4c83963c-c41a-461b-bfbf-afc9f2f3a746_model.json +0 -0
  8. data/2020/2005_05xxx/2005.05951/full.md +0 -0
  9. data/2020/2005_05xxx/2005.05951/images.zip +1 -1
  10. data/2020/2005_05xxx/2005.05951/layout.json +0 -0
  11. data/2020/2005_05xxx/2005.05957/3bb5c045-9ee6-4415-9cd3-ec7fa94a2a26_content_list.json +3 -1729
  12. data/2020/2005_05xxx/2005.05957/3bb5c045-9ee6-4415-9cd3-ec7fa94a2a26_model.json +3 -2145
  13. data/2020/2005_05xxx/2005.05957/full.md +3 -322
  14. data/2020/2005_05xxx/2005.05957/images.zip +1 -1
  15. data/2020/2005_05xxx/2005.05957/layout.json +0 -0
  16. data/2020/2005_05xxx/2005.05960/4c5c873d-7788-4d67-afc4-75f6ae3012da_content_list.json +0 -0
  17. data/2020/2005_05xxx/2005.05960/4c5c873d-7788-4d67-afc4-75f6ae3012da_model.json +0 -0
  18. data/2020/2005_05xxx/2005.05960/full.md +3 -454
  19. data/2020/2005_05xxx/2005.05960/images.zip +1 -1
  20. data/2020/2005_05xxx/2005.05960/layout.json +0 -0
  21. data/2020/2005_05xxx/2005.05999/55470d3a-eba0-4dac-8f5a-e101cace3cb0_content_list.json +3 -1601
  22. data/2020/2005_05xxx/2005.05999/55470d3a-eba0-4dac-8f5a-e101cace3cb0_model.json +3 -1899
  23. data/2020/2005_05xxx/2005.05999/full.md +3 -340
  24. data/2020/2005_05xxx/2005.05999/images.zip +1 -1
  25. data/2020/2005_05xxx/2005.05999/layout.json +0 -0
  26. data/2020/2005_06xxx/2005.06001/54cec5b6-a91c-4aea-ae6a-d5399be25314_content_list.json +0 -0
  27. data/2020/2005_06xxx/2005.06001/54cec5b6-a91c-4aea-ae6a-d5399be25314_model.json +0 -0
  28. data/2020/2005_06xxx/2005.06001/full.md +0 -0
  29. data/2020/2005_06xxx/2005.06001/images.zip +1 -1
  30. data/2020/2005_06xxx/2005.06001/layout.json +0 -0
  31. data/2020/2005_06xxx/2005.06040/56b116d0-0e46-408a-9475-7cd9f859a9ca_content_list.json +3 -1315
  32. data/2020/2005_06xxx/2005.06040/56b116d0-0e46-408a-9475-7cd9f859a9ca_model.json +3 -1846
  33. data/2020/2005_06xxx/2005.06040/full.md +3 -279
  34. data/2020/2005_06xxx/2005.06040/images.zip +1 -1
  35. data/2020/2005_06xxx/2005.06040/layout.json +0 -0
  36. data/2020/2005_06xxx/2005.06209/96df9e36-3e85-4a55-a3b1-bc6eb5c65f79_content_list.json +0 -0
  37. data/2020/2005_06xxx/2005.06209/96df9e36-3e85-4a55-a3b1-bc6eb5c65f79_model.json +0 -0
  38. data/2020/2005_06xxx/2005.06209/full.md +0 -0
  39. data/2020/2005_06xxx/2005.06209/images.zip +1 -1
  40. data/2020/2005_06xxx/2005.06209/layout.json +0 -0
  41. data/2020/2005_06xxx/2005.06227/76517cb5-7ba7-44f7-855a-b106df86ca7b_content_list.json +0 -0
  42. data/2020/2005_06xxx/2005.06227/76517cb5-7ba7-44f7-855a-b106df86ca7b_model.json +0 -0
  43. data/2020/2005_06xxx/2005.06227/full.md +0 -0
  44. data/2020/2005_06xxx/2005.06227/images.zip +1 -1
  45. data/2020/2005_06xxx/2005.06227/layout.json +0 -0
  46. data/2020/2005_06xxx/2005.06247/1dbe5a8e-513b-4268-b9d4-023a41101438_content_list.json +3 -1722
  47. data/2020/2005_06xxx/2005.06247/1dbe5a8e-513b-4268-b9d4-023a41101438_model.json +0 -0
  48. data/2020/2005_06xxx/2005.06247/full.md +3 -288
  49. data/2020/2005_06xxx/2005.06247/images.zip +1 -1
  50. data/2020/2005_06xxx/2005.06247/layout.json +0 -0
data/2020/2005_05xxx/2005.05906/c019bbe9-7a77-4c4f-b34a-5a2cde979abc_content_list.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05906/c019bbe9-7a77-4c4f-b34a-5a2cde979abc_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05906/full.md CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05906/images.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1edc03cba80c8906bf144b1521d3ef42de95cd922a3b7a91d8569f133bfccb64
+oid sha256:f0393b92cb10965507f89adbed1e3edcd26f3c75550afc71a3d8e0f6f2dd8ee1
 size 224509
data/2020/2005_05xxx/2005.05906/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05951/4c83963c-c41a-461b-bfbf-afc9f2f3a746_content_list.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05951/4c83963c-c41a-461b-bfbf-afc9f2f3a746_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05951/full.md CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05951/images.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:165cf3fab1a6abb5e87e65375ba1dbba4f0b400c6d4cd824d760400dff1c4a2a
+oid sha256:776e8cde7041b76c35adafe750ec8505b75e5f406f8216bf308a6f78d23f1684
 size 1300375
data/2020/2005_05xxx/2005.05951/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05957/3bb5c045-9ee6-4415-9cd3-ec7fa94a2a26_content_list.json CHANGED
@@ -1,1729 +1,3 @@
1
- [
2
- {
3
- "type": "text",
4
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
5
- "text_level": 1,
6
- "bbox": [
7
- 160,
8
- 109,
9
- 813,
10
- 156
11
- ],
12
- "page_idx": 0
13
- },
14
- {
15
- "type": "text",
16
- "text": "Rafael Valle<sup>1</sup> Kevin Shih<sup>1</sup> Ryan Prenger<sup>1</sup> Bryan Catanzaro<sup>1</sup>",
17
- "bbox": [
18
- 259,
19
- 198,
20
- 709,
21
- 215
22
- ],
23
- "page_idx": 0
24
- },
25
- {
26
- "type": "text",
27
- "text": "Abstract",
28
- "text_level": 1,
29
- "bbox": [
30
- 241,
31
- 242,
32
- 318,
33
- 258
34
- ],
35
- "page_idx": 0
36
- },
37
- {
38
- "type": "text",
39
- "text": "In this paper we propose Flowtron: an autoregressive flow-based generative network for text-to-speech synthesis with control over speech variation and style transfer. Flowtron borrows insights from IAF and revamps Tacotron in order to provide high-quality and expressive melspectrogram synthesis. Flowtron is optimized by maximizing the likelihood of the training data, which makes training simple and stable. Flowtron learns an invertible mapping of data to a latent space that can be manipulated to control many aspects of speech synthesis (pitch, tone, speech rate, cadence, accent). Our mean opinion scores (MOS) show that Flowtron matches state-of-the-art TTS models in terms of speech quality. In addition, we provide results on control of speech variation, interpolation between samples and style transfer between speakers seen and unseen during training. Code and pretrained models will be made publicly available at https://github.com/NVIDIA/flowtron.",
40
- "bbox": [
41
- 117,
42
- 266,
43
- 444,
44
- 583
45
- ],
46
- "page_idx": 0
47
- },
48
- {
49
- "type": "text",
50
- "text": "1. Introduction",
51
- "text_level": 1,
52
- "bbox": [
53
- 86,
54
- 614,
55
- 217,
56
- 630
57
- ],
58
- "page_idx": 0
59
- },
60
- {
61
- "type": "text",
62
- "text": "Current speech synthesis methods do not give the user enough control over how speech actually sounds. Automatically converting text to audio that successfully communicates the text was achieved a long time ago (Umeda et al., 1968; Badham et al., 1983). However, communicating only the text information leaves out all of the acoustic properties of the voice that convey much of the meaning and human expressiveness. Nearly all the research into speech synthesis since the 1960s has focused on adding that non-textual information to synthesized speech. But in spite of this, the typical speech synthesis problem is formulated as a text to speech problem in which the user inputs only text.",
63
- "bbox": [
64
- 84,
65
- 638,
66
- 475,
67
- 821
68
- ],
69
- "page_idx": 0
70
- },
71
- {
72
- "type": "text",
73
- "text": "Taming the non-textual information in speech is difficult",
74
- "bbox": [
75
- 84,
76
- 828,
77
- 473,
78
- 844
79
- ],
80
- "page_idx": 0
81
- },
82
- {
83
- "type": "text",
84
- "text": "because the non-textual is unlabeled. A voice actor may speak the same text with different emphasis or emotion based on context, but it is unclear how to label a particular reading. Without labels for the non-textual information, models have fallen back to unsupervised learning. Recent models have achieved nearly human-level quality, despite treating the non-textual information as a black box. The model's only goal is to match the patterns in the training data (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017). Despite these models' excellent ability to recreate the non-textual information in the training set, the user has no insight into or control over the non-textual information.",
85
- "bbox": [
86
- 495,
87
- 243,
88
- 887,
89
- 426
90
- ],
91
- "page_idx": 0
92
- },
93
- {
94
- "type": "text",
95
- "text": "It is possible to formulate an unsupervised learning problem in such a way that the user can gain insights into the structure of a data set. One way is to formulate the problem such that the data is assumed to have a representation in some latent space, and have the model learn that representation. This latent space can then be investigated and manipulated to give the user more control over the generative model's output. Such approaches have been popular in image generation for some time now, allowing users to interpolate smoothly between images and to identify portions of the latent space that correlate with various features (Radford et al., 2015; Kingma & Dhariwal, 2018).",
96
- "bbox": [
97
- 495,
98
- 431,
99
- 888,
100
- 616
101
- ],
102
- "page_idx": 0
103
- },
104
- {
105
- "type": "text",
106
- "text": "In audio, however, approaches have focused on embeddings that remove a large amount of information and are obtained from assumptions about what is interesting. Recent approaches that utilize deep learning for expressive speech synthesis combine text and a learned latent embedding for prosody or global style (Wang et al., 2018; Skerry-Ryan et al., 2018). A variation of this approach is proposed by (Hsu et al., 2018), wherein a Gaussian mixture model (GMM) encoding the audio is added to Tacotron to learn a latent embedding. These approaches control the nontextual information by learning a bank of embeddings or by providing the target output as an input to the model and compressing it. However, these approaches require making assumptions about the dimensionality of the embeddings before hand and are not guaranteed to contain all the nontextual information it takes to reconstruct speech, including the risk of having dummy dimensions or not enough capacity, as the appendix sections in (Wang et al., 2018;",
107
- "bbox": [
108
- 495,
109
- 621,
110
- 888,
111
- 893
112
- ],
113
- "page_idx": 0
114
- },
115
- {
116
- "type": "aside_text",
117
- "text": "arXiv:2005.05957v3 [cs.SD] 16 Jul 2020",
118
- "bbox": [
119
- 22,
120
- 273,
121
- 57,
122
- 707
123
- ],
124
- "page_idx": 0
125
- },
126
- {
127
- "type": "page_footnote",
128
- "text": "$^{1}$ NVIDIA Applied Deep Learning Research (ADLR). Correspondence to: Rafael Valle <rafaelvalle@nvidia.com>.",
129
- "bbox": [
130
- 84,
131
- 852,
132
- 477,
133
- 880
134
- ],
135
- "page_idx": 0
136
- },
137
- {
138
- "type": "text",
139
- "text": "Skerry-Ryan et al., 2018; Hsu et al., 2018) confirm. They also require finding an encoder and embedding that prevents the model from simply learning a complex identity function that ignores other inputs. Furthermore, these approaches focus on fixed-length embeddings under the assumption that variable-length embeddings are not robust to text and speaker perturbations. Finally, most of these approaches do not give the user control over the degree of variability in the synthesized speech.",
140
- "bbox": [
141
- 88,
142
- 85,
143
- 472,
144
- 220
145
- ],
146
- "page_idx": 1
147
- },
148
- {
149
- "type": "text",
150
- "text": "In this paper we propose Flowtron: an autoregressive flow-based generative network for mel-spectrogram synthesis with control over acoustics and speech. Flowtron learns an invertible function that maps a distribution over mel-spectrograms to a latent $z$ space parameterized by a spherical Gaussian. With this formalization, we can generate samples containing specific speech characteristics manifested in mel-space by finding and sampling the corresponding region in $z$ -space. In the basic approach, we generate samples by sampling a zero mean spherical Gaussian prior and control the amount of variation by adjusting its variance. Despite its simplicity, this approach offers more speech variation and control than Tacotron.",
151
- "bbox": [
152
- 88,
153
- 228,
154
- 473,
155
- 422
156
- ],
157
- "page_idx": 1
158
- },
159
- {
160
- "type": "text",
161
- "text": "In Flowtron, we can access specific regions of mel-spectrogram space by sampling a posterior distribution conditioned on prior evidence from existing samples (Kingma & Dhariwal, 2018; Gambardella et al., 2019). This approach allows us to make a monotonous speaker more expressive by computing the region in $z$ -space associated with expressive speech as it is manifested in the prior evidence. Finally, our formulation also allows us to impose a structure to the $z$ -space and parametrize it with a Gaussian mixture, for example. In this approach related to (Hsu et al., 2018), speech characteristics in mel-spectrogram space can be associated with individual components. Hence, it is possible to generate samples with specific speech characteristics by selecting a component or a mixture thereof<sup>1</sup>.",
162
- "bbox": [
163
- 88,
164
- 431,
165
- 473,
166
- 641
167
- ],
168
- "page_idx": 1
169
- },
170
- {
171
- "type": "text",
172
- "text": "Although VAEs and GANs (Hsu et al., 2018; Binkowski et al., 2019; Akuzawa et al., 2018) based models also provide a latent prior that can be easily manipulated, in Flowtron this comes at no cost in speech quality nor optimization challenges.",
173
- "bbox": [
174
- 88,
175
- 650,
176
- 472,
177
- 726
178
- ],
179
- "page_idx": 1
180
- },
181
- {
182
- "type": "text",
183
- "text": "We find that Flowtron is able to generalize and produce sharp mel-spectrograms by simply maximizing the likelihood of the data while not requiring any additional Prenet or Postnet layer (Wang et al., 2017), nor compound loss functions required by most state of the art models like (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017; Skerry-Ryan et al., 2018; Wang et al., 2018; Binkowski et al., 2019).",
184
- "bbox": [
185
- 88,
186
- 733,
187
- 472,
188
- 838
189
- ],
190
- "page_idx": 1
191
- },
192
- {
193
- "type": "text",
194
- "text": "Flowtron is optimized by maximizing the likelihood of the training data, which makes training simple and stable. It",
195
- "bbox": [
196
- 88,
197
- 847,
198
- 472,
199
- 876
200
- ],
201
- "page_idx": 1
202
- },
203
- {
204
- "type": "text",
205
- "text": "learns an invertible mapping of the a latent space that can be manipulated to control many aspects of speech synthesis. Our mean opinion scores (MOS) show that Flowtron matches state-of-the-art TTS models in terms of speech quality. In addition, we provide results on control of speech variation, interpolation between samples, and style transfer between seen and unseen speakers with similar and different sentences. To our knowledge, this work is the first to show evidence that normalizing flow models can also be used for text-to-speech synthesis. We hope this will further stimulate developments in normalizing flows.",
206
- "bbox": [
207
- 501,
208
- 85,
209
- 883,
210
- 250
211
- ],
212
- "page_idx": 1
213
- },
214
- {
215
- "type": "text",
216
- "text": "2. Related Work",
217
- "text_level": 1,
218
- "bbox": [
219
- 501,
220
- 270,
221
- 637,
222
- 286
223
- ],
224
- "page_idx": 1
225
- },
226
- {
227
- "type": "text",
228
- "text": "Earlier approaches to text-to-speech synthesis that achieve human like results focus on synthesizing acoustic features from text, treating the non-textual information as a black box. (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017). Approaches like (Wang et al., 2017; Shen et al., 2017) require adding a critical Prenet layer to help with convergence and improve generalization (Wang et al., 2017) Furthermore, such models require an additional Postnet residual layer and modified loss to produce \"better resolved harmonics and high frequency formant structures, which reduces synthesis artifacts.\"",
229
- "bbox": [
230
- 501,
231
- 296,
232
- 883,
233
- 460
234
- ],
235
- "page_idx": 1
236
- },
237
- {
238
- "type": "text",
239
- "text": "One approach to dealing with this lack of labels for underlying non-textual information is to look for hand engineered statistics based on the audio that we believe are correlated with this underlying information.",
240
- "bbox": [
241
- 501,
242
- 469,
243
- 883,
244
- 529
245
- ],
246
- "page_idx": 1
247
- },
248
- {
249
- "type": "text",
250
- "text": "This is the approach taken by models like (Nishimura et al., 2016; Lee et al., 2019), wherein utterances are conditioned on audio statistics that can be calculated directly from the training data such as $F_{0}$ (fundamental frequency). However, in order to use such models, the statistics we hope to approximate must be decided upon a-priori, and the target value of these statistics must be determined before synthesis.",
251
- "bbox": [
252
- 501,
253
- 537,
254
- 883,
255
- 642
256
- ],
257
- "page_idx": 1
258
- },
259
- {
260
- "type": "text",
261
- "text": "Another approach to dealing with the issue of unlabeled non-textual information is to learn a latent embedding for prosody or global style. This is the approach taken by models like (Skerry-Ryan et al., 2018; Wang et al., 2018), wherein in a bank of embeddings or a latent embedding space of prosody is learned from unlabelled data. While these approaches have shown promise, manipulating such latent variables only offers a coarse control over expressive characteristics of speech.",
262
- "bbox": [
263
- 501,
264
- 650,
265
- 883,
266
- 786
267
- ],
268
- "page_idx": 1
269
- },
270
- {
271
- "type": "text",
272
- "text": "A mixed approach consists of combining engineered statistics with latent embeddings learned in an unsupervised fashion. This is the approach taken by models like Mellotron (Valle et al., 2019b). In Mellotron, utterances are conditioned on both audio statistics and a latent embedding of acoustic features derived from a reference acoustic representation. Despite its advantages, this approach still requires",
273
- "bbox": [
274
- 501,
275
- 794,
276
- 883,
277
- 898
278
- ],
279
- "page_idx": 1
280
- },
281
- {
282
- "type": "header",
283
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
284
- "bbox": [
285
- 200,
286
- 56,
287
- 767,
288
- 70
289
- ],
290
- "page_idx": 1
291
- },
292
- {
293
- "type": "page_footnote",
294
- "text": "What is relevant statistically might not be perceptually.",
295
- "bbox": [
296
- 109,
297
- 887,
298
- 437,
299
- 901
300
- ],
301
- "page_idx": 1
302
- },
303
- {
304
- "type": "text",
305
- "text": "determining these statistics before synthesis.",
306
- "bbox": [
307
- 84,
308
- 85,
309
- 379,
310
- 99
311
- ],
312
- "page_idx": 2
313
- },
314
- {
315
- "type": "text",
316
- "text": "3. Flowtron",
317
- "text_level": 1,
318
- "bbox": [
319
- 86,
320
- 119,
321
- 187,
322
- 133
323
- ],
324
- "page_idx": 2
325
- },
326
- {
327
- "type": "text",
328
- "text": "Flowtron is an autoregressive generative model that generates a sequence of mel spectrogram frames $p(x)$ by producing each mel-spectrogram frame based on previous mel-spectrogram frames $p(x) = \\prod p(x_{t}|x_{1:t - 1})$ . Our setup uses a neural network as a generative model by sampling from a simple distribution $p(z)$ . We consider two simple distributions with the same number of dimensions as our desired mel-spectrogram: a zero-mean spherical Gaussian and a mixture of spherical Gaussians with fixed or learnable parameters.",
329
- "bbox": [
330
- 84,
331
- 145,
332
- 475,
333
- 296
334
- ],
335
- "page_idx": 2
336
- },
337
- {
338
- "type": "equation",
339
- "text": "\n$$\n\\boldsymbol {z} \\sim \\mathcal {N} (\\boldsymbol {z}; 0, \\boldsymbol {I}) \\tag {1}\n$$\n",
340
- "text_format": "latex",
341
- "bbox": [
342
- 225,
343
- 329,
344
- 473,
345
- 347
346
- ],
347
- "page_idx": 2
348
- },
349
- {
350
- "type": "equation",
351
- "text": "\n$$\n\\boldsymbol {z} \\sim \\sum_ {k} \\hat {\\phi} _ {k} \\mathcal {N} (\\boldsymbol {z}; \\boldsymbol {\\mu} _ {k}, \\boldsymbol {\\Sigma} _ {k}) \\tag {2}\n$$\n",
352
- "text_format": "latex",
353
- "bbox": [
354
- 191,
355
- 349,
356
- 473,
357
- 380
358
- ],
359
- "page_idx": 2
360
- },
361
- {
362
- "type": "text",
363
- "text": "These samples are put through a series of invertible, parametrized transformations $\\pmb{f}$ , in our case affine transformations that transform $p(\\pmb{z})$ into $p(x)$ .",
364
- "bbox": [
365
- 84,
366
- 398,
367
- 475,
368
- 444
369
- ],
370
- "page_idx": 2
371
- },
372
- {
373
- "type": "equation",
374
- "text": "\n$$\n\\boldsymbol {x} = \\boldsymbol {f} _ {0} \\circ \\boldsymbol {f} _ {1} \\circ \\dots \\boldsymbol {f} _ {k} (z) \\tag {3}\n$$\n",
375
- "text_format": "latex",
376
- "bbox": [
377
- 194,
378
- 454,
379
- 473,
380
- 472
381
- ],
382
- "page_idx": 2
383
- },
384
- {
385
- "type": "text",
386
- "text": "As it is illustrated in (Kingma et al., 2016), in autoregressive normalizing flows the $t$ -th variable $\\boldsymbol{z}_t^\\prime$ only depends on previous timesteps $\\boldsymbol{z}_{1:t - 1}$ :",
387
- "bbox": [
388
- 84,
389
- 488,
390
- 475,
391
- 534
392
- ],
393
- "page_idx": 2
394
- },
395
- {
396
- "type": "equation",
397
- "text": "\n$$\n\\boldsymbol {z} _ {t} ^ {\\prime} = \\boldsymbol {f} _ {k} \\left(\\boldsymbol {z} _ {1: t - 1}\\right) \\tag {4}\n$$\n",
398
- "text_format": "latex",
399
- "bbox": [
400
- 220,
401
- 544,
402
- 473,
403
- 561
404
- ],
405
- "page_idx": 2
406
- },
407
- {
408
- "type": "text",
409
- "text": "By using parametrized affine transformations for $f$ and due to the autoregressive structure, the Jacobian determinant of each of the transformations $f$ is lower triangular, hence easy to compute. With this setup we can train Flowtron by maximizing the log-likelihood of the data, which can be done using the change of variables:",
410
- "bbox": [
411
- 84,
412
- 579,
413
- 473,
414
- 670
415
- ],
416
- "page_idx": 2
417
- },
418
- {
419
- "type": "equation",
420
- "text": "\n$$\n\\log p _ {\\theta} (\\boldsymbol {x}) = \\log p _ {\\theta} (\\boldsymbol {z}) + \\sum_ {i = 1} ^ {k} \\log | \\det (\\boldsymbol {J} (\\boldsymbol {f} _ {i} ^ {- 1} (\\boldsymbol {x}))) | \\tag {5}\n$$\n",
421
- "text_format": "latex",
422
- "bbox": [
423
- 96,
424
- 680,
425
- 473,
426
- 720
427
- ],
428
- "page_idx": 2
429
- },
430
- {
431
- "type": "equation",
432
- "text": "\n$$\n\\boldsymbol {z} = \\boldsymbol {f} _ {k} ^ {- 1} \\circ \\boldsymbol {f} _ {k - 1} ^ {- 1} \\circ \\dots \\boldsymbol {f} _ {0} ^ {- 1} (\\boldsymbol {x}) \\tag {6}\n$$\n",
433
- "text_format": "latex",
434
- "bbox": [
435
- 176,
436
- 723,
437
- 473,
438
- 743
439
- ],
440
- "page_idx": 2
441
- },
442
- {
443
- "type": "text",
444
- "text": "For the forward pass through the network, we take the melspectrograms as vectors and process them through several \"steps of flow conditioned on the text and speaker ids. A step of flow here consists of an affine coupling layer, described below.",
445
- "bbox": [
446
- 84,
447
- 760,
448
- 475,
449
- 834
450
- ],
451
- "page_idx": 2
452
- },
453
- {
454
- "type": "text",
455
- "text": "3.1. Affine Coupling Layer",
456
- "text_level": 1,
457
- "bbox": [
458
- 84,
459
- 851,
460
- 276,
461
- 867
462
- ],
463
- "page_idx": 2
464
- },
465
- {
466
- "type": "text",
467
- "text": "Invertible neural networks are typically constructed using coupling layers (Dinh et al., 2014; 2016; Kingma &",
468
- "bbox": [
469
- 84,
470
- 875,
471
- 475,
472
- 906
473
- ],
474
- "page_idx": 2
475
- },
476
- {
477
- "type": "text",
478
- "text": "Dhariwal, 2018). In our case, we use an affine coupling layer (Dinh et al., 2016). Every input $\\boldsymbol{x}_{t-1}$ produces scale and bias terms, $s$ and $b$ respectively, that affine-transform the succeeding input $\\boldsymbol{x}_t$ :",
479
- "bbox": [
480
- 496,
481
- 84,
482
- 885,
483
- 146
484
- ],
485
- "page_idx": 2
486
- },
487
- {
488
- "type": "equation",
489
- "text": "\n$$\n\\left(\\log \\boldsymbol {s} _ {t}, \\boldsymbol {b} _ {t}\\right) = N N \\left(\\boldsymbol {x} _ {1: t - 1}, \\text {t e x t}, \\text {s p e a k e r}\\right) \\tag {7}\n$$\n",
490
- "text_format": "latex",
491
- "bbox": [
492
- 542,
493
- 178,
494
- 885,
495
- 194
496
- ],
497
- "page_idx": 2
498
- },
499
- {
500
- "type": "equation",
501
- "text": "\n$$\n\\boldsymbol {x} _ {t} ^ {\\prime} = \\boldsymbol {s} _ {t} \\odot \\boldsymbol {x} _ {t} + \\boldsymbol {b} _ {t} \\tag {8}\n$$\n",
502
- "text_format": "latex",
503
- "bbox": [
504
- 627,
505
- 196,
506
- 885,
507
- 213
508
- ],
509
- "page_idx": 2
510
- },
511
- {
512
- "type": "text",
513
- "text": "Here $NN()$ can be any autoregressive causal transformation. This can be achieved by time-wise concatenation of a 0-valued vector to the input provided to $NN()$ . The affine coupling layer preserves invertibility for the overall network, even though $NN()$ does not need to be invertible. This follows because the first input of $NN()$ is a constant and due to the autoregressive nature of the model the scaling and translation terms $s_t$ and $b_t$ only depend on $x_{1:t-1}$ and the fixed text and speaker vectors. Accordingly, when inverting the network, we can compute $s_t$ and $b_t$ from the preceding input $x_{1:t-1}$ , and then invert $x_t'$ to compute $x_t$ , by simply recomputing $NN(x_{1:t-1},text,Speaker)$ .",
514
- "bbox": [
515
- 496,
516
- 231,
517
- 885,
518
- 414
519
- ],
520
- "page_idx": 2
521
- },
522
- {
523
- "type": "text",
524
- "text": "With an affine coupling layer, only the $s_t$ term changes the volume of the mapping and adds a change of variables term to the loss. This term also serves to penalize the model for non-invertible affine mappings.",
525
- "bbox": [
526
- 496,
527
- 420,
528
- 885,
529
- 481
530
- ],
531
- "page_idx": 2
532
- },
533
- {
534
- "type": "equation",
535
- "text": "\n$$\n\\log | \\det (\\boldsymbol {J} (\\boldsymbol {f} _ {\\text {c o u p l i n g}} ^ {- 1} (\\boldsymbol {x}))) | = \\log | \\boldsymbol {s} | \\tag {9}\n$$\n",
536
- "text_format": "latex",
537
- "bbox": [
538
- 565,
539
- 489,
540
- 885,
541
- 510
542
- ],
543
- "page_idx": 2
544
- },
545
- {
546
- "type": "text",
547
- "text": "With this setup, it is also possible to revert the ordering of the input $x$ without loss of generality. Hence, we choose to revert the order of the input at every even step of flow and to maintain the original order on odd steps of flow. This allows the model to learn dependencies both forward and backwards in time while remaining causal and invertible.",
548
- "bbox": [
549
- 496,
550
- 525,
551
- 885,
552
- 616
553
- ],
554
- "page_idx": 2
555
- },
556
- {
557
- "type": "text",
558
- "text": "3.2. Model architecture",
559
- "text_level": 1,
560
- "bbox": [
561
- 496,
562
- 633,
563
- 663,
564
- 646
565
- ],
566
- "page_idx": 2
567
- },
568
- {
569
- "type": "text",
570
- "text": "Our text encoder modifies Tacotron's by replacing batchnorm with instance-norm. Our decoder and $NN$ architecture, depicted in Figure 1, removes the essential Prenet and Postnet layers from Tacotron. We use the content-based tanh attention described in (Vinyals et al., 2015). We use the Mel Encoder described in (Hsu et al., 2018) for Flowtron models that predict the parameters of the Gaussian mixture.",
571
- "bbox": [
572
- 495,
573
- 656,
574
- 885,
575
- 762
576
- ],
577
- "page_idx": 2
578
- },
579
- {
580
- "type": "text",
581
- "text": "Unlike (Ping et al., 2017; Gibiansky et al., 2017), where site specific speaker embeddings are used, we use a single speaker embedding that is channel-wise concatenated with the encoder outputs at every token. We use a fixed dummy speaker embedding for models not conditioned on speaker id. Finally, we add a dense layer with a sigmoid output the flow step closest to $z$ . This provides the model with a gating mechanism as early as possible during inference to avoid extra computation.",
582
- "bbox": [
583
- 495,
584
- 768,
585
- 885,
586
- 905
587
- ],
588
- "page_idx": 2
589
- },
590
- {
591
- "type": "header",
592
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
593
- "bbox": [
594
- 200,
595
- 56,
596
- 769,
597
- 70
598
- ],
599
- "page_idx": 2
600
- },
601
- {
602
- "type": "image",
603
- "img_path": "images/829dc9169a7f7f045a4d5475447794c6bfc08282e8364637c340292bd9ae2c77.jpg",
604
- "image_caption": [
605
- "Figure 1: Flowtron network. Text and speaker embeddings are channel-wise concatenated. A 0-valued vector is concatenated with $x$ in the time dimension."
606
- ],
607
- "image_footnote": [],
608
- "bbox": [
609
- 86,
610
- 80,
611
- 475,
612
- 244
613
- ],
614
- "page_idx": 3
615
- },
616
- {
617
- "type": "text",
618
- "text": "3.3. Inference",
619
- "text_level": 1,
620
- "bbox": [
621
- 84,
622
- 330,
623
- 186,
624
- 344
625
- ],
626
- "page_idx": 3
627
- },
628
- {
629
- "type": "text",
630
- "text": "Once the network is trained, doing inference is simply a matter of randomly sampling $z$ values from a spherical Gaussian, or Gaussian Mixture, and running them through the network, reverting the order of the input when necessary. During training we used $\\sigma^2 = 1$ . The parameters of the Gaussian mixture are either fixed or predicted by Flowtron. In section 4.3 we explore the effects of different values for $\\sigma^2$ . In general, we found that sampling $z$ values from a Gaussian with a lower standard deviation from that assumed during training resulted in mel-spectrograms that sounded better, as found in (Kingma & Dhariwal, 2018), and earlier work on likelihood-based generative models (Parmar et al., 2018). During inference we sampled $z$ values from a Gaussian with $\\sigma^2 = 0.5$ , unless otherwise specified. The text and speaker embeddings are included at each of the coupling layers as before, but now the affine transforms are inverted in time, and these inverses are also guaranteed by the loss.",
631
- "bbox": [
632
- 84,
633
- 354,
634
- 475,
635
- 609
636
- ],
637
- "page_idx": 3
638
- },
639
- {
640
- "type": "text",
641
- "text": "4. Experiments",
642
- "text_level": 1,
643
- "bbox": [
644
- 84,
645
- 630,
646
- 217,
647
- 648
648
- ],
649
- "page_idx": 3
650
- },
651
- {
652
- "type": "text",
653
- "text": "This section describes our training setup and provides quantitative and qualitative results. Our quantitative results show that Flowtron has mean opinion scores (MOS) that are comparable to that of state of the art models for text to mel-spectrogram synthesis such as Tacotron 2. Our qualitative results display many features that are not possible or not efficient with Tacotron and Tacotron 2 GST. These features include control of the amount of variation in speech, interpolation between samples and style transfer between seen and unseen speakers during training.",
654
- "bbox": [
655
- 84,
656
- 656,
657
- 475,
658
- 808
659
- ],
660
- "page_idx": 3
661
- },
662
- {
663
- "type": "text",
664
- "text": "We decode all mel-spectrograms into waveforms by using a single pre-trained WaveGlow (Prenger et al., 2019) model trained on a single speaker and available on github (Valle et al., 2019a). During inference we used $\\sigma^2 = 0.7$ . In consonance with (Valle et al., 2019b), our results suggest that WaveGlow can be used as an universal decoder.",
665
- "bbox": [
666
- 84,
667
- 814,
668
- 473,
669
- 905
670
- ],
671
- "page_idx": 3
672
- },
673
- {
674
- "type": "text",
675
- "text": "Although we provide images to illustrate our results, they can best be appreciated by listening. Hence, we ask the readers to visit our website ${}^{2}$ to listen to Flowtron samples.",
676
- "bbox": [
677
- 496,
678
- 84,
679
- 885,
680
- 130
681
- ],
682
- "page_idx": 3
683
- },
684
- {
685
- "type": "text",
686
- "text": "4.1. Training setup",
687
- "text_level": 1,
688
- "bbox": [
689
- 496,
690
- 146,
691
- 633,
692
- 162
693
- ],
694
- "page_idx": 3
695
- },
696
- {
697
- "type": "text",
698
- "text": "We train our Flowtron, Tacotron 2 and Tacotron 2 GST models using a dataset that combines the LJSpeech (LJS) dataset (Ito et al., 2017) with two proprietary single speaker datasets with 20 and 10 hours each (Sally and Helen). We will refer to this combined dataset as LSH. We also train a Flowtron model on the train-clean-100 subset of LibriTTS (Zen et al., 2019) with 123 speakers and 25 minutes on average per speaker. Speakers with less than 5 minutes of data and files that are larger than 10 seconds are filtered out. For each dataset we use at least 180 randomly chosen samples for the validation set and the remainder for the training set.",
699
- "bbox": [
700
- 495,
701
- 169,
702
- 885,
703
- 351
704
- ],
705
- "page_idx": 3
706
- },
707
- {
708
- "type": "text",
709
- "text": "The models are trained on uniformly sampled normalized text and ARPAbet encodings obtained from the CMU Pronouncing Dictionary (Weide, 1998). We do not perform any data augmentation. We adapt the public Tacotron 2 and Tacotron 2 GST repos to include speaker embeddings as described in Section 3.",
710
- "bbox": [
711
- 495,
712
- 358,
713
- 885,
714
- 449
715
- ],
716
- "page_idx": 3
717
- },
718
- {
719
- "type": "text",
720
- "text": "We use a sampling rate of $22050\\mathrm{Hz}$ and mel-spectrograms with 80 bins using librosa mel filter defaults. We apply the STFT with a FFT size of 1024, window size of 1024 samples and hop size of 256 samples $(\\sim 12ms)$ .",
721
- "bbox": [
722
- 495,
723
- 455,
724
- 885,
725
- 518
726
- ],
727
- "page_idx": 3
728
- },
729
- {
730
- "type": "text",
731
- "text": "We use the ADAM (Kingma & Ba, 2014) optimizer with default parameters, 1e-4 learning rate and 1e-6 weight decay for Flowtron and 1e-3 learning rate and 1e-5 weight decay for the other models, following guidelines in (Wang et al., 2017). We anneal the learning rate once the generalization error starts to plateau and stop training once the the generalization error stops significantly decreasing or starts increasing. The Flowtron models with 2 steps of flow were trained on the LSH dataset for approximately 1000 epochs and then fine-tuned on LibriTTS for 500 epochs. Tacotron 2 and Tacotron 2 GST are trained for approximately 500 epochs. Each model is trained on a single NVIDIA DGX-1 with 8 GPUs.",
732
- "bbox": [
733
- 495,
734
- 523,
735
- 885,
736
- 720
737
- ],
738
- "page_idx": 3
739
- },
740
- {
741
- "type": "text",
742
- "text": "We find it faster to first learn to attend on a Flowtron model with a single step of flow and large amounts of data than multiple steps of flow and less data. After the model has learned to attend, we transfer its parameters to models with more steps of flow and speakers with less data. Thus, we first train Flowtron model with a single step of flow on the LSH dataset with many hours per speaker. Then we fine tune this model to Flowtron models with more steps of flow. Finally, these models are fine tuned on LibriTTS with an optional new speaker embedding.",
743
- "bbox": [
744
- 495,
745
- 729,
746
- 885,
747
- 880
748
- ],
749
- "page_idx": 3
750
- },
751
- {
752
- "type": "header",
753
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
754
- "bbox": [
755
- 200,
756
- 56,
757
- 767,
758
- 70
759
- ],
760
- "page_idx": 3
761
- },
762
- {
763
- "type": "page_footnote",
764
- "text": "2 https://nv-adlr.github.io/Flowtron",
765
- "bbox": [
766
- 517,
767
- 888,
768
- 723,
769
- 902
770
- ],
771
- "page_idx": 3
772
- },
773
- {
774
- "type": "text",
775
- "text": "4.2. Mean Opinion Score comparison",
776
- "text_level": 1,
777
- "bbox": [
778
- 84,
779
- 85,
780
- 349,
781
- 99
782
- ],
783
- "page_idx": 4
784
- },
785
- {
786
- "type": "text",
787
- "text": "We provide results that compare mean opinion scores (MOS) from real data from the LJS dataset, samples from a Flowtron with 2 steps of flow and samples from our implementation of Tacotron 2, both trained on LSH. Although the models evaluated are multi-speaker, we only compute mean opinion scores on LJS. In addition, we use the mean opinion scores provided in (Prenger et al., 2019) for ground truth data from the LJS dataset.",
788
- "bbox": [
789
- 84,
790
- 108,
791
- 473,
792
- 228
793
- ],
794
- "page_idx": 4
795
- },
796
- {
797
- "type": "text",
798
- "text": "We crowd-sourced mean opinion score (MOS) tests on Amazon Mechanical Turk. Raters first had to pass a hearing test to be eligible. Then they listened to an utterance, after which they rated pleasantness on a five-point scale. We used 30 volume normalized utterances from all speakers disjoint from the training set for evaluation, and randomly chose the utterances for each subject.",
799
- "bbox": [
800
- 84,
801
- 236,
802
- 475,
803
- 342
804
- ],
805
- "page_idx": 4
806
- },
807
- {
808
- "type": "text",
809
- "text": "The mean opinion scores are shown in Table 1 with $95\\%$ confidence intervals computed over approximately 250 scores per source. The results roughly match our subjective qualitative assessment. The larger advantage of Flowtron is in the control over the amount of speech variation and the manipulation of the latent space.",
810
- "bbox": [
811
- 84,
812
- 349,
813
- 475,
814
- 441
815
- ],
816
- "page_idx": 4
817
- },
818
- {
819
- "type": "table",
820
- "img_path": "images/2a1d4f2b0166ce20e5db19f9721566fa58a12872062551a37e9713cdb0e7155b.jpg",
821
- "table_caption": [],
822
- "table_footnote": [],
823
- "table_body": "<table><tr><td>Source</td><td>Flows</td><td>Mean Opinion Score (MOS)</td></tr><tr><td>Real</td><td>-</td><td>4.274 ± 0.1340</td></tr><tr><td>Flowtron</td><td>3</td><td>3.665 ± 0.1634</td></tr><tr><td>Tacotron 2</td><td>-</td><td>3.521 ± 0.1721</td></tr></table>",
824
- "bbox": [
825
- 94,
826
- 450,
827
- 465,
828
- 515
829
- ],
830
- "page_idx": 4
831
- },
832
- {
833
- "type": "text",
834
- "text": "Table 1: Mean Opinion Score (MOS) evaluations with $95\\%$ confidence intervals for various sources.",
835
- "bbox": [
836
- 84,
837
- 525,
838
- 473,
839
- 555
840
- ],
841
- "page_idx": 4
842
- },
843
- {
844
- "type": "text",
845
- "text": "4.3. Sampling the prior",
846
- "text_level": 1,
847
- "bbox": [
848
- 84,
849
- 587,
850
- 253,
851
- 603
852
- ],
853
- "page_idx": 4
854
- },
855
- {
856
- "type": "text",
857
- "text": "The simplest approach to generate samples with Flowtron is to sample from a prior distribution $z \\sim \\mathcal{N}(0, \\sigma^2)$ and adjust $\\sigma^2$ to control amount of variation. Whereas $\\sigma^2 = 0$ completely removes variation and produces outputs based on the model bias, increasing the value of $\\sigma^2$ will increase the amount of variation in speech.",
858
- "bbox": [
859
- 84,
860
- 609,
861
- 473,
862
- 702
863
- ],
864
- "page_idx": 4
865
- },
866
- {
867
- "type": "text",
868
- "text": "4.3.1. SPEECH VARIATION",
869
- "text_level": 1,
870
- "bbox": [
871
- 84,
872
- 715,
873
- 272,
874
- 729
875
- ],
876
- "page_idx": 4
877
- },
878
- {
879
- "type": "text",
880
- "text": "To showcase the amount of variation and control thereof in Flowtron, we synthesize 10 mel-spectrograms and sample the Gaussian prior with $\\sigma^2 \\in \\{0.0, 0.5, 1.0\\}$ . All samples are generated conditioned on a fixed speaker Sally and text \"How much variation is there?\" to illustrate the relationship between $\\sigma^2$ and variability.",
881
- "bbox": [
882
- 84,
883
- 739,
884
- 473,
885
- 830
886
- ],
887
- "page_idx": 4
888
- },
889
- {
890
- "type": "text",
891
- "text": "Our results show that despite all the variability added by increasing $\\sigma^2$ , all the samples synthesized with Flowtron still produce high quality speech.",
892
- "bbox": [
893
- 84,
894
- 837,
895
- 473,
896
- 883
897
- ],
898
- "page_idx": 4
899
- },
900
- {
901
- "type": "text",
902
- "text": "Figure 2 also shows that unlike most SOTA models (Shen",
903
- "bbox": [
904
- 84,
905
- 890,
906
- 473,
907
- 906
908
- ],
909
- "page_idx": 4
910
- },
911
- {
912
- "type": "text",
913
- "text": "et al., 2017; Arik et al., 2017b;a; Ping et al., 2017; Skerry-Ryan et al., 2018; Wang et al., 2018; Binkowski et al., 2019), Flowtron generates sharp harmonics and well resolved formants without a compound loss nor Prenet or Postnet layers.",
914
- "bbox": [
915
- 496,
916
- 84,
917
- 888,
918
- 146
919
- ],
920
- "page_idx": 4
921
- },
922
- {
923
- "type": "image",
924
- "img_path": "images/f531ff4688197dedc3f519712b69c73640420e32e215aa3fa17b6321580ddb03.jpg",
925
- "image_caption": [
926
- "(a) $\\sigma^2 = 0$"
927
- ],
928
- "image_footnote": [],
929
- "bbox": [
930
- 542,
931
- 176,
932
- 856,
933
- 315
934
- ],
935
- "page_idx": 4
936
- },
937
- {
938
- "type": "image",
939
- "img_path": "images/c824fec24fc000d559eeb6f5034a1b6de248c08750f99912a3663c785251a8cd.jpg",
940
- "image_caption": [
941
- "(b) $\\sigma^2 = 0.5$"
942
- ],
943
- "image_footnote": [],
944
- "bbox": [
945
- 542,
946
- 369,
947
- 856,
948
- 506
949
- ],
950
- "page_idx": 4
951
- },
952
- {
953
- "type": "image",
954
- "img_path": "images/687bb83515ae804a699ca82ebfa8cbad946bacbf8437f26a739c6c524fb58773.jpg",
955
- "image_caption": [
956
- "(c) $\\sigma^2 = 1$",
957
- "Figure 2: Mel-spectrograms generated with Flowtron using different $\\sigma^2$ . This parameter can be adjusted to control mel-spectrogram variability during inference."
958
- ],
959
- "image_footnote": [],
960
- "bbox": [
961
- 542,
962
- 561,
963
- 856,
964
- 698
965
- ],
966
- "page_idx": 4
967
- },
968
- {
969
- "type": "text",
970
- "text": "Now we show that adjusting $\\sigma^2$ is a simple and valuable approach that provides more variation and control than Tacotron, without sacrificing speech quality. For this, we synthesize 10 samples with Tacotron 2 using different values for the Prenet dropout probability $p\\in \\{0.45,0.5,0.55\\}$ . We scale the outputs of the dropout output such that the mean of the output remains equal to the mean with $p = 0.5$ ,",
971
- "bbox": [
972
- 496,
973
- 799,
974
- 888,
975
- 906
976
- ],
977
- "page_idx": 4
978
- },
979
- {
980
- "type": "header",
981
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
982
- "bbox": [
983
- 200,
984
- 56,
985
- 769,
986
- 70
987
- ],
988
- "page_idx": 4
989
- },
990
- {
991
- "type": "text",
992
- "text": "the value used during training. Although we also provide samples computed on values of $p \\in [0,1]$ in our supplemental material, we do not include them in our results because they are unintelligible.",
993
- "bbox": [
994
- 84,
995
- 84,
996
- 473,
997
- 145
998
- ],
999
- "page_idx": 5
1000
- },
1001
- {
1002
- "type": "text",
1003
- "text": "In Figure 3 below we provide scatter plots from sample duration in seconds. Our results show that whereas $\\sigma^2 = 0$ produces samples with no variation in duration, larger values of $\\sigma^2$ produces samples with more variation in duration. Humans manipulate word and sentence length to express themselves, hence this is valuable.",
1004
- "bbox": [
1005
- 84,
1006
- 152,
1007
- 475,
1008
- 243
1009
- ],
1010
- "page_idx": 5
1011
- },
1012
- {
1013
- "type": "image",
1014
- "img_path": "images/2dc6d966f1aa35365fc598f41080e3eb94de0570fd245b4c3708e026191c12c4.jpg",
1015
- "image_caption": [
1016
- "Figure 3: Sample duration in seconds given parameters $\\sigma^2$ and $p$ . These results show that Flowtron provides more variation in sample duration than Tacotron 2."
1017
- ],
1018
- "image_footnote": [],
1019
- "bbox": [
1020
- 109,
1021
- 265,
1022
- 450,
1023
- 404
1024
- ],
1025
- "page_idx": 5
1026
- },
1027
- {
1028
- "type": "text",
1029
- "text": "In Figure 4 we provide scatter plots of $F_{0}$ contours extracted with the YIN algorithm (De Cheveigné & Kawahara, 2002), with minimum $F_{0}$ , maximum $F_{0}$ and harmonicity threshold equal to $80\\mathrm{Hz}$ , $400\\mathrm{Hz}$ and 0.3 respectively. Our results show a behavior similar to the previous sample duration analysis. As expected, $\\sigma^{2} = 0$ provides no variation in $F_{0}$ contour<sup>3</sup>, while increasing the value of $\\sigma^{2}$ will increase the amount of variation in $F_{0}$ contours.",
1030
- "bbox": [
1031
- 84,
1032
- 488,
1033
- 473,
1034
- 608
1035
- ],
1036
- "page_idx": 5
1037
- },
1038
- {
1039
- "type": "text",
1040
- "text": "Our results in Figure 4 also show that the samples produced with Flowtron are considerably less monotonous than the samples produced with Tacotron 2. Whereas increasing $\\sigma^2$ considerably increases variation in $F_0$ , modifying $p$ barely produces any variation. This is valuable because expressive speech is associated with non-monotonic $F_0$ contours.",
1041
- "bbox": [
1042
- 84,
1043
- 616,
1044
- 473,
1045
- 708
1046
- ],
1047
- "page_idx": 5
1048
- },
1049
- {
1050
- "type": "text",
1051
- "text": "4.3.2. INTERPOLATION BETWEEN SAMPLES",
1052
- "text_level": 1,
1053
- "bbox": [
1054
- 84,
1055
- 722,
1056
- 390,
1057
- 736
1058
- ],
1059
- "page_idx": 5
1060
- },
1061
- {
1062
- "type": "text",
1063
- "text": "With Flowtron we can perform interpolation in $z$ -space to achieve interpolation in mel-spectrogram space. For this experiment we evaluate Flowtron models with and without speaker embeddings. For the experiment with speaker embeddings we choose the Sally speaker and the phrase \"It is well known that deep generative models have a rich latent space\". We generate mel-spectrograms by sampling $z \\sim \\mathcal{N}(0, 0.8)$ twice and interpolating between them over 100 steps.",
1064
- "bbox": [
1065
- 84,
1066
- 744,
1067
- 475,
1068
- 881
1069
- ],
1070
- "page_idx": 5
1071
- },
1072
- {
1073
- "type": "image",
1074
- "img_path": "images/74b6f6d178ebe9e6a7f06a082d3ae60945991a1c3dd318cbb8938c05c77fbc18.jpg",
1075
- "image_caption": [
1076
- "(a) Flowtron $\\sigma^2 = 0$"
1077
- ],
1078
- "image_footnote": [],
1079
- "bbox": [
1080
- 506,
1081
- 82,
1082
- 887,
1083
- 146
1084
- ],
1085
- "page_idx": 5
1086
- },
1087
- {
1088
- "type": "image",
1089
- "img_path": "images/4cc52763d45a27a041fed184dce63e422f3e853380bbd5b7d7d37b4adf7b7739.jpg",
1090
- "image_caption": [
1091
- "(b) Flowtron $\\sigma^2 = 0.5$"
1092
- ],
1093
- "image_footnote": [],
1094
- "bbox": [
1095
- 504,
1096
- 181,
1097
- 887,
1098
- 246
1099
- ],
1100
- "page_idx": 5
1101
- },
1102
- {
1103
- "type": "image",
1104
- "img_path": "images/9e21bd90e7eb1bf44033713012683b9ff5c70e0e6b6f81daccf04388c2a0d555.jpg",
1105
- "image_caption": [
1106
- "(c) Flowtron $\\sigma^2 = 1$"
1107
- ],
1108
- "image_footnote": [],
1109
- "bbox": [
1110
- 504,
1111
- 280,
1112
- 887,
1113
- 344
1114
- ],
1115
- "page_idx": 5
1116
- },
1117
- {
1118
- "type": "image",
1119
- "img_path": "images/902ebaafea4e8691cb8f9576c354a86d410508db35a2b7f090901babc1fa1d0a.jpg",
1120
- "image_caption": [
1121
- "(d) Tacotron $2p\\in \\{0.45,0.5,0.55\\}$",
1122
- "Figure 4: $F_{0}$ contours obtained from samples generated by Flowtron and Tacotron 2 with different values for $\\sigma^{2}$ and $p$ . Flowtron provides more expressivity than Tacotron 2."
1123
- ],
1124
- "image_footnote": [],
1125
- "bbox": [
1126
- 504,
1127
- 378,
1128
- 887,
1129
- 443
1130
- ],
1131
- "page_idx": 5
1132
- },
1133
- {
1134
- "type": "text",
1135
- "text": "For the experiment without speaker embeddings we interpolate between Sally and Helen using the phrase \"We are testing this model\". First, we perform inference by sampling $z \\sim \\mathcal{N}(0, 0.5)$ until we find two $z$ values, $z_h$ and $z_s$ , that produce mel-spectrograms with Helen's and Sally's voice respectively. We then generate samples by performing inference while linearly interpolating between $z_h$ and $z_s$ .",
1136
- "bbox": [
1137
- 495,
1138
- 571,
1139
- 887,
1140
- 676
1141
- ],
1142
- "page_idx": 5
1143
- },
1144
- {
1145
- "type": "text",
1146
- "text": "Our same speaker interpolation samples show that Flowtron is able to interpolate between multiple samples while producing correct alignment maps. In addition, our different speaker interpolation samples show that Flowtron is able to blurry the boundaries between two speakers, creating a speaker that combines the characteristics of both.",
1147
- "bbox": [
1148
- 495,
1149
- 684,
1150
- 887,
1151
- 773
1152
- ],
1153
- "page_idx": 5
1154
- },
1155
- {
1156
- "type": "text",
1157
- "text": "4.4. Sampling the posterior",
1158
- "text_level": 1,
1159
- "bbox": [
1160
- 496,
1161
- 791,
1162
- 691,
1163
- 806
1164
- ],
1165
- "page_idx": 5
1166
- },
1167
- {
1168
- "type": "text",
1169
- "text": "In this approach we generate samples with Flowtron by sampling a posterior distribution conditioned on prior evidence containing speech characteristics of interest, as described in (Gambardella et al., 2019; Kingma & Dhariwal, 2018). In this experiment, we collect prior evidence $z_{e}$ by performing a forward pass with the speaker id to be used during",
1170
- "bbox": [
1171
- 495,
1172
- 814,
1173
- 887,
1174
- 906
1175
- ],
1176
- "page_idx": 5
1177
- },
1178
- {
1179
- "type": "header",
1180
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
1181
- "bbox": [
1182
- 200,
1183
- 56,
1184
- 769,
1185
- 70
1186
- ],
1187
- "page_idx": 5
1188
- },
1189
- {
1190
- "type": "page_footnote",
1191
- "text": "3Variations in $\\sigma^2 = 0$ are due to different $z$ for WaveGlow.",
1192
- "bbox": [
1193
- 104,
1194
- 888,
1195
- 457,
1196
- 904
1197
- ],
1198
- "page_idx": 5
1199
- },
1200
- {
1201
- "type": "text",
1202
- "text": "inference $^4$ , observed mel-spectrogram and text from a set of samples with characteristics of interest. If necessary, we time-concatenate each $z_{e}$ with itself to fulfill minimum length requirements defined according to the text length to be said during inference.",
1203
- "bbox": [
1204
- 84,
1205
- 84,
1206
- 473,
1207
- 161
1208
- ],
1209
- "page_idx": 6
1210
- },
1211
- {
1212
- "type": "text",
1213
- "text": "Tacotron 2 GST (Wang et al., 2018) has an equivalent posterior sampling approach, in which during inference the model is conditioned on a weighted sum of global style tokens (posterior) queried through an embedding of existing audio samples (prior). For Tacotron 2 GST, we evaluate two approaches: in one we use a single sample to query a style token in the other we use an average style token computed over multiple samples.",
1214
- "bbox": [
1215
- 84,
1216
- 167,
1217
- 475,
1218
- 289
1219
- ],
1220
- "page_idx": 6
1221
- },
1222
- {
1223
- "type": "text",
1224
- "text": "4.4.1. SEEN SPEAKER WITHOUT ALIGNMENTS",
1225
- "text_level": 1,
1226
- "bbox": [
1227
- 84,
1228
- 304,
1229
- 410,
1230
- 316
1231
- ],
1232
- "page_idx": 6
1233
- },
1234
- {
1235
- "type": "text",
1236
- "text": "In this experiment we compare Sally samples from Flowtron and Tacotron 2 GST generated by conditioning on the posterior computed over 30 Helen samples with the highest variance in fundamental frequency. The goal is to make a monotonic speaker sound expressive. Our experiments show that by sampling from the posterior or interpolating between the posterior and a standard Gaussian prior, Flowtron is able to make a monotonic speaker gradually sound more expressive. On the other hand, Tacotron 2 GST is barely able to alter characteristics of the monotonic speaker.",
1237
- "bbox": [
1238
- 84,
1239
- 327,
1240
- 475,
1241
- 479
1242
- ],
1243
- "page_idx": 6
1244
- },
1245
- {
1246
- "type": "text",
1247
- "text": "4.4.2. SEEN SPEAKER WITH ALIGNMENTS",
1248
- "text_level": 1,
1249
- "bbox": [
1250
- 84,
1251
- 493,
1252
- 380,
1253
- 507
1254
- ],
1255
- "page_idx": 6
1256
- },
1257
- {
1258
- "type": "text",
1259
- "text": "We use a Flowtron model with speaker embeddings to illustrate Flowtron's ability to learn and transfer acoustic characteristics that are hard to express algorithmically but easy to perceive acoustically, we select a female speaker from LibriTTS with a distinguished nasal voice and oscillation in $F_{0}$ as our source speaker and transfer her style to a male speaker, also from LibriTTS, with acoustic characteristics that sound different from the female speaker. Unlike the previous experiment, this time the text and the alignment maps are transferred from the female to the male speaker.",
1260
- "bbox": [
1261
- 84,
1262
- 515,
1263
- 475,
1264
- 667
1265
- ],
1266
- "page_idx": 6
1267
- },
1268
- {
1269
- "type": "text",
1270
- "text": "Figure 5 is an attempt to visualize the transfer of these acoustic qualities we described. It shows that after the transfer, the lower partials of the male speaker oscillate more and become more similar to the female speaker.",
1271
- "bbox": [
1272
- 84,
1273
- 674,
1274
- 473,
1275
- 736
1276
- ],
1277
- "page_idx": 6
1278
- },
1279
- {
1280
- "type": "text",
1281
- "text": "4.4.3. UNSEEN SPEAKER STYLE",
1282
- "text_level": 1,
1283
- "bbox": [
1284
- 84,
1285
- 750,
1286
- 312,
1287
- 763
1288
- ],
1289
- "page_idx": 6
1290
- },
1291
- {
1292
- "type": "text",
1293
- "text": "We compare samples generated with Flowtron and Tacotron 2 GST with speaker embeddings in which we modify a speaker's style by using data from the same speaker but from a style not seen during training. Whereas Sally's data used during training consists of news article readings, the evaluation samples contain Sally's interpretation of the somber and vampiresque novel Born of Darkness.",
1294
- "bbox": [
1295
- 84,
1296
- 773,
1297
- 475,
1298
- 878
1299
- ],
1300
- "page_idx": 6
1301
- },
1302
- {
1303
- "type": "image",
1304
- "img_path": "images/feec29abeea58716be55d2721b04e418886a161f073d96450d46164a62d783f9.jpg",
1305
- "image_caption": [
1306
- "(a) Female"
1307
- ],
1308
- "image_footnote": [],
1309
- "bbox": [
1310
- 504,
1311
- 82,
1312
- 890,
1313
- 207
1314
- ],
1315
- "page_idx": 6
1316
- },
1317
- {
1318
- "type": "image",
1319
- "img_path": "images/7d7ac4054e134c7de78153892277aed2809297beababa032997272ff97d8540a.jpg",
1320
- "image_caption": [
1321
- "(b) Transfer"
1322
- ],
1323
- "image_footnote": [],
1324
- "bbox": [
1325
- 503,
1326
- 234,
1327
- 890,
1328
- 359
1329
- ],
1330
- "page_idx": 6
1331
- },
1332
- {
1333
- "type": "image",
1334
- "img_path": "images/c76aca10d669e5ca92748ed0f83584de62c03c1b11a9e5405b933e737cbe7643.jpg",
1335
- "image_caption": [
1336
- "(c) Male",
1337
- "Figure 5: Mel-spectrograms from a female speaker, male speaker and a sample where we transfer the acoustic characteristics from the female speaker to the male speaker. It shows that the transferred sample is more similar to the female speaker than the male speaker."
1338
- ],
1339
- "image_footnote": [],
1340
- "bbox": [
1341
- 504,
1342
- 388,
1343
- 890,
1344
- 513
1345
- ],
1346
- "page_idx": 6
1347
- },
1348
- {
1349
- "type": "text",
1350
- "text": "Our samples show that Tacotron 2 GST fails to emulate the somber style from Born of Darkness's data. We show that Flowtron succeeds in transferring not only to the somber style in the evaluation data, but also the long pauses associated with the narrative style.",
1351
- "bbox": [
1352
- 496,
1353
- 648,
1354
- 887,
1355
- 724
1356
- ],
1357
- "page_idx": 6
1358
- },
1359
- {
1360
- "type": "text",
1361
- "text": "4.4.4. UNSEEN SPEAKER",
1362
- "text_level": 1,
1363
- "bbox": [
1364
- 496,
1365
- 738,
1366
- 674,
1367
- 752
1368
- ],
1369
- "page_idx": 6
1370
- },
1371
- {
1372
- "type": "text",
1373
- "text": "In this experiment we compare Flowtron and Tacotron 2 GST samples in which we transfer the speaking style of a speaker not seen during training. Both models use speaker embeddings.",
1374
- "bbox": [
1375
- 496,
1376
- 762,
1377
- 885,
1378
- 823
1379
- ],
1380
- "page_idx": 6
1381
- },
1382
- {
1383
- "type": "text",
1384
- "text": "For these experiments, we consider two speakers. The first comes from speaker ID 03 from RAVDESS, a dataset with emotion labels. We focus on the label \"surprised\". The second speaker is Richard Feynman, using a set of 10 audio samples collected from the web.",
1385
- "bbox": [
1386
- 496,
1387
- 829,
1388
- 885,
1389
- 905
1390
- ],
1391
- "page_idx": 6
1392
- },
1393
- {
1394
- "type": "header",
1395
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
1396
- "bbox": [
1397
- 200,
1398
- 56,
1399
- 769,
1400
- 70
1401
- ],
1402
- "page_idx": 6
1403
- },
1404
- {
1405
- "type": "page_footnote",
1406
- "text": "$^{4}$To remove this speaker's information from $z_{e}$",
1407
- "bbox": [
1408
- 104,
1409
- 888,
1410
- 385,
1411
- 902
1412
- ],
1413
- "page_idx": 6
1414
- },
1415
- {
1416
- "type": "text",
1417
- "text": "For each experiment, we use the Sally speaker and the sentences \"Humans are walking on the street?\" and \"Surely you are joking mister Feynman,\" which do not exist in RAVDESS nor in the audio samples from Richard Feynman.",
1418
- "bbox": [
1419
- 84,
1420
- 84,
1421
- 473,
1422
- 159
1423
- ],
1424
- "page_idx": 7
1425
- },
1426
- {
1427
- "type": "text",
1428
- "text": "The samples generated with Tacotron 2 GST are not able to emulate the surprised style from RAVDESS nor Feynman's prosody and acoustic characteristics. Flowtron, on the other hand, is able to make Sally sound surprised, which is drastically different from the monotonous baseline. Likewise, Flowtron is able to pick up on the prosody and articulation details particular to Feynman's speaking style, and transfer them to Sally.",
1429
- "bbox": [
1430
- 84,
1431
- 167,
1432
- 473,
1433
- 287
1434
- ],
1435
- "page_idx": 7
1436
- },
1437
- {
1438
- "type": "text",
1439
- "text": "4.5. Sampling the Gaussian Mixture",
1440
- "text_level": 1,
1441
- "bbox": [
1442
- 84,
1443
- 305,
1444
- 339,
1445
- 320
1446
- ],
1447
- "page_idx": 7
1448
- },
1449
- {
1450
- "type": "text",
1451
- "text": "In this last section we showcase visualizations and samples from Flowtron Gaussian Mixture (GM). First we investigate how different mixture components and speakers are correlated. Then we provide sound examples in which we modulate speech characteristics by translating one of the dimensions of an individual component.",
1452
- "bbox": [
1453
- 84,
1454
- 329,
1455
- 473,
1456
- 419
1457
- ],
1458
- "page_idx": 7
1459
- },
1460
- {
1461
- "type": "text",
1462
- "text": "4.5.1. VISUALIZING ASSIGNMENTS",
1463
- "text_level": 1,
1464
- "bbox": [
1465
- 84,
1466
- 434,
1467
- 333,
1468
- 448
1469
- ],
1470
- "page_idx": 7
1471
- },
1472
- {
1473
- "type": "text",
1474
- "text": "For the first experiment, we train a Flowtron Gaussian Mixture on LSH with 2 steps of flow, speaker embeddings and fixed mean and covariance (Flowtron GM-A). We obtain mixture component assignments per mel-spectrogram by performing a forward pass and averaging the component assignment over time and samples. Figure 6 shows that whereas most speakers are equally assigned to all components, component 7 is almost exclusively assigned to Helen's data.",
1475
- "bbox": [
1476
- 84,
1477
- 457,
1478
- 475,
1479
- 592
1480
- ],
1481
- "page_idx": 7
1482
- },
1483
- {
1484
- "type": "image",
1485
- "img_path": "images/95dc28151d1802e27b0464f8d320b4f457eeeaefe0f77711babf55383ef0537b.jpg",
1486
- "image_caption": [
1487
- "Figure 6: Component assignments for Flowtron GM-A. Unlike LJS and Sally, Helen is almost exclusively assigned to component 7."
1488
- ],
1489
- "image_footnote": [],
1490
- "bbox": [
1491
- 89,
1492
- 608,
1493
- 470,
1494
- 695
1495
- ],
1496
- "page_idx": 7
1497
- },
1498
- {
1499
- "type": "text",
1500
- "text": "In the second experiment, we train a Flowtron Gaussian Mixture on LibriTTS with 1 step of flow, without speaker embeddings and predicted mean and covariance (Flowtron GM-B). Figure 7 shows that Flowtron GM assigns more probability to component 7 when the speaker is male than when it's female. Conversely, the model assigns more probability to component 6 when the speaker is female than when it's male.",
1501
- "bbox": [
1502
- 84,
1503
- 785,
1504
- 475,
1505
- 905
1506
- ],
1507
- "page_idx": 7
1508
- },
1509
- {
1510
- "type": "image",
1511
- "img_path": "images/3679a60d11546426354812b28643ceab8d77f27c5b05a6af502cd335db17ba09.jpg",
1512
- "image_caption": [
1513
- "Figure 7: Component assignments for Flowtron GM-B. Components 7 and 8 are assigned different probabilities according to gender, suggesting that the information stored in the components is gender dependent."
1514
- ],
1515
- "image_footnote": [],
1516
- "bbox": [
1517
- 500,
1518
- 82,
1519
- 883,
1520
- 170
1521
- ],
1522
- "page_idx": 7
1523
- },
1524
- {
1525
- "type": "text",
1526
- "text": "4.5.2. TRANSLATING DIMENSIONS",
1527
- "text_level": 1,
1528
- "bbox": [
1529
- 496,
1530
- 277,
1531
- 741,
1532
- 292
1533
- ],
1534
- "page_idx": 7
1535
- },
1536
- {
1537
- "type": "text",
1538
- "text": "In this subsection, we use the model Flowtron GM-A described previously. We focus on selecting a single mixture component and translating one of its dimensions by adding an offset.",
1539
- "bbox": [
1540
- 496,
1541
- 301,
1542
- 885,
1543
- 359
1544
- ],
1545
- "page_idx": 7
1546
- },
1547
- {
1548
- "type": "text",
1549
- "text": "The samples in our supplementary material show that we are able to modulate specific speech characteristics like pitch and word duration. Although the samples generated by translating one of the dimensions associated with pitch height have different pitch contours, they have the same duration. Similarly, our samples show that translating the dimension associated with the length of the first word does not modulate the pitch of the first word. This provides evidence that we can modulate these attributes by manipulating these dimensions and that the model is able to learn a disentangled representation of these speech attributes.",
1550
- "bbox": [
1551
- 495,
1552
- 369,
1553
- 885,
1554
- 535
1555
- ],
1556
- "page_idx": 7
1557
- },
1558
- {
1559
- "type": "text",
1560
- "text": "5. Discussion",
1561
- "text_level": 1,
1562
- "bbox": [
1563
- 496,
1564
- 555,
1565
- 609,
1566
- 570
1567
- ],
1568
- "page_idx": 7
1569
- },
1570
- {
1571
- "type": "text",
1572
- "text": "In this paper we propose a new text-to-mel-spectrogram synthesis model based on autoregressive flows that is optimized by maximizing the likelihood and allows for control of speech variation and style transfer. Our results show that samples generated with Flowtron achieve mean opinion scores that are similar to samples generated with state-of-the-art text-to-speech synthesis models. In addition, we demonstrate that at no extra cost and without a compound loss term, our model learns a latent space that stores non-textual information. Our experiments show that Flowtron allows the user to transfer characteristics from a source sample or speaker to a target speaker, for example making a monotonous speaker sound more expressive.",
1573
- "bbox": [
1574
- 495,
1575
- 580,
1576
- 885,
1577
- 777
1578
- ],
1579
- "page_idx": 7
1580
- },
1581
- {
1582
- "type": "text",
1583
- "text": "Our results show that despite all the variability added by increasing $\\sigma^2$, the samples synthesized with Flowtron still produce high-quality speech. They also show that Flowtron learns a latent space over non-textual features that can be investigated and manipulated to give the user more control over the generative model's output. We provide many examples that showcase this, including increasing variation in mel-spectrograms in a controllable manner, transferring",
1584
- "bbox": [
1585
- 495,
1586
- 784,
1587
- 885,
1588
- 905
1589
- ],
1590
- "page_idx": 7
1591
- },
1592
- {
1593
- "type": "header",
1594
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
1595
- "bbox": [
1596
- 200,
1597
- 56,
1598
- 767,
1599
- 70
1600
- ],
1601
- "page_idx": 7
1602
- },
1603
- {
1604
- "type": "text",
1605
- "text": "the style from speakers seen and unseen during training to another speaker using sentences with similar or different text, and making a monotonous speaker sound more expressive.",
1606
- "bbox": [
1607
- 88,
1608
- 85,
1609
- 473,
1610
- 143
1611
- ],
1612
- "page_idx": 8
1613
- },
1614
- {
1615
- "type": "text",
1616
- "text": "Flowtron produces expressive speech without labeled data or ever seeing expressive data. It pushes text-to-speech synthesis beyond the expressive limits of personal assistants. It opens new avenues for speech synthesis in human-computer interaction and the arts, where realism and expressivity are of utmost importance. To our knowledge, this work is the first to demonstrate the advantages of using normalizing flow models in text to mel-spectrogram synthesis.",
1617
- "bbox": [
1618
- 88,
1619
- 152,
1620
- 473,
1621
- 273
1622
- ],
1623
- "page_idx": 8
1624
- },
1625
- {
1626
- "type": "text",
1627
- "text": "References",
1628
- "text_level": 1,
1629
- "bbox": [
1630
- 501,
1631
- 84,
1632
- 591,
1633
- 98
1634
- ],
1635
- "page_idx": 8
1636
- },
1637
- {
1638
- "type": "list",
1639
- "sub_type": "ref_text",
1640
- "list_items": [
1641
- "Akuzawa, K., Iwasawa, Y., and Matsuo, Y. Expressive speech synthesis via modeling expressions with variational autoencoder. arXiv preprint arXiv:1804.02135, 2018.",
1642
- "Arik, S., Diamos, G., Gibiansky, A., Miller, J., Peng, K., Ping, W., Raiman, J., and Zhou, Y. Deep voice 2: Multi-speaker neural text-to-speech. arXiv preprint arXiv:1705.08947, 2017a.",
1643
- "Arik, S. O., Chrzanowski, M., Coates, A., Diamos, G., Gibiansky, A., Kang, Y., Li, X., Miller, J., Ng, A., Raiman, J., et al. Deep voice: Real-time neural text-to-speech. arXiv preprint arXiv:1702.07825, 2017b.",
1644
- "Badham, J., Lasker, L., Parkes, W. F., Rubinstein, A. B., Broderick, M., Coleman, D., and Wood, J. Wargames, 1983.",
1645
- "Binkowski, M., Donahue, J., Dieleman, S., Clark, A., Elsen, E., Casagrande, N., Cobo, L. C., and Simonyan, K. High fidelity speech synthesis with adversarial networks. arXiv preprint arXiv:1909.11646, 2019.",
1646
- "De Cheveigné, A. and Kawahara, H. Yin, a fundamental frequency estimator for speech and music. The Journal of the Acoustical Society of America, 111(4):1917-1930, 2002.",
1647
- "Dinh, L., Krueger, D., and Bengio, Y. Nice: Non-linear independent components estimation. arXiv preprint arXiv:1410.8516, 2014.",
1648
- "Dinh, L., Sohl-Dickstein, J., and Bengio, S. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016.",
1649
- "Gambardella, A., Baydin, A. G., and Torr, P. H. Transflow learning: Repurposing flow models without retraining. arXiv preprint arXiv:1911.13270, 2019.",
1650
- "Gibiansky, A., Arik, S., Diamos, G., Miller, J., Peng, K., Ping, W., Raiman, J., and Zhou, Y. Deep voice 2: Multi-speaker neural text-to-speech. In Advances in neural information processing systems, pp. 2962-2970, 2017.",
1651
- "Hsu, W.-N., Zhang, Y., Weiss, R. J., Zen, H., Wu, Y., Wang, Y., Cao, Y., Jia, Y., Chen, Z., Shen, J., et al. Hierarchical generative modeling for controllable speech synthesis. arXiv preprint arXiv:1810.07217, 2018.",
1652
- "Ito, K. et al. The LJ speech dataset, 2017.",
1653
- "Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014."
1654
- ],
1655
- "bbox": [
1656
- 500,
1657
- 108,
1658
- 885,
1659
- 905
1660
- ],
1661
- "page_idx": 8
1662
- },
1663
- {
1664
- "type": "header",
1665
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
1666
- "bbox": [
1667
- 200,
1668
- 56,
1669
- 767,
1670
- 70
1671
- ],
1672
- "page_idx": 8
1673
- },
1674
- {
1675
- "type": "list",
1676
- "sub_type": "ref_text",
1677
- "list_items": [
1678
- "Kingma, D. P. and Dhariwal, P. Glow: Generative flow with invertible 1x1 convolutions. arXiv preprint arXiv:1807.03039, 2018.",
1679
- "Kingma, D. P., Salimans, T., Jozefowicz, R., Chen, X., Sutskever, I., and Welling, M. Improved variational inference with inverse autoregressive flow. In Advances in Neural Information Processing Systems, pp. 4743-4751, 2016.",
1680
- "Lee, J., Choi, H.-S., Jeon, C.-B., Koo, J., and Lee, K. Adversarially trained end-to-end korean singing voice synthesis system. arXiv preprint arXiv:1908.01919, 2019.",
1681
- "Nishimura, M., Hashimoto, K., Oura, K., Nankaku, Y., and Tokuda, K. Singing voice synthesis based on deep neural networks. In Interspeech 2016, pp. 2478-2482, 2016. doi: 10.21437/Interspeech.2016-1027. URL http://dx.doi.org/10.21437/Interspeech.2016-1027.",
1682
- "Parmar, N., Vaswani, A., Uszkoreit, J., Kaiser, L., Shazeer, N., Ku, A., and Tran, D. Image transformer. arXiv preprint arXiv:1802.05751, 2018.",
1683
- "Ping, W., Peng, K., Gibiansky, A., Arik, S. O., Kannan, A., Narang, S., Raiman, J., and Miller, J. Deep voice 3: 2000-speaker neural text-to-speech. arXiv preprint arXiv:1710.07654, 2017.",
1684
- "Prenger, R., Valle, R., and Catanzaro, B. Waveglow: A flow-based generative network for speech synthesis. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3617-3621. IEEE, 2019.",
1685
- "Radford, A., Metz, L., and Chintala, S. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015.",
1686
- "Shen, J., Pang, R., Weiss, R. J., Schuster, M., Jaitly, N., Yang, Z., Chen, Z., Zhang, Y., Wang, Y., Skerry-Ryan, R., et al. Natural tts synthesis by conditioning wavenet on mel spectrogram predictions. arXiv preprint arXiv:1712.05884, 2017.",
1687
- "Skerry-Ryan, R., Battenberg, E., Xiao, Y., Wang, Y., Stanton, D., Shor, J., Weiss, R. J., Clark, R., and Saurous, R. A. Towards end-to-end prosody transfer for expressive speech synthesis with tacotron. arXiv preprint arXiv:1803.09047, 2018.",
1688
- "Umeda, N., Matsui, E., Suzuki, T., and Omura, H. Synthesis of fairy tales using an analog vocal tract. In Proceedings of 6th International Congress on Acoustics, pp. B159-162, 1968."
1689
- ],
1690
- "bbox": [
1691
- 86,
1692
- 84,
1693
- 475,
1694
- 904
1695
- ],
1696
- "page_idx": 9
1697
- },
1698
- {
1699
- "type": "list",
1700
- "sub_type": "ref_text",
1701
- "list_items": [
1702
- "Valle, R., Li, J., Prenger, R., and Catanzaro, B. Mellotron github repo, 2019a. URL https://github.com/NVIDIA/mellotron.",
1703
- "Valle, R., Li, J., Prenger, R., and Catanzaro, B. Mellotron: Multispeaker expressive voice synthesis by conditioning on rhythm, pitch and global style tokens. arXiv preprint arXiv:1910.11997, 2019b.",
1704
- "Vinyals, O., Kaiser, L., Koo, T., Petrov, S., Sutskever, I., and Hinton, G. Grammar as a foreign language. In Advances in neural information processing systems, pp. 2773-2781, 2015.",
1705
- "Wang, Y., Skerry-Ryan, R., Stanton, D., Wu, Y., Weiss, R. J., Jaitly, N., Yang, Z., Xiao, Y., Chen, Z., Bengio, S., et al. Tacotron: A fully end-to-end text-to-speech synthesis model. arXiv preprint arXiv:1703.10135, 2017.",
1706
- "Wang, Y., Stanton, D., Zhang, Y., Skerry-Ryan, R., Battenberg, E., Shor, J., Xiao, Y., Ren, F., Jia, Y., and Saurous, R. A. Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis. arXiv preprint arXiv:1803.09017, 2018.",
1707
- "Weide, R. L. The cmu pronouncing dictionary. URL: http://wwwspeech.cs.cmu.edu/cgi-bin/cmudict, 1998.",
1708
- "Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019."
1709
- ],
1710
- "bbox": [
1711
- 500,
1712
- 84,
1713
- 885,
1714
- 536
1715
- ],
1716
- "page_idx": 9
1717
- },
1718
- {
1719
- "type": "header",
1720
- "text": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis",
1721
- "bbox": [
1722
- 200,
1723
- 56,
1724
- 769,
1725
- 71
1726
- ],
1727
- "page_idx": 9
1728
- }
1729
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8f412f209288116dc7168798b8e71eb0ed6f4b712acbd6c4ee2a3ded6f669f4
3
+ size 71773
 
data/2020/2005_05xxx/2005.05957/3bb5c045-9ee6-4415-9cd3-ec7fa94a2a26_model.json CHANGED
@@ -1,2145 +1,3 @@
1
- [
2
- [
3
- {
4
- "type": "aside_text",
5
- "bbox": [
6
- 0.023,
7
- 0.275,
8
- 0.058,
9
- 0.708
10
- ],
11
- "angle": 270,
12
- "content": "arXiv:2005.05957v3 [cs.SD] 16 Jul 2020"
13
- },
14
- {
15
- "type": "title",
16
- "bbox": [
17
- 0.161,
18
- 0.11,
19
- 0.815,
20
- 0.157
21
- ],
22
- "angle": 0,
23
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
24
- },
25
- {
26
- "type": "text",
27
- "bbox": [
28
- 0.261,
29
- 0.199,
30
- 0.71,
31
- 0.216
32
- ],
33
- "angle": 0,
34
- "content": "Rafael Valle<sup>1</sup> Kevin Shih<sup>1</sup> Ryan Prenger<sup>1</sup> Bryan Catanzaro<sup>1</sup>"
35
- },
36
- {
37
- "type": "title",
38
- "bbox": [
39
- 0.243,
40
- 0.243,
41
- 0.32,
42
- 0.259
43
- ],
44
- "angle": 0,
45
- "content": "Abstract"
46
- },
47
- {
48
- "type": "text",
49
- "bbox": [
50
- 0.118,
51
- 0.267,
52
- 0.445,
53
- 0.584
54
- ],
55
- "angle": 0,
56
- "content": "In this paper we propose Flowtron: an autoregressive flow-based generative network for text-to-speech synthesis with control over speech variation and style transfer. Flowtron borrows insights from IAF and revamps Tacotron in order to provide high-quality and expressive melspectrogram synthesis. Flowtron is optimized by maximizing the likelihood of the training data, which makes training simple and stable. Flowtron learns an invertible mapping of data to a latent space that can be manipulated to control many aspects of speech synthesis (pitch, tone, speech rate, cadence, accent). Our mean opinion scores (MOS) show that Flowtron matches state-of-the-art TTS models in terms of speech quality. In addition, we provide results on control of speech variation, interpolation between samples and style transfer between speakers seen and unseen during training. Code and pretrained models will be made publicly available at https://github.com/NVIDIA/flowtron."
57
- },
58
- {
59
- "type": "title",
60
- "bbox": [
61
- 0.087,
62
- 0.615,
63
- 0.218,
64
- 0.631
65
- ],
66
- "angle": 0,
67
- "content": "1. Introduction"
68
- },
69
- {
70
- "type": "text",
71
- "bbox": [
72
- 0.085,
73
- 0.64,
74
- 0.477,
75
- 0.822
76
- ],
77
- "angle": 0,
78
- "content": "Current speech synthesis methods do not give the user enough control over how speech actually sounds. Automatically converting text to audio that successfully communicates the text was achieved a long time ago (Umeda et al., 1968; Badham et al., 1983). However, communicating only the text information leaves out all of the acoustic properties of the voice that convey much of the meaning and human expressiveness. Nearly all the research into speech synthesis since the 1960s has focused on adding that non-textual information to synthesized speech. But in spite of this, the typical speech synthesis problem is formulated as a text to speech problem in which the user inputs only text."
79
- },
80
- {
81
- "type": "text",
82
- "bbox": [
83
- 0.086,
84
- 0.829,
85
- 0.475,
86
- 0.845
87
- ],
88
- "angle": 0,
89
- "content": "Taming the non-textual information in speech is difficult"
90
- },
91
- {
92
- "type": "text",
93
- "bbox": [
94
- 0.496,
95
- 0.244,
96
- 0.888,
97
- 0.427
98
- ],
99
- "angle": 0,
100
- "content": "because the non-textual is unlabeled. A voice actor may speak the same text with different emphasis or emotion based on context, but it is unclear how to label a particular reading. Without labels for the non-textual information, models have fallen back to unsupervised learning. Recent models have achieved nearly human-level quality, despite treating the non-textual information as a black box. The model's only goal is to match the patterns in the training data (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017). Despite these models' excellent ability to recreate the non-textual information in the training set, the user has no insight into or control over the non-textual information."
101
- },
102
- {
103
- "type": "text",
104
- "bbox": [
105
- 0.496,
106
- 0.433,
107
- 0.889,
108
- 0.617
109
- ],
110
- "angle": 0,
111
- "content": "It is possible to formulate an unsupervised learning problem in such a way that the user can gain insights into the structure of a data set. One way is to formulate the problem such that the data is assumed to have a representation in some latent space, and have the model learn that representation. This latent space can then be investigated and manipulated to give the user more control over the generative model's output. Such approaches have been popular in image generation for some time now, allowing users to interpolate smoothly between images and to identify portions of the latent space that correlate with various features (Radford et al., 2015; Kingma & Dhariwal, 2018)."
112
- },
113
- {
114
- "type": "text",
115
- "bbox": [
116
- 0.496,
117
- 0.622,
118
- 0.889,
119
- 0.895
120
- ],
121
- "angle": 0,
122
- "content": "In audio, however, approaches have focused on embeddings that remove a large amount of information and are obtained from assumptions about what is interesting. Recent approaches that utilize deep learning for expressive speech synthesis combine text and a learned latent embedding for prosody or global style (Wang et al., 2018; Skerry-Ryan et al., 2018). A variation of this approach is proposed by (Hsu et al., 2018), wherein a Gaussian mixture model (GMM) encoding the audio is added to Tacotron to learn a latent embedding. These approaches control the nontextual information by learning a bank of embeddings or by providing the target output as an input to the model and compressing it. However, these approaches require making assumptions about the dimensionality of the embeddings before hand and are not guaranteed to contain all the nontextual information it takes to reconstruct speech, including the risk of having dummy dimensions or not enough capacity, as the appendix sections in (Wang et al., 2018;"
123
- },
124
- {
125
- "type": "page_footnote",
126
- "bbox": [
127
- 0.086,
128
- 0.853,
129
- 0.478,
130
- 0.881
131
- ],
132
- "angle": 0,
133
- "content": "\\(^{1}\\)NVIDIA Applied Deep Learning Research (ADLR). Correspondence to: Rafael Valle <rafaelvalle@nvidia.com>."
134
- }
135
- ],
136
- [
137
- {
138
- "type": "header",
139
- "bbox": [
140
- 0.202,
141
- 0.058,
142
- 0.769,
143
- 0.071
144
- ],
145
- "angle": 0,
146
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
147
- },
148
- {
149
- "type": "text",
150
- "bbox": [
151
- 0.089,
152
- 0.086,
153
- 0.473,
154
- 0.221
155
- ],
156
- "angle": 0,
157
- "content": "Skerry-Ryan et al., 2018; Hsu et al., 2018) confirm. They also require finding an encoder and embedding that prevents the model from simply learning a complex identity function that ignores other inputs. Furthermore, these approaches focus on fixed-length embeddings under the assumption that variable-length embeddings are not robust to text and speaker perturbations. Finally, most of these approaches do not give the user control over the degree of variability in the synthesized speech."
158
- },
159
- {
160
- "type": "text",
161
- "bbox": [
162
- 0.089,
163
- 0.229,
164
- 0.475,
165
- 0.424
166
- ],
167
- "angle": 0,
168
- "content": "In this paper we propose Flowtron: an autoregressive flow-based generative network for mel-spectrogram synthesis with control over acoustics and speech. Flowtron learns an invertible function that maps a distribution over mel-spectrograms to a latent \\( z \\) space parameterized by a spherical Gaussian. With this formalization, we can generate samples containing specific speech characteristics manifested in mel-space by finding and sampling the corresponding region in \\( z \\)-space. In the basic approach, we generate samples by sampling a zero mean spherical Gaussian prior and control the amount of variation by adjusting its variance. Despite its simplicity, this approach offers more speech variation and control than Tacotron."
169
- },
170
- {
171
- "type": "text",
172
- "bbox": [
173
- 0.089,
174
- 0.433,
175
- 0.475,
176
- 0.642
177
- ],
178
- "angle": 0,
179
- "content": "In Flowtron, we can access specific regions of mel-spectrogram space by sampling a posterior distribution conditioned on prior evidence from existing samples (Kingma & Dhariwal, 2018; Gambardella et al., 2019). This approach allows us to make a monotonous speaker more expressive by computing the region in \\(z\\)-space associated with expressive speech as it is manifested in the prior evidence. Finally, our formulation also allows us to impose a structure to the \\(z\\)-space and parametrize it with a Gaussian mixture, for example. In this approach related to (Hsu et al., 2018), speech characteristics in mel-spectrogram space can be associated with individual components. Hence, it is possible to generate samples with specific speech characteristics by selecting a component or a mixture thereof<sup>1</sup>."
180
- },
181
- {
182
- "type": "text",
183
- "bbox": [
184
- 0.089,
185
- 0.651,
186
- 0.473,
187
- 0.727
188
- ],
189
- "angle": 0,
190
- "content": "Although VAEs and GANs (Hsu et al., 2018; Binkowski et al., 2019; Akuzawa et al., 2018) based models also provide a latent prior that can be easily manipulated, in Flowtron this comes at no cost in speech quality nor optimization challenges."
191
- },
192
- {
193
- "type": "text",
194
- "bbox": [
195
- 0.089,
196
- 0.734,
197
- 0.473,
198
- 0.839
199
- ],
200
- "angle": 0,
201
- "content": "We find that Flowtron is able to generalize and produce sharp mel-spectrograms by simply maximizing the likelihood of the data while not requiring any additional Prenet or Postnet layer (Wang et al., 2017), nor compound loss functions required by most state of the art models like (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017; Skerry-Ryan et al., 2018; Wang et al., 2018; Binkowski et al., 2019)."
202
- },
203
- {
204
- "type": "text",
205
- "bbox": [
206
- 0.089,
207
- 0.848,
208
- 0.473,
209
- 0.877
210
- ],
211
- "angle": 0,
212
- "content": "Flowtron is optimized by maximizing the likelihood of the training data, which makes training simple and stable. It"
213
- },
214
- {
215
- "type": "text",
216
- "bbox": [
217
- 0.502,
218
- 0.086,
219
- 0.885,
220
- 0.251
221
- ],
222
- "angle": 0,
223
- "content": "learns an invertible mapping of the a latent space that can be manipulated to control many aspects of speech synthesis. Our mean opinion scores (MOS) show that Flowtron matches state-of-the-art TTS models in terms of speech quality. In addition, we provide results on control of speech variation, interpolation between samples, and style transfer between seen and unseen speakers with similar and different sentences. To our knowledge, this work is the first to show evidence that normalizing flow models can also be used for text-to-speech synthesis. We hope this will further stimulate developments in normalizing flows."
224
- },
225
- {
226
- "type": "title",
227
- "bbox": [
228
- 0.502,
229
- 0.271,
230
- 0.638,
231
- 0.287
232
- ],
233
- "angle": 0,
234
- "content": "2. Related Work"
235
- },
236
- {
237
- "type": "text",
238
- "bbox": [
239
- 0.502,
240
- 0.297,
241
- 0.885,
242
- 0.462
243
- ],
244
- "angle": 0,
245
- "content": "Earlier approaches to text-to-speech synthesis that achieve human like results focus on synthesizing acoustic features from text, treating the non-textual information as a black box. (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017). Approaches like (Wang et al., 2017; Shen et al., 2017) require adding a critical Prenet layer to help with convergence and improve generalization (Wang et al., 2017) Furthermore, such models require an additional Postnet residual layer and modified loss to produce \"better resolved harmonics and high frequency formant structures, which reduces synthesis artifacts.\""
246
- },
247
- {
248
- "type": "text",
249
- "bbox": [
250
- 0.502,
251
- 0.47,
252
- 0.885,
253
- 0.53
254
- ],
255
- "angle": 0,
256
- "content": "One approach to dealing with this lack of labels for underlying non-textual information is to look for hand engineered statistics based on the audio that we believe are correlated with this underlying information."
257
- },
258
- {
259
- "type": "text",
260
- "bbox": [
261
- 0.502,
262
- 0.539,
263
- 0.885,
264
- 0.643
265
- ],
266
- "angle": 0,
267
- "content": "This is the approach taken by models like (Nishimura et al., 2016; Lee et al., 2019), wherein utterances are conditioned on audio statistics that can be calculated directly from the training data such as \\( F_{0} \\) (fundamental frequency). However, in order to use such models, the statistics we hope to approximate must be decided upon a-priori, and the target value of these statistics must be determined before synthesis."
268
- },
269
- {
270
- "type": "text",
271
- "bbox": [
272
- 0.502,
273
- 0.651,
274
- 0.885,
275
- 0.787
276
- ],
277
- "angle": 0,
278
- "content": "Another approach to dealing with the issue of unlabeled non-textual information is to learn a latent embedding for prosody or global style. This is the approach taken by models like (Skerry-Ryan et al., 2018; Wang et al., 2018), wherein in a bank of embeddings or a latent embedding space of prosody is learned from unlabelled data. While these approaches have shown promise, manipulating such latent variables only offers a coarse control over expressive characteristics of speech."
279
- },
280
- {
281
- "type": "text",
282
- "bbox": [
283
- 0.502,
284
- 0.795,
285
- 0.885,
286
- 0.9
287
- ],
288
- "angle": 0,
289
- "content": "A mixed approach consists of combining engineered statistics with latent embeddings learned in an unsupervised fashion. This is the approach taken by models like Mellotron (Valle et al., 2019b). In Mellotron, utterances are conditioned on both audio statistics and a latent embedding of acoustic features derived from a reference acoustic representation. Despite its advantages, this approach still requires"
290
- },
291
- {
292
- "type": "page_footnote",
293
- "bbox": [
294
- 0.11,
295
- 0.888,
296
- 0.438,
297
- 0.902
298
- ],
299
- "angle": 0,
300
- "content": "What is relevant statistically might not be perceptually."
301
- }
302
- ],
303
- [
304
- {
305
- "type": "header",
306
- "bbox": [
307
- 0.201,
308
- 0.057,
309
- 0.771,
310
- 0.071
311
- ],
312
- "angle": 0,
313
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
314
- },
315
- {
316
- "type": "text",
317
- "bbox": [
318
- 0.086,
319
- 0.086,
320
- 0.38,
321
- 0.1
322
- ],
323
- "angle": 0,
324
- "content": "determining these statistics before synthesis."
325
- },
326
- {
327
- "type": "title",
328
- "bbox": [
329
- 0.087,
330
- 0.12,
331
- 0.188,
332
- 0.135
333
- ],
334
- "angle": 0,
335
- "content": "3. Flowtron"
336
- },
337
- {
338
- "type": "text",
339
- "bbox": [
340
- 0.085,
341
- 0.146,
342
- 0.477,
343
- 0.297
344
- ],
345
- "angle": 0,
346
- "content": "Flowtron is an autoregressive generative model that generates a sequence of mel spectrogram frames \\( p(x) \\) by producing each mel-spectrogram frame based on previous mel-spectrogram frames \\( p(x) = \\prod p(x_{t}|x_{1:t - 1}) \\). Our setup uses a neural network as a generative model by sampling from a simple distribution \\( p(z) \\). We consider two simple distributions with the same number of dimensions as our desired mel-spectrogram: a zero-mean spherical Gaussian and a mixture of spherical Gaussians with fixed or learnable parameters."
347
- },
348
- {
349
- "type": "equation",
350
- "bbox": [
351
- 0.226,
352
- 0.33,
353
- 0.474,
354
- 0.348
355
- ],
356
- "angle": 0,
357
- "content": "\\[\n\\boldsymbol {z} \\sim \\mathcal {N} (\\boldsymbol {z}; 0, \\boldsymbol {I}) \\tag {1}\n\\]"
358
- },
359
- {
360
- "type": "equation",
361
- "bbox": [
362
- 0.192,
363
- 0.351,
364
- 0.475,
365
- 0.381
366
- ],
367
- "angle": 0,
368
- "content": "\\[\n\\boldsymbol {z} \\sim \\sum_ {k} \\hat {\\phi} _ {k} \\mathcal {N} (\\boldsymbol {z}; \\boldsymbol {\\mu} _ {k}, \\boldsymbol {\\Sigma} _ {k}) \\tag {2}\n\\]"
369
- },
370
- {
371
- "type": "text",
372
- "bbox": [
373
- 0.085,
374
- 0.399,
375
- 0.477,
376
- 0.445
377
- ],
378
- "angle": 0,
379
- "content": "These samples are put through a series of invertible, parametrized transformations \\( \\pmb{f} \\), in our case affine transformations that transform \\( p(\\pmb{z}) \\) into \\( p(x) \\)."
380
- },
381
- {
382
- "type": "equation",
383
- "bbox": [
384
- 0.196,
385
- 0.455,
386
- 0.475,
387
- 0.473
388
- ],
389
- "angle": 0,
390
- "content": "\\[\n\\boldsymbol {x} = \\boldsymbol {f} _ {0} \\circ \\boldsymbol {f} _ {1} \\circ \\dots \\boldsymbol {f} _ {k} (z) \\tag {3}\n\\]"
391
- },
392
- {
393
- "type": "text",
394
- "bbox": [
395
- 0.085,
396
- 0.489,
397
- 0.477,
398
- 0.535
399
- ],
400
- "angle": 0,
401
- "content": "As it is illustrated in (Kingma et al., 2016), in autoregressive normalizing flows the \\(t\\)-th variable \\(\\boldsymbol{z}_t^\\prime\\) only depends on previous timesteps \\(\\boldsymbol{z}_{1:t - 1}\\):"
402
- },
403
- {
404
- "type": "equation",
405
- "bbox": [
406
- 0.221,
407
- 0.545,
408
- 0.475,
409
- 0.563
410
- ],
411
- "angle": 0,
412
- "content": "\\[\n\\boldsymbol {z} _ {t} ^ {\\prime} = \\boldsymbol {f} _ {k} \\left(\\boldsymbol {z} _ {1: t - 1}\\right) \\tag {4}\n\\]"
413
- },
414
- {
415
- "type": "text",
416
- "bbox": [
417
- 0.085,
418
- 0.58,
419
- 0.475,
420
- 0.671
421
- ],
422
- "angle": 0,
423
- "content": "By using parametrized affine transformations for \\( f \\) and due to the autoregressive structure, the Jacobian determinant of each of the transformations \\( f \\) is lower triangular, hence easy to compute. With this setup we can train Flowtron by maximizing the log-likelihood of the data, which can be done using the change of variables:"
424
- },
425
- {
426
- "type": "equation",
427
- "bbox": [
428
- 0.097,
429
- 0.681,
430
- 0.475,
431
- 0.722
432
- ],
433
- "angle": 0,
434
- "content": "\\[\n\\log p _ {\\theta} (\\boldsymbol {x}) = \\log p _ {\\theta} (\\boldsymbol {z}) + \\sum_ {i = 1} ^ {k} \\log | \\det (\\boldsymbol {J} (\\boldsymbol {f} _ {i} ^ {- 1} (\\boldsymbol {x}))) | \\tag {5}\n\\]"
435
- },
436
- {
437
- "type": "equation",
438
- "bbox": [
439
- 0.178,
440
- 0.724,
441
- 0.475,
442
- 0.744
443
- ],
444
- "angle": 0,
445
- "content": "\\[\n\\boldsymbol {z} = \\boldsymbol {f} _ {k} ^ {- 1} \\circ \\boldsymbol {f} _ {k - 1} ^ {- 1} \\circ \\dots \\boldsymbol {f} _ {0} ^ {- 1} (\\boldsymbol {x}) \\tag {6}\n\\]"
446
- },
447
- {
448
- "type": "text",
449
- "bbox": [
450
- 0.085,
451
- 0.761,
452
- 0.476,
453
- 0.835
454
- ],
455
- "angle": 0,
456
- "content": "For the forward pass through the network, we take the melspectrograms as vectors and process them through several \"steps of flow conditioned on the text and speaker ids. A step of flow here consists of an affine coupling layer, described below."
457
- },
458
- {
459
- "type": "title",
460
- "bbox": [
461
- 0.086,
462
- 0.852,
463
- 0.277,
464
- 0.868
465
- ],
466
- "angle": 0,
467
- "content": "3.1. Affine Coupling Layer"
468
- },
469
- {
470
- "type": "text",
471
- "bbox": [
472
- 0.085,
473
- 0.876,
474
- 0.476,
475
- 0.907
476
- ],
477
- "angle": 0,
478
- "content": "Invertible neural networks are typically constructed using coupling layers (Dinh et al., 2014; 2016; Kingma &"
479
- },
480
- {
481
- "type": "text",
482
- "bbox": [
483
- 0.497,
484
- 0.085,
485
- 0.886,
486
- 0.147
487
- ],
488
- "angle": 0,
489
- "content": "Dhariwal, 2018). In our case, we use an affine coupling layer (Dinh et al., 2016). Every input \\( \\boldsymbol{x}_{t-1} \\) produces scale and bias terms, \\( s \\) and \\( b \\) respectively, that affine-transform the succeeding input \\( \\boldsymbol{x}_t \\):"
490
- },
491
- {
492
- "type": "equation",
493
- "bbox": [
494
- 0.543,
495
- 0.179,
496
- 0.886,
497
- 0.195
498
- ],
499
- "angle": 0,
500
- "content": "\\[\n\\left(\\log \\boldsymbol {s} _ {t}, \\boldsymbol {b} _ {t}\\right) = N N \\left(\\boldsymbol {x} _ {1: t - 1}, \\text {t e x t}, \\text {s p e a k e r}\\right) \\tag {7}\n\\]"
501
- },
502
- {
503
- "type": "equation",
504
- "bbox": [
505
- 0.628,
506
- 0.198,
507
- 0.886,
508
- 0.214
509
- ],
510
- "angle": 0,
511
- "content": "\\[\n\\boldsymbol {x} _ {t} ^ {\\prime} = \\boldsymbol {s} _ {t} \\odot \\boldsymbol {x} _ {t} + \\boldsymbol {b} _ {t} \\tag {8}\n\\]"
512
- },
513
- {
514
- "type": "text",
515
- "bbox": [
516
- 0.497,
517
- 0.232,
518
- 0.887,
519
- 0.415
520
- ],
521
- "angle": 0,
522
- "content": "Here \\( NN() \\) can be any autoregressive causal transformation. This can be achieved by time-wise concatenation of a 0-valued vector to the input provided to \\( NN() \\). The affine coupling layer preserves invertibility for the overall network, even though \\( NN() \\) does not need to be invertible. This follows because the first input of \\( NN() \\) is a constant and due to the autoregressive nature of the model the scaling and translation terms \\( s_t \\) and \\( b_t \\) only depend on \\( x_{1:t-1} \\) and the fixed text and speaker vectors. Accordingly, when inverting the network, we can compute \\( s_t \\) and \\( b_t \\) from the preceding input \\( x_{1:t-1} \\), and then invert \\( x_t' \\) to compute \\( x_t \\), by simply recomputing \\( NN(x_{1:t-1},text,Speaker) \\)."
523
- },
524
- {
525
- "type": "text",
526
- "bbox": [
527
- 0.497,
528
- 0.421,
529
- 0.886,
530
- 0.482
531
- ],
532
- "angle": 0,
533
- "content": "With an affine coupling layer, only the \\( s_t \\) term changes the volume of the mapping and adds a change of variables term to the loss. This term also serves to penalize the model for non-invertible affine mappings."
534
- },
535
- {
536
- "type": "equation",
537
- "bbox": [
538
- 0.566,
539
- 0.491,
540
- 0.886,
541
- 0.511
542
- ],
543
- "angle": 0,
544
- "content": "\\[\n\\log | \\det (\\boldsymbol {J} (\\boldsymbol {f} _ {\\text {c o u p l i n g}} ^ {- 1} (\\boldsymbol {x}))) | = \\log | \\boldsymbol {s} | \\tag {9}\n\\]"
545
- },
546
- {
547
- "type": "text",
548
- "bbox": [
549
- 0.497,
550
- 0.526,
551
- 0.886,
552
- 0.617
553
- ],
554
- "angle": 0,
555
- "content": "With this setup, it is also possible to revert the ordering of the input \\( x \\) without loss of generality. Hence, we choose to revert the order of the input at every even step of flow and to maintain the original order on odd steps of flow. This allows the model to learn dependencies both forward and backwards in time while remaining causal and invertible."
556
- },
557
- {
558
- "type": "title",
559
- "bbox": [
560
- 0.498,
561
- 0.634,
562
- 0.665,
563
- 0.647
564
- ],
565
- "angle": 0,
566
- "content": "3.2. Model architecture"
567
- },
568
- {
569
- "type": "text",
570
- "bbox": [
571
- 0.496,
572
- 0.657,
573
- 0.887,
574
- 0.763
575
- ],
576
- "angle": 0,
577
- "content": "Our text encoder modifies Tacotron's by replacing batchnorm with instance-norm. Our decoder and \\( NN \\) architecture, depicted in Figure 1, removes the essential Prenet and Postnet layers from Tacotron. We use the content-based tanh attention described in (Vinyals et al., 2015). We use the Mel Encoder described in (Hsu et al., 2018) for Flowtron models that predict the parameters of the Gaussian mixture."
578
- },
579
- {
580
- "type": "text",
581
- "bbox": [
582
- 0.496,
583
- 0.77,
584
- 0.887,
585
- 0.906
586
- ],
587
- "angle": 0,
588
- "content": "Unlike (Ping et al., 2017; Gibiansky et al., 2017), where site specific speaker embeddings are used, we use a single speaker embedding that is channel-wise concatenated with the encoder outputs at every token. We use a fixed dummy speaker embedding for models not conditioned on speaker id. Finally, we add a dense layer with a sigmoid output the flow step closest to \\( z \\). This provides the model with a gating mechanism as early as possible during inference to avoid extra computation."
589
- }
590
- ],
591
- [
592
- {
593
- "type": "header",
594
- "bbox": [
595
- 0.202,
596
- 0.058,
597
- 0.769,
598
- 0.071
599
- ],
600
- "angle": 0,
601
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
602
- },
603
- {
604
- "type": "image",
605
- "bbox": [
606
- 0.088,
607
- 0.082,
608
- 0.476,
609
- 0.246
610
- ],
611
- "angle": 0,
612
- "content": null
613
- },
614
- {
615
- "type": "image_caption",
616
- "bbox": [
617
- 0.085,
618
- 0.254,
619
- 0.476,
620
- 0.299
621
- ],
622
- "angle": 0,
623
- "content": "Figure 1: Flowtron network. Text and speaker embeddings are channel-wise concatenated. A 0-valued vector is concatenated with \\( x \\) in the time dimension."
624
- },
625
- {
626
- "type": "title",
627
- "bbox": [
628
- 0.086,
629
- 0.331,
630
- 0.187,
631
- 0.345
632
- ],
633
- "angle": 0,
634
- "content": "3.3. Inference"
635
- },
636
- {
637
- "type": "text",
638
- "bbox": [
639
- 0.085,
640
- 0.355,
641
- 0.476,
642
- 0.611
643
- ],
644
- "angle": 0,
645
- "content": "Once the network is trained, doing inference is simply a matter of randomly sampling \\( z \\) values from a spherical Gaussian, or Gaussian Mixture, and running them through the network, reverting the order of the input when necessary. During training we used \\( \\sigma^2 = 1 \\). The parameters of the Gaussian mixture are either fixed or predicted by Flowtron. In section 4.3 we explore the effects of different values for \\( \\sigma^2 \\). In general, we found that sampling \\( z \\) values from a Gaussian with a lower standard deviation from that assumed during training resulted in mel-spectrograms that sounded better, as found in (Kingma & Dhariwal, 2018), and earlier work on likelihood-based generative models (Parmar et al., 2018). During inference we sampled \\( z \\) values from a Gaussian with \\( \\sigma^2 = 0.5 \\), unless otherwise specified. The text and speaker embeddings are included at each of the coupling layers as before, but now the affine transforms are inverted in time, and these inverses are also guaranteed by the loss."
646
- },
647
- {
648
- "type": "title",
649
- "bbox": [
650
- 0.086,
651
- 0.631,
652
- 0.218,
653
- 0.649
654
- ],
655
- "angle": 0,
656
- "content": "4. Experiments"
657
- },
658
- {
659
- "type": "text",
660
- "bbox": [
661
- 0.085,
662
- 0.657,
663
- 0.476,
664
- 0.809
665
- ],
666
- "angle": 0,
667
- "content": "This section describes our training setup and provides quantitative and qualitative results. Our quantitative results show that Flowtron has mean opinion scores (MOS) that are comparable to that of state of the art models for text to mel-spectrogram synthesis such as Tacotron 2. Our qualitative results display many features that are not possible or not efficient with Tacotron and Tacotron 2 GST. These features include control of the amount of variation in speech, interpolation between samples and style transfer between seen and unseen speakers during training."
668
- },
669
- {
670
- "type": "text",
671
- "bbox": [
672
- 0.085,
673
- 0.815,
674
- 0.475,
675
- 0.906
676
- ],
677
- "angle": 0,
678
- "content": "We decode all mel-spectrograms into waveforms by using a single pre-trained WaveGlow (Prenger et al., 2019) model trained on a single speaker and available on github (Valle et al., 2019a). During inference we used \\(\\sigma^2 = 0.7\\). In consonance with (Valle et al., 2019b), our results suggest that WaveGlow can be used as an universal decoder."
679
- },
680
- {
681
- "type": "text",
682
- "bbox": [
683
- 0.497,
684
- 0.085,
685
- 0.886,
686
- 0.131
687
- ],
688
- "angle": 0,
689
- "content": "Although we provide images to illustrate our results, they can best be appreciated by listening. Hence, we ask the readers to visit our website \\( {}^{2} \\) to listen to Flowtron samples."
690
- },
691
- {
692
- "type": "title",
693
- "bbox": [
694
- 0.497,
695
- 0.147,
696
- 0.634,
697
- 0.163
698
- ],
699
- "angle": 0,
700
- "content": "4.1. Training setup"
701
- },
702
- {
703
- "type": "text",
704
- "bbox": [
705
- 0.496,
706
- 0.17,
707
- 0.887,
708
- 0.352
709
- ],
710
- "angle": 0,
711
- "content": "We train our Flowtron, Tacotron 2 and Tacotron 2 GST models using a dataset that combines the LJSpeech (LJS) dataset (Ito et al., 2017) with two proprietary single speaker datasets with 20 and 10 hours each (Sally and Helen). We will refer to this combined dataset as LSH. We also train a Flowtron model on the train-clean-100 subset of LibriTTS (Zen et al., 2019) with 123 speakers and 25 minutes on average per speaker. Speakers with less than 5 minutes of data and files that are larger than 10 seconds are filtered out. For each dataset we use at least 180 randomly chosen samples for the validation set and the remainder for the training set."
712
- },
713
- {
714
- "type": "text",
715
- "bbox": [
716
- 0.496,
717
- 0.359,
718
- 0.887,
719
- 0.45
720
- ],
721
- "angle": 0,
722
- "content": "The models are trained on uniformly sampled normalized text and ARPAbet encodings obtained from the CMU Pronouncing Dictionary (Weide, 1998). We do not perform any data augmentation. We adapt the public Tacotron 2 and Tacotron 2 GST repos to include speaker embeddings as described in Section 3."
723
- },
724
- {
725
- "type": "text",
726
- "bbox": [
727
- 0.496,
728
- 0.457,
729
- 0.886,
730
- 0.519
731
- ],
732
- "angle": 0,
733
- "content": "We use a sampling rate of \\(22050\\mathrm{Hz}\\) and mel-spectrograms with 80 bins using librosa mel filter defaults. We apply the STFT with a FFT size of 1024, window size of 1024 samples and hop size of 256 samples \\((\\sim 12ms)\\)."
734
- },
735
- {
736
- "type": "text",
737
- "bbox": [
738
- 0.496,
739
- 0.525,
740
- 0.887,
741
- 0.721
742
- ],
743
- "angle": 0,
744
- "content": "We use the ADAM (Kingma & Ba, 2014) optimizer with default parameters, 1e-4 learning rate and 1e-6 weight decay for Flowtron and 1e-3 learning rate and 1e-5 weight decay for the other models, following guidelines in (Wang et al., 2017). We anneal the learning rate once the generalization error starts to plateau and stop training once the the generalization error stops significantly decreasing or starts increasing. The Flowtron models with 2 steps of flow were trained on the LSH dataset for approximately 1000 epochs and then fine-tuned on LibriTTS for 500 epochs. Tacotron 2 and Tacotron 2 GST are trained for approximately 500 epochs. Each model is trained on a single NVIDIA DGX-1 with 8 GPUs."
745
- },
746
- {
747
- "type": "text",
748
- "bbox": [
749
- 0.496,
750
- 0.73,
751
- 0.887,
752
- 0.881
753
- ],
754
- "angle": 0,
755
- "content": "We find it faster to first learn to attend on a Flowtron model with a single step of flow and large amounts of data than multiple steps of flow and less data. After the model has learned to attend, we transfer its parameters to models with more steps of flow and speakers with less data. Thus, we first train Flowtron model with a single step of flow on the LSH dataset with many hours per speaker. Then we fine tune this model to Flowtron models with more steps of flow. Finally, these models are fine tuned on LibriTTS with an optional new speaker embedding."
756
- },
757
- {
758
- "type": "page_footnote",
759
- "bbox": [
760
- 0.518,
761
- 0.89,
762
- 0.724,
763
- 0.904
764
- ],
765
- "angle": 0,
766
- "content": "2 https://nv-adlr.github.io/Flowtron"
767
- }
768
- ],
769
- [
770
- {
771
- "type": "header",
772
- "bbox": [
773
- 0.201,
774
- 0.057,
775
- 0.771,
776
- 0.071
777
- ],
778
- "angle": 0,
779
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
780
- },
781
- {
782
- "type": "title",
783
- "bbox": [
784
- 0.086,
785
- 0.086,
786
- 0.35,
787
- 0.101
788
- ],
789
- "angle": 0,
790
- "content": "4.2. Mean Opinion Score comparison"
791
- },
792
- {
793
- "type": "text",
794
- "bbox": [
795
- 0.085,
796
- 0.109,
797
- 0.475,
798
- 0.229
799
- ],
800
- "angle": 0,
801
- "content": "We provide results that compare mean opinion scores (MOS) from real data from the LJS dataset, samples from a Flowtron with 2 steps of flow and samples from our implementation of Tacotron 2, both trained on LSH. Although the models evaluated are multi-speaker, we only compute mean opinion scores on LJS. In addition, we use the mean opinion scores provided in (Prenger et al., 2019) for ground truth data from the LJS dataset."
802
- },
803
- {
804
- "type": "text",
805
- "bbox": [
806
- 0.085,
807
- 0.237,
808
- 0.476,
809
- 0.343
810
- ],
811
- "angle": 0,
812
- "content": "We crowd-sourced mean opinion score (MOS) tests on Amazon Mechanical Turk. Raters first had to pass a hearing test to be eligible. Then they listened to an utterance, after which they rated pleasantness on a five-point scale. We used 30 volume normalized utterances from all speakers disjoint from the training set for evaluation, and randomly chose the utterances for each subject."
813
- },
814
- {
815
- "type": "text",
816
- "bbox": [
817
- 0.085,
818
- 0.35,
819
- 0.477,
820
- 0.442
821
- ],
822
- "angle": 0,
823
- "content": "The mean opinion scores are shown in Table 1 with \\(95\\%\\) confidence intervals computed over approximately 250 scores per source. The results roughly match our subjective qualitative assessment. The larger advantage of Flowtron is in the control over the amount of speech variation and the manipulation of the latent space."
824
- },
825
- {
826
- "type": "table",
827
- "bbox": [
828
- 0.095,
829
- 0.452,
830
- 0.466,
831
- 0.516
832
- ],
833
- "angle": 0,
834
- "content": "<table><tr><td>Source</td><td>Flows</td><td>Mean Opinion Score (MOS)</td></tr><tr><td>Real</td><td>-</td><td>4.274 ± 0.1340</td></tr><tr><td>Flowtron</td><td>3</td><td>3.665 ± 0.1634</td></tr><tr><td>Tacotron 2</td><td>-</td><td>3.521 ± 0.1721</td></tr></table>"
835
- },
836
- {
837
- "type": "table_caption",
838
- "bbox": [
839
- 0.085,
840
- 0.526,
841
- 0.475,
842
- 0.556
843
- ],
844
- "angle": 0,
845
- "content": "Table 1: Mean Opinion Score (MOS) evaluations with \\(95\\%\\) confidence intervals for various sources."
846
- },
847
- {
848
- "type": "title",
849
- "bbox": [
850
- 0.086,
851
- 0.588,
852
- 0.254,
853
- 0.604
854
- ],
855
- "angle": 0,
856
- "content": "4.3. Sampling the prior"
857
- },
858
- {
859
- "type": "text",
860
- "bbox": [
861
- 0.085,
862
- 0.611,
863
- 0.475,
864
- 0.703
865
- ],
866
- "angle": 0,
867
- "content": "The simplest approach to generate samples with Flowtron is to sample from a prior distribution \\( z \\sim \\mathcal{N}(0, \\sigma^2) \\) and adjust \\( \\sigma^2 \\) to control amount of variation. Whereas \\( \\sigma^2 = 0 \\) completely removes variation and produces outputs based on the model bias, increasing the value of \\( \\sigma^2 \\) will increase the amount of variation in speech."
868
- },
869
- {
870
- "type": "title",
871
- "bbox": [
872
- 0.086,
873
- 0.717,
874
- 0.273,
875
- 0.73
876
- ],
877
- "angle": 0,
878
- "content": "4.3.1. SPEECH VARIATION"
879
- },
880
- {
881
- "type": "text",
882
- "bbox": [
883
- 0.085,
884
- 0.74,
885
- 0.475,
886
- 0.831
887
- ],
888
- "angle": 0,
889
- "content": "To showcase the amount of variation and control thereof in Flowtron, we synthesize 10 mel-spectrograms and sample the Gaussian prior with \\(\\sigma^2 \\in \\{0.0, 0.5, 1.0\\}\\). All samples are generated conditioned on a fixed speaker Sally and text \"How much variation is there?\" to illustrate the relationship between \\(\\sigma^2\\) and variability."
890
- },
891
- {
892
- "type": "text",
893
- "bbox": [
894
- 0.085,
895
- 0.838,
896
- 0.475,
897
- 0.884
898
- ],
899
- "angle": 0,
900
- "content": "Our results show that despite all the variability added by increasing \\(\\sigma^2\\), all the samples synthesized with Flowtron still produce high quality speech."
901
- },
902
- {
903
- "type": "text",
904
- "bbox": [
905
- 0.086,
906
- 0.891,
907
- 0.475,
908
- 0.907
909
- ],
910
- "angle": 0,
911
- "content": "Figure 2 also shows that unlike most SOTA models (Shen"
912
- },
913
- {
914
- "type": "text",
915
- "bbox": [
916
- 0.497,
917
- 0.085,
918
- 0.889,
919
- 0.147
920
- ],
921
- "angle": 0,
922
- "content": "et al., 2017; Arik et al., 2017b;a; Ping et al., 2017; Skerry-Ryan et al., 2018; Wang et al., 2018; Binkowski et al., 2019), Flowtron generates sharp harmonics and well resolved formants without a compound loss nor Prenet or Postnet layers."
923
- },
924
- {
925
- "type": "image",
926
- "bbox": [
927
- 0.543,
928
- 0.178,
929
- 0.857,
930
- 0.316
931
- ],
932
- "angle": 0,
933
- "content": null
934
- },
935
- {
936
- "type": "image_caption",
937
- "bbox": [
938
- 0.659,
939
- 0.325,
940
- 0.726,
941
- 0.339
942
- ],
943
- "angle": 0,
944
- "content": "(a) \\(\\sigma^2 = 0\\)"
945
- },
946
- {
947
- "type": "image",
948
- "bbox": [
949
- 0.543,
950
- 0.37,
951
- 0.857,
952
- 0.507
953
- ],
954
- "angle": 0,
955
- "content": null
956
- },
957
- {
958
- "type": "image_caption",
959
- "bbox": [
960
- 0.653,
961
- 0.518,
962
- 0.732,
963
- 0.531
964
- ],
965
- "angle": 0,
966
- "content": "(b) \\(\\sigma^2 = 0.5\\)"
967
- },
968
- {
969
- "type": "image",
970
- "bbox": [
971
- 0.543,
972
- 0.563,
973
- 0.857,
974
- 0.699
975
- ],
976
- "angle": 0,
977
- "content": null
978
- },
979
- {
980
- "type": "image_caption",
981
- "bbox": [
982
- 0.659,
983
- 0.71,
984
- 0.725,
985
- 0.724
986
- ],
987
- "angle": 0,
988
- "content": "(c) \\(\\sigma^2 = 1\\)"
989
- },
990
- {
991
- "type": "image_caption",
992
- "bbox": [
993
- 0.497,
994
- 0.736,
995
- 0.886,
996
- 0.782
997
- ],
998
- "angle": 0,
999
- "content": "Figure 2: Mel-spectrograms generated with Flowtron using different \\(\\sigma^2\\). This parameter can be adjusted to control mel-spectrogram variability during inference."
1000
- },
1001
- {
1002
- "type": "text",
1003
- "bbox": [
1004
- 0.497,
1005
- 0.8,
1006
- 0.889,
1007
- 0.907
1008
- ],
1009
- "angle": 0,
1010
- "content": "Now we show that adjusting \\(\\sigma^2\\) is a simple and valuable approach that provides more variation and control than Tacotron, without sacrificing speech quality. For this, we synthesize 10 samples with Tacotron 2 using different values for the Prenet dropout probability \\(p\\in \\{0.45,0.5,0.55\\}\\). We scale the outputs of the dropout output such that the mean of the output remains equal to the mean with \\(p = 0.5\\),"
1011
- }
1012
- ],
1013
- [
1014
- {
1015
- "type": "header",
1016
- "bbox": [
1017
- 0.201,
1018
- 0.057,
1019
- 0.771,
1020
- 0.071
1021
- ],
1022
- "angle": 0,
1023
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
1024
- },
1025
- {
1026
- "type": "text",
1027
- "bbox": [
1028
- 0.085,
1029
- 0.085,
1030
- 0.475,
1031
- 0.146
1032
- ],
1033
- "angle": 0,
1034
- "content": "the value used during training. Although we also provide samples computed on values of \\( p \\in [0,1] \\) in our supplemental material, we do not include them in our results because they are unintelligible."
1035
- },
1036
- {
1037
- "type": "text",
1038
- "bbox": [
1039
- 0.085,
1040
- 0.153,
1041
- 0.476,
1042
- 0.244
1043
- ],
1044
- "angle": 0,
1045
- "content": "In Figure 3 below we provide scatter plots from sample duration in seconds. Our results show that whereas \\(\\sigma^2 = 0\\) produces samples with no variation in duration, larger values of \\(\\sigma^2\\) produces samples with more variation in duration. Humans manipulate word and sentence length to express themselves, hence this is valuable."
1046
- },
1047
- {
1048
- "type": "image",
1049
- "bbox": [
1050
- 0.111,
1051
- 0.266,
1052
- 0.451,
1053
- 0.405
1054
- ],
1055
- "angle": 0,
1056
- "content": null
1057
- },
1058
- {
1059
- "type": "image_caption",
1060
- "bbox": [
1061
- 0.085,
1062
- 0.419,
1063
- 0.476,
1064
- 0.465
1065
- ],
1066
- "angle": 0,
1067
- "content": "Figure 3: Sample duration in seconds given parameters \\(\\sigma^2\\) and \\(p\\). These results show that Flowtron provides more variation in sample duration than Tacotron 2."
1068
- },
1069
- {
1070
- "type": "text",
1071
- "bbox": [
1072
- 0.085,
1073
- 0.489,
1074
- 0.475,
1075
- 0.609
1076
- ],
1077
- "angle": 0,
1078
- "content": "In Figure 4 we provide scatter plots of \\( F_{0} \\) contours extracted with the YIN algorithm (De Cheveigné & Kawahara, 2002), with minimum \\( F_{0} \\), maximum \\( F_{0} \\) and harmonicity threshold equal to \\( 80\\mathrm{Hz} \\), \\( 400\\mathrm{Hz} \\) and 0.3 respectively. Our results show a behavior similar to the previous sample duration analysis. As expected, \\( \\sigma^{2} = 0 \\) provides no variation in \\( F_{0} \\) contour<sup>3</sup>, while increasing the value of \\( \\sigma^{2} \\) will increase the amount of variation in \\( F_{0} \\) contours."
1079
- },
1080
- {
1081
- "type": "text",
1082
- "bbox": [
1083
- 0.085,
1084
- 0.617,
1085
- 0.475,
1086
- 0.709
1087
- ],
1088
- "angle": 0,
1089
- "content": "Our results in Figure 4 also show that the samples produced with Flowtron are considerably less monotonous than the samples produced with Tacotron 2. Whereas increasing \\(\\sigma^2\\) considerably increases variation in \\(F_0\\), modifying \\(p\\) barely produces any variation. This is valuable because expressive speech is associated with non-monotonic \\(F_0\\) contours."
1090
- },
1091
- {
1092
- "type": "title",
1093
- "bbox": [
1094
- 0.086,
1095
- 0.723,
1096
- 0.391,
1097
- 0.737
1098
- ],
1099
- "angle": 0,
1100
- "content": "4.3.2. INTERPOLATION BETWEEN SAMPLES"
1101
- },
1102
- {
1103
- "type": "text",
1104
- "bbox": [
1105
- 0.085,
1106
- 0.746,
1107
- 0.476,
1108
- 0.882
1109
- ],
1110
- "angle": 0,
1111
- "content": "With Flowtron we can perform interpolation in \\( z \\)-space to achieve interpolation in mel-spectrogram space. For this experiment we evaluate Flowtron models with and without speaker embeddings. For the experiment with speaker embeddings we choose the Sally speaker and the phrase \"It is well known that deep generative models have a rich latent space\". We generate mel-spectrograms by sampling \\( z \\sim \\mathcal{N}(0, 0.8) \\) twice and interpolating between them over 100 steps."
1112
- },
1113
- {
1114
- "type": "image",
1115
- "bbox": [
1116
- 0.507,
1117
- 0.083,
1118
- 0.888,
1119
- 0.147
1120
- ],
1121
- "angle": 0,
1122
- "content": null
1123
- },
1124
- {
1125
- "type": "image_caption",
1126
- "bbox": [
1127
- 0.634,
1128
- 0.154,
1129
- 0.759,
1130
- 0.168
1131
- ],
1132
- "angle": 0,
1133
- "content": "(a) Flowtron \\(\\sigma^2 = 0\\)"
1134
- },
1135
- {
1136
- "type": "image",
1137
- "bbox": [
1138
- 0.506,
1139
- 0.182,
1140
- 0.888,
1141
- 0.247
1142
- ],
1143
- "angle": 0,
1144
- "content": null
1145
- },
1146
- {
1147
- "type": "image_caption",
1148
- "bbox": [
1149
- 0.629,
1150
- 0.253,
1151
- 0.765,
1152
- 0.266
1153
- ],
1154
- "angle": 0,
1155
- "content": "(b) Flowtron \\(\\sigma^2 = 0.5\\)"
1156
- },
1157
- {
1158
- "type": "image",
1159
- "bbox": [
1160
- 0.506,
1161
- 0.281,
1162
- 0.888,
1163
- 0.345
1164
- ],
1165
- "angle": 0,
1166
- "content": null
1167
- },
1168
- {
1169
- "type": "image_caption",
1170
- "bbox": [
1171
- 0.636,
1172
- 0.351,
1173
- 0.757,
1174
- 0.364
1175
- ],
1176
- "angle": 0,
1177
- "content": "(c) Flowtron \\(\\sigma^2 = 1\\)"
1178
- },
1179
- {
1180
- "type": "image",
1181
- "bbox": [
1182
- 0.506,
1183
- 0.379,
1184
- 0.888,
1185
- 0.444
1186
- ],
1187
- "angle": 0,
1188
- "content": null
1189
- },
1190
- {
1191
- "type": "image_caption",
1192
- "bbox": [
1193
- 0.588,
1194
- 0.449,
1195
- 0.804,
1196
- 0.464
1197
- ],
1198
- "angle": 0,
1199
- "content": "(d) Tacotron \\(2p\\in \\{0.45,0.5,0.55\\}\\)"
1200
- },
1201
- {
1202
- "type": "image_caption",
1203
- "bbox": [
1204
- 0.497,
1205
- 0.475,
1206
- 0.888,
1207
- 0.521
1208
- ],
1209
- "angle": 0,
1210
- "content": "Figure 4: \\( F_{0} \\) contours obtained from samples generated by Flowtron and Tacotron 2 with different values for \\( \\sigma^{2} \\) and \\( p \\). Flowtron provides more expressivity than Tacotron 2."
1211
- },
1212
- {
1213
- "type": "text",
1214
- "bbox": [
1215
- 0.496,
1216
- 0.572,
1217
- 0.888,
1218
- 0.678
1219
- ],
1220
- "angle": 0,
1221
- "content": "For the experiment without speaker embeddings we interpolate between Sally and Helen using the phrase \"We are testing this model\". First, we perform inference by sampling \\( z \\sim \\mathcal{N}(0, 0.5) \\) until we find two \\( z \\) values, \\( z_h \\) and \\( z_s \\), that produce mel-spectrograms with Helen's and Sally's voice respectively. We then generate samples by performing inference while linearly interpolating between \\( z_h \\) and \\( z_s \\)."
1222
- },
1223
- {
1224
- "type": "text",
1225
- "bbox": [
1226
- 0.496,
1227
- 0.685,
1228
- 0.888,
1229
- 0.775
1230
- ],
1231
- "angle": 0,
1232
- "content": "Our same speaker interpolation samples show that Flowtron is able to interpolate between multiple samples while producing correct alignment maps. In addition, our different speaker interpolation samples show that Flowtron is able to blurry the boundaries between two speakers, creating a speaker that combines the characteristics of both."
1233
- },
1234
- {
1235
- "type": "title",
1236
- "bbox": [
1237
- 0.497,
1238
- 0.792,
1239
- 0.692,
1240
- 0.807
1241
- ],
1242
- "angle": 0,
1243
- "content": "4.4. Sampling the posterior"
1244
- },
1245
- {
1246
- "type": "text",
1247
- "bbox": [
1248
- 0.496,
1249
- 0.815,
1250
- 0.888,
1251
- 0.907
1252
- ],
1253
- "angle": 0,
1254
- "content": "In this approach we generate samples with Flowtron by sampling a posterior distribution conditioned on prior evidence containing speech characteristics of interest, as described in (Gambardella et al., 2019; Kingma & Dhariwal, 2018). In this experiment, we collect prior evidence \\( z_{e} \\) by performing a forward pass with the speaker id to be used during"
1255
- },
1256
- {
1257
- "type": "page_footnote",
1258
- "bbox": [
1259
- 0.106,
1260
- 0.89,
1261
- 0.458,
1262
- 0.905
1263
- ],
1264
- "angle": 0,
1265
- "content": "3Variations in \\(\\sigma^2 = 0\\) are due to different \\(z\\) for WaveGlow."
1266
- }
1267
- ],
1268
- [
1269
- {
1270
- "type": "header",
1271
- "bbox": [
1272
- 0.201,
1273
- 0.057,
1274
- 0.771,
1275
- 0.071
1276
- ],
1277
- "angle": 0,
1278
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
1279
- },
1280
- {
1281
- "type": "text",
1282
- "bbox": [
1283
- 0.085,
1284
- 0.085,
1285
- 0.475,
1286
- 0.162
1287
- ],
1288
- "angle": 0,
1289
- "content": "inference\\(^4\\), observed mel-spectrogram and text from a set of samples with characteristics of interest. If necessary, we time-concatenate each \\(z_{e}\\) with itself to fulfill minimum length requirements defined according to the text length to be said during inference."
1290
- },
1291
- {
1292
- "type": "text",
1293
- "bbox": [
1294
- 0.085,
1295
- 0.168,
1296
- 0.476,
1297
- 0.29
1298
- ],
1299
- "angle": 0,
1300
- "content": "Tacotron 2 GST (Wang et al., 2018) has an equivalent posterior sampling approach, in which during inference the model is conditioned on a weighted sum of global style tokens (posterior) queried through an embedding of existing audio samples (prior). For Tacotron 2 GST, we evaluate two approaches: in one we use a single sample to query a style token in the other we use an average style token computed over multiple samples."
1301
- },
1302
- {
1303
- "type": "title",
1304
- "bbox": [
1305
- 0.086,
1306
- 0.305,
1307
- 0.411,
1308
- 0.318
1309
- ],
1310
- "angle": 0,
1311
- "content": "4.4.1. SEEN SPEAKER WITHOUT ALIGNMENTS"
1312
- },
1313
- {
1314
- "type": "text",
1315
- "bbox": [
1316
- 0.085,
1317
- 0.328,
1318
- 0.476,
1319
- 0.48
1320
- ],
1321
- "angle": 0,
1322
- "content": "In this experiment we compare Sally samples from Flowtron and Tacotron 2 GST generated by conditioning on the posterior computed over 30 Helen samples with the highest variance in fundamental frequency. The goal is to make a monotonic speaker sound expressive. Our experiments show that by sampling from the posterior or interpolating between the posterior and a standard Gaussian prior, Flowtron is able to make a monotonic speaker gradually sound more expressive. On the other hand, Tacotron 2 GST is barely able to alter characteristics of the monotonic speaker."
1323
- },
1324
- {
1325
- "type": "title",
1326
- "bbox": [
1327
- 0.086,
1328
- 0.494,
1329
- 0.381,
1330
- 0.508
1331
- ],
1332
- "angle": 0,
1333
- "content": "4.4.2. SEEN SPEAKER WITH ALIGNMENTS"
1334
- },
1335
- {
1336
- "type": "text",
1337
- "bbox": [
1338
- 0.085,
1339
- 0.516,
1340
- 0.476,
1341
- 0.669
1342
- ],
1343
- "angle": 0,
1344
- "content": "We use a Flowtron model with speaker embeddings to illustrate Flowtron's ability to learn and transfer acoustic characteristics that are hard to express algorithmically but easy to perceive acoustically, we select a female speaker from LibriTTS with a distinguished nasal voice and oscillation in \\( F_{0} \\) as our source speaker and transfer her style to a male speaker, also from LibriTTS, with acoustic characteristics that sound different from the female speaker. Unlike the previous experiment, this time the text and the alignment maps are transferred from the female to the male speaker."
1345
- },
1346
- {
1347
- "type": "text",
1348
- "bbox": [
1349
- 0.085,
1350
- 0.675,
1351
- 0.475,
1352
- 0.737
1353
- ],
1354
- "angle": 0,
1355
- "content": "Figure 5 is an attempt to visualize the transfer of these acoustic qualities we described. It shows that after the transfer, the lower partials of the male speaker oscillate more and become more similar to the female speaker."
1356
- },
1357
- {
1358
- "type": "title",
1359
- "bbox": [
1360
- 0.086,
1361
- 0.751,
1362
- 0.313,
1363
- 0.765
1364
- ],
1365
- "angle": 0,
1366
- "content": "4.4.3. UNSEEN SPEAKER STYLE"
1367
- },
1368
- {
1369
- "type": "text",
1370
- "bbox": [
1371
- 0.085,
1372
- 0.774,
1373
- 0.476,
1374
- 0.88
1375
- ],
1376
- "angle": 0,
1377
- "content": "We compare samples generated with Flowtron and Tacotron 2 GST with speaker embeddings in which we modify a speaker's style by using data from the same speaker but from a style not seen during training. Whereas Sally's data used during training consists of news article readings, the evaluation samples contain Sally's interpretation of the somber and vampiresque novel Born of Darkness."
1378
- },
1379
- {
1380
- "type": "image",
1381
- "bbox": [
1382
- 0.505,
1383
- 0.083,
1384
- 0.891,
1385
- 0.208
1386
- ],
1387
- "angle": 0,
1388
- "content": null
1389
- },
1390
- {
1391
- "type": "image_caption",
1392
- "bbox": [
1393
- 0.664,
1394
- 0.21,
1395
- 0.73,
1396
- 0.223
1397
- ],
1398
- "angle": 0,
1399
- "content": "(a) Female"
1400
- },
1401
- {
1402
- "type": "image",
1403
- "bbox": [
1404
- 0.504,
1405
- 0.235,
1406
- 0.891,
1407
- 0.361
1408
- ],
1409
- "angle": 0,
1410
- "content": null
1411
- },
1412
- {
1413
- "type": "image_caption",
1414
- "bbox": [
1415
- 0.661,
1416
- 0.364,
1417
- 0.733,
1418
- 0.376
1419
- ],
1420
- "angle": 0,
1421
- "content": "(b) Transfer"
1422
- },
1423
- {
1424
- "type": "image",
1425
- "bbox": [
1426
- 0.505,
1427
- 0.389,
1428
- 0.891,
1429
- 0.514
1430
- ],
1431
- "angle": 0,
1432
- "content": null
1433
- },
1434
- {
1435
- "type": "image_caption",
1436
- "bbox": [
1437
- 0.67,
1438
- 0.517,
1439
- 0.724,
1440
- 0.53
1441
- ],
1442
- "angle": 0,
1443
- "content": "(c) Male"
1444
- },
1445
- {
1446
- "type": "image_caption",
1447
- "bbox": [
1448
- 0.497,
1449
- 0.542,
1450
- 0.888,
1451
- 0.619
1452
- ],
1453
- "angle": 0,
1454
- "content": "Figure 5: Mel-spectrograms from a female speaker, male speaker and a sample where we transfer the acoustic characteristics from the female speaker to the male speaker. It shows that the transferred sample is more similar to the female speaker than the male speaker."
1455
- },
1456
- {
1457
- "type": "text",
1458
- "bbox": [
1459
- 0.497,
1460
- 0.649,
1461
- 0.888,
1462
- 0.725
1463
- ],
1464
- "angle": 0,
1465
- "content": "Our samples show that Tacotron 2 GST fails to emulate the somber style from Born of Darkness's data. We show that Flowtron succeeds in transferring not only to the somber style in the evaluation data, but also the long pauses associated with the narrative style."
1466
- },
1467
- {
1468
- "type": "title",
1469
- "bbox": [
1470
- 0.498,
1471
- 0.739,
1472
- 0.675,
1473
- 0.753
1474
- ],
1475
- "angle": 0,
1476
- "content": "4.4.4. UNSEEN SPEAKER"
1477
- },
1478
- {
1479
- "type": "text",
1480
- "bbox": [
1481
- 0.497,
1482
- 0.763,
1483
- 0.887,
1484
- 0.824
1485
- ],
1486
- "angle": 0,
1487
- "content": "In this experiment we compare Flowtron and Tacotron 2 GST samples in which we transfer the speaking style of a speaker not seen during training. Both models use speaker embeddings."
1488
- },
1489
- {
1490
- "type": "text",
1491
- "bbox": [
1492
- 0.497,
1493
- 0.83,
1494
- 0.887,
1495
- 0.906
1496
- ],
1497
- "angle": 0,
1498
- "content": "For these experiments, we consider two speakers. The first comes from speaker ID 03 from RAVDESS, a dataset with emotion labels. We focus on the label \"surprised\". The second speaker is Richard Feynman, using a set of 10 audio samples collected from the web."
1499
- },
1500
- {
1501
- "type": "page_footnote",
1502
- "bbox": [
1503
- 0.106,
1504
- 0.889,
1505
- 0.386,
1506
- 0.904
1507
- ],
1508
- "angle": 0,
1509
- "content": "4To remove this speaker's information from \\(z_{e}\\)"
1510
- }
1511
- ],
1512
- [
1513
- {
1514
- "type": "header",
1515
- "bbox": [
1516
- 0.202,
1517
- 0.058,
1518
- 0.769,
1519
- 0.071
1520
- ],
1521
- "angle": 0,
1522
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
1523
- },
1524
- {
1525
- "type": "text",
1526
- "bbox": [
1527
- 0.085,
1528
- 0.085,
1529
- 0.475,
1530
- 0.16
1531
- ],
1532
- "angle": 0,
1533
- "content": "For each experiment, we use the Sally speaker and the sentences \"Humans are walking on the street?\" and \"Surely you are joking mister Feynman,\" which do not exist in RAVDESS nor in the audio samples from Richard Feynman."
1534
- },
1535
- {
1536
- "type": "text",
1537
- "bbox": [
1538
- 0.085,
1539
- 0.169,
1540
- 0.475,
1541
- 0.289
1542
- ],
1543
- "angle": 0,
1544
- "content": "The samples generated with Tacotron 2 GST are not able to emulate the surprised style from RAVDESS nor Feynman's prosody and acoustic characteristics. Flowtron, on the other hand, is able to make Sally sound surprised, which is drastically different from the monotonous baseline. Likewise, Flowtron is able to pick up on the prosody and articulation details particular to Feynman's speaking style, and transfer them to Sally."
1545
- },
1546
- {
1547
- "type": "title",
1548
- "bbox": [
1549
- 0.086,
1550
- 0.306,
1551
- 0.341,
1552
- 0.321
1553
- ],
1554
- "angle": 0,
1555
- "content": "4.5. Sampling the Gaussian Mixture"
1556
- },
1557
- {
1558
- "type": "text",
1559
- "bbox": [
1560
- 0.085,
1561
- 0.33,
1562
- 0.475,
1563
- 0.42
1564
- ],
1565
- "angle": 0,
1566
- "content": "In this last section we showcase visualizations and samples from Flowtron Gaussian Mixture (GM). First we investigate how different mixture components and speakers are correlated. Then we provide sound examples in which we modulate speech characteristics by translating one of the the dimensions of an individual component."
1567
- },
1568
- {
1569
- "type": "title",
1570
- "bbox": [
1571
- 0.086,
1572
- 0.435,
1573
- 0.334,
1574
- 0.449
1575
- ],
1576
- "angle": 0,
1577
- "content": "4.5.1. VISUALIZING ASSIGNMENTS"
1578
- },
1579
- {
1580
- "type": "text",
1581
- "bbox": [
1582
- 0.085,
1583
- 0.458,
1584
- 0.476,
1585
- 0.593
1586
- ],
1587
- "angle": 0,
1588
- "content": "For the first experiment, we train a Flowtrom Gaussian Mixture on LSH with 2 steps of flow, speaker embeddings and fixed mean and covariance (Flowtron GM-A). We obtain mixture component assignments per mel-spectrogram by performing a forward pass and averaging the component assignment over time and samples. Figure 6 shows that whereas most speakers are equally assigned to all components, component 7 is almost exclusively assigned to Helen's data."
1589
- },
1590
- {
1591
- "type": "image",
1592
- "bbox": [
1593
- 0.09,
1594
- 0.609,
1595
- 0.472,
1596
- 0.696
1597
- ],
1598
- "angle": 0,
1599
- "content": null
1600
- },
1601
- {
1602
- "type": "image_caption",
1603
- "bbox": [
1604
- 0.085,
1605
- 0.709,
1606
- 0.476,
1607
- 0.754
1608
- ],
1609
- "angle": 0,
1610
- "content": "Figure 6: Component assignments for Flowtron GM-A. Unlike LJS and Sally, Helen is almost exclusively assigned to component 7."
1611
- },
1612
- {
1613
- "type": "text",
1614
- "bbox": [
1615
- 0.085,
1616
- 0.786,
1617
- 0.476,
1618
- 0.906
1619
- ],
1620
- "angle": 0,
1621
- "content": "In the second experiment, we train a Flowtron Gaussian Mixture on LibriTTS with 1 step of flow, without speaker embeddings and predicted mean and covariance (Flowtron GM-B). Figure 7 shows that Flowtron GM assigns more probability to component 7 when the speaker is male than when it's female. Conversely, the model assigns more probability to component 6 when the speaker is female than when it's male."
1622
- },
1623
- {
1624
- "type": "image",
1625
- "bbox": [
1626
- 0.501,
1627
- 0.083,
1628
- 0.885,
1629
- 0.171
1630
- ],
1631
- "angle": 0,
1632
- "content": null
1633
- },
1634
- {
1635
- "type": "image_caption",
1636
- "bbox": [
1637
- 0.497,
1638
- 0.182,
1639
- 0.887,
1640
- 0.243
1641
- ],
1642
- "angle": 0,
1643
- "content": "Figure 7: Component assignments for Flowtron GM-B. Components 7 and 8 are assigned different probabilities according to gender, suggesting that the information stored in the components is gender dependent."
1644
- },
1645
- {
1646
- "type": "title",
1647
- "bbox": [
1648
- 0.498,
1649
- 0.279,
1650
- 0.742,
1651
- 0.293
1652
- ],
1653
- "angle": 0,
1654
- "content": "4.5.2. TRANSLATING DIMENSIONS"
1655
- },
1656
- {
1657
- "type": "text",
1658
- "bbox": [
1659
- 0.497,
1660
- 0.302,
1661
- 0.887,
1662
- 0.361
1663
- ],
1664
- "angle": 0,
1665
- "content": "In this subsection, we use the model Flowtron GM-A described previously. We focus on selecting a single mixture component and translating one of its dimensions by adding an offset."
1666
- },
1667
- {
1668
- "type": "text",
1669
- "bbox": [
1670
- 0.496,
1671
- 0.37,
1672
- 0.886,
1673
- 0.536
1674
- ],
1675
- "angle": 0,
1676
- "content": "The samples in our supplementary material show that we are able to modulate specific speech characteristics like pitch and word duration. Although the samples generated by translating one the dimensions associated with pitch height have different pitch contours, they have the same duration. Similarly, our samples show that translating the dimension associated with length of the first word does not modulate the pitch of the first word. This provides evidence that we can modulate these attributes by manipulating these dimensions and that the model is able to learn a disentangled representation of these speech attributes."
1677
- },
1678
- {
1679
- "type": "title",
1680
- "bbox": [
1681
- 0.498,
1682
- 0.556,
1683
- 0.611,
1684
- 0.571
1685
- ],
1686
- "angle": 0,
1687
- "content": "5. Discussion"
1688
- },
1689
- {
1690
- "type": "text",
1691
- "bbox": [
1692
- 0.496,
1693
- 0.582,
1694
- 0.887,
1695
- 0.778
1696
- ],
1697
- "angle": 0,
1698
- "content": "In this paper we propose a new text to mel-spectrogram synthesis model based on autoregressive flows that is optimized by maximizing the likelihood and allows for control of speech variation and style transfer. Our results show that samples generated with FlowTron achieve mean opinion scores that are similar to samples generated with state-of-the-art text-to-speech synthesis models. In addition, we demonstrate that at no extra cost and without a compound loss term, our model learns a latent space that stores nontextual information. Our experiments show that FlowTron gives the user the possibility to transfer characteristics from a source sample or speaker to a target speaker, for example making a monotonic speaker sound more expressive."
1699
- },
1700
- {
1701
- "type": "text",
1702
- "bbox": [
1703
- 0.496,
1704
- 0.785,
1705
- 0.886,
1706
- 0.906
1707
- ],
1708
- "angle": 0,
1709
- "content": "Our results show that despite all the variability added by increasing \\(\\sigma^2\\), the samples synthesized with FlowTron still produce high quality speech. Our results show that FlowTron learns a latent space over non-textual features that can be investigated and manipulated to give the user more control over the generative models output. We provide many examples that showcase this including increasing variation in mel-spectrograms in a controllable manner, transferring"
1710
- }
1711
- ],
1712
- [
1713
- {
1714
- "type": "header",
1715
- "bbox": [
1716
- 0.202,
1717
- 0.058,
1718
- 0.769,
1719
- 0.071
1720
- ],
1721
- "angle": 0,
1722
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
1723
- },
1724
- {
1725
- "type": "text",
1726
- "bbox": [
1727
- 0.089,
1728
- 0.086,
1729
- 0.475,
1730
- 0.144
1731
- ],
1732
- "angle": 0,
1733
- "content": "the style from speakers seen and unseen during training to another speaker using sentences with similar or different text, and making a monotonic speaker sound more expressive."
1734
- },
1735
- {
1736
- "type": "text",
1737
- "bbox": [
1738
- 0.089,
1739
- 0.154,
1740
- 0.475,
1741
- 0.274
1742
- ],
1743
- "angle": 0,
1744
- "content": "Flowtron produces expressive speech without labeled data or ever seeing expressive data. It pushes text-to-speech synthesis beyond the expressive limits of personal assistants. It opens new avenues for speech synthesis in human-computer interaction and the arts, where realism and expressivity are of utmost importance. To our knowledge, this work is the first to demonstrate the advantages of using normalizing flow models in text to mel-spectrogram synthesis."
1745
- },
1746
- {
1747
- "type": "title",
1748
- "bbox": [
1749
- 0.502,
1750
- 0.085,
1751
- 0.593,
1752
- 0.099
1753
- ],
1754
- "angle": 0,
1755
- "content": "References"
1756
- },
1757
- {
1758
- "type": "ref_text",
1759
- "bbox": [
1760
- 0.502,
1761
- 0.109,
1762
- 0.887,
1763
- 0.166
1764
- ],
1765
- "angle": 0,
1766
- "content": "Akuzawa, K., Iwasawa, Y., and Matsuo, Y. Expressive speech synthesis via modeling expressions with variational autoencoder. arXiv preprint arXiv:1804.02135, 2018."
1767
- },
1768
- {
1769
- "type": "ref_text",
1770
- "bbox": [
1771
- 0.501,
1772
- 0.181,
1773
- 0.887,
1774
- 0.24
1775
- ],
1776
- "angle": 0,
1777
- "content": "Arik, S., Diamos, G., Gibiansky, A., Miller, J., Peng, K., Ping, W., Raiman, J., and Zhou, Y. Deep voice 2: Multi-speaker neural text-to-speech. arXiv preprint arXiv:1705.08947, 2017a."
1778
- },
1779
- {
1780
- "type": "ref_text",
1781
- "bbox": [
1782
- 0.501,
1783
- 0.254,
1784
- 0.887,
1785
- 0.313
1786
- ],
1787
- "angle": 0,
1788
- "content": "Arik, S. O., Chrzanowski, M., Coates, A., Diamos, G., Gibiansky, A., Kang, Y., Li, X., Miller, J., Ng, A., Raiman, J., et al. Deep voice: Real-time neural text-to-speech. arXiv preprint arXiv:1702.07825, 2017b."
1789
- },
1790
- {
1791
- "type": "ref_text",
1792
- "bbox": [
1793
- 0.501,
1794
- 0.327,
1795
- 0.887,
1796
- 0.37
1797
- ],
1798
- "angle": 0,
1799
- "content": "Badham, J., Lasker, L., Parkes, W. F., Rubinstein, A. B., Broderick, M., Coleman, D., and Wood, J. Wargames, 1983."
1800
- },
1801
- {
1802
- "type": "ref_text",
1803
- "bbox": [
1804
- 0.501,
1805
- 0.384,
1806
- 0.887,
1807
- 0.444
1808
- ],
1809
- "angle": 0,
1810
- "content": "Binkowski, M., Donahue, J., Dieleman, S., Clark, A., Elsen, E., Casagrande, N., Cobo, L. C., and Simonyan, K. High fidelity speech synthesis with adversarial networks. arXiv preprint arXiv:1909.11646, 2019."
1811
- },
1812
- {
1813
- "type": "ref_text",
1814
- "bbox": [
1815
- 0.501,
1816
- 0.457,
1817
- 0.887,
1818
- 0.515
1819
- ],
1820
- "angle": 0,
1821
- "content": "De Cheveigné, A. and Kawahara, H. Yin, a fundamental frequency estimator for speech and music. The Journal of the Acoustical Society of America, 111(4):1917-1930, 2002."
1822
- },
1823
- {
1824
- "type": "ref_text",
1825
- "bbox": [
1826
- 0.501,
1827
- 0.53,
1828
- 0.887,
1829
- 0.574
1830
- ],
1831
- "angle": 0,
1832
- "content": "Dinh, L., Krueger, D., and Bengio, Y. Nice: Non-linear independent components estimation. arXiv preprint arXiv:1410.8516, 2014."
1833
- },
1834
- {
1835
- "type": "ref_text",
1836
- "bbox": [
1837
- 0.501,
1838
- 0.588,
1839
- 0.887,
1840
- 0.631
1841
- ],
1842
- "angle": 0,
1843
- "content": "Dinh, L., Sohl-Dickstein, J., and Bengio, S. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016."
1844
- },
1845
- {
1846
- "type": "ref_text",
1847
- "bbox": [
1848
- 0.501,
1849
- 0.645,
1850
- 0.887,
1851
- 0.69
1852
- ],
1853
- "angle": 0,
1854
- "content": "Gambardella, A., Baydin, A. G., and Torr, P. H. Transflow learning: Repurposing flow models without retraining. arXiv preprint arXiv:1911.13270, 2019."
1855
- },
1856
- {
1857
- "type": "ref_text",
1858
- "bbox": [
1859
- 0.501,
1860
- 0.703,
1861
- 0.887,
1862
- 0.763
1863
- ],
1864
- "angle": 0,
1865
- "content": "Gibiansky, A., Arik, S., Diamos, G., Miller, J., Peng, K., Ping, W., Raiman, J., and Zhou, Y. Deep voice 2: Multi-speaker neural text-to-speech. In Advances in neural information processing systems, pp. 2962-2970, 2017."
1866
- },
1867
- {
1868
- "type": "ref_text",
1869
- "bbox": [
1870
- 0.501,
1871
- 0.776,
1872
- 0.887,
1873
- 0.835
1874
- ],
1875
- "angle": 0,
1876
- "content": "Hsu, W.-N., Zhang, Y., Weiss, R. J., Zen, H., Wu, Y., Wang, Y., Cao, Y., Jia, Y., Chen, Z., Shen, J., et al. Hierarchical generative modeling for controllable speech synthesis. arXiv preprint arXiv:1810.07217, 2018."
1877
- },
1878
- {
1879
- "type": "ref_text",
1880
- "bbox": [
1881
- 0.501,
1882
- 0.849,
1883
- 0.773,
1884
- 0.863
1885
- ],
1886
- "angle": 0,
1887
- "content": "Ito, K. et al. The LJ speech dataset, 2017."
1888
- },
1889
- {
1890
- "type": "ref_text",
1891
- "bbox": [
1892
- 0.501,
1893
- 0.876,
1894
- 0.885,
1895
- 0.906
1896
- ],
1897
- "angle": 0,
1898
- "content": "Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014."
1899
- },
1900
- {
1901
- "type": "list",
1902
- "bbox": [
1903
- 0.501,
1904
- 0.109,
1905
- 0.887,
1906
- 0.906
1907
- ],
1908
- "angle": 0,
1909
- "content": null
1910
- }
1911
- ],
1912
- [
1913
- {
1914
- "type": "header",
1915
- "bbox": [
1916
- 0.202,
1917
- 0.057,
1918
- 0.771,
1919
- 0.072
1920
- ],
1921
- "angle": 0,
1922
- "content": "Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis"
1923
- },
1924
- {
1925
- "type": "ref_text",
1926
- "bbox": [
1927
- 0.088,
1928
- 0.085,
1929
- 0.475,
1930
- 0.13
1931
- ],
1932
- "angle": 0,
1933
- "content": "Kingma, D. P. and Dhariwal, P. Glow: Generative flow with invertible 1x1 convolutions. arXiv preprint arXiv:1807.03039, 2018."
1934
- },
1935
- {
1936
- "type": "ref_text",
1937
- "bbox": [
1938
- 0.088,
1939
- 0.141,
1940
- 0.476,
1941
- 0.216
1942
- ],
1943
- "angle": 0,
1944
- "content": "Kingma, D. P., Salimans, T., Jozefowicz, R., Chen, X., Sutskever, I., and Welling, M. Improved variational inference with inverse autoregressive flow. In Advances in Neural Information Processing Systems, pp. 4743-4751, 2016."
1945
- },
1946
- {
1947
- "type": "ref_text",
1948
- "bbox": [
1949
- 0.088,
1950
- 0.228,
1951
- 0.476,
1952
- 0.274
1953
- ],
1954
- "angle": 0,
1955
- "content": "Lee, J., Choi, H.-S., Jeon, C.-B., Koo, J., and Lee, K. Adversarially trained end-to-end korean singing voice synthesis system. arXiv preprint arXiv:1908.01919, 2019."
1956
- },
1957
- {
1958
- "type": "ref_text",
1959
- "bbox": [
1960
- 0.088,
1961
- 0.285,
1962
- 0.476,
1963
- 0.375
1964
- ],
1965
- "angle": 0,
1966
- "content": "Nishimura, M., Hashimoto, K., Oura, K., Nankaku, Y., and Tokuda, K. Singing voice synthesis based on deep neural networks. In Interspeech 2016, pp. 2478-2482, 2016. doi: 10.21437/Interspeech.2016-1027. URL http://dx.doi.org/10.21437/Interspeech.2016-1027."
1967
- },
1968
- {
1969
- "type": "ref_text",
1970
- "bbox": [
1971
- 0.088,
1972
- 0.386,
1973
- 0.476,
1974
- 0.432
1975
- ],
1976
- "angle": 0,
1977
- "content": "Parmar, N., Vaswani, A., Uszkoreit, J., Kaiser, L., Shazeer, N., Ku, A., and Tran, D. Image transformer. arXiv preprint arXiv:1802.05751, 2018."
1978
- },
1979
- {
1980
- "type": "ref_text",
1981
- "bbox": [
1982
- 0.088,
1983
- 0.442,
1984
- 0.476,
1985
- 0.502
1986
- ],
1987
- "angle": 0,
1988
- "content": "Ping, W., Peng, K., Gibiansky, A., Arik, S. O., Kannan, A., Narang, S., Raiman, J., and Miller, J. Deep voice 3: 2000-speaker neural text-to-speech. arXiv preprint arXiv:1710.07654, 2017."
1989
- },
1990
- {
1991
- "type": "ref_text",
1992
- "bbox": [
1993
- 0.088,
1994
- 0.514,
1995
- 0.476,
1996
- 0.589
1997
- ],
1998
- "angle": 0,
1999
- "content": "Prenger, R., Valle, R., and Catanzaro, B. Waveglow: A flow-based generative network for speech synthesis. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3617-3621. IEEE, 2019."
2000
- },
2001
- {
2002
- "type": "ref_text",
2003
- "bbox": [
2004
- 0.088,
2005
- 0.601,
2006
- 0.476,
2007
- 0.66
2008
- ],
2009
- "angle": 0,
2010
- "content": "Radford, A., Metz, L., and Chintala, S. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015."
2011
- },
2012
- {
2013
- "type": "ref_text",
2014
- "bbox": [
2015
- 0.088,
2016
- 0.672,
2017
- 0.476,
2018
- 0.747
2019
- ],
2020
- "angle": 0,
2021
- "content": "Shen, J., Pang, R., Weiss, R. J., Schuster, M., Jaitly, N., Yang, Z., Chen, Z., Zhang, Y., Wang, Y., Skerry-Ryan, R., et al. Natural tts synthesis by conditioning wavenet on mel spectrogram predictions. arXiv preprint arXiv:1712.05884, 2017."
2022
- },
2023
- {
2024
- "type": "ref_text",
2025
- "bbox": [
2026
- 0.088,
2027
- 0.759,
2028
- 0.476,
2029
- 0.834
2030
- ],
2031
- "angle": 0,
2032
- "content": "Skerry-Ryan, R., Battenberg, E., Xiao, Y., Wang, Y., Stanton, D., Shor, J., Weiss, R. J., Clark, R., and Saurous, R. A. Towards end-to-end prosody transfer for expressive speech synthesis with tacotron. arXiv preprint arXiv:1803.09047, 2018."
2033
- },
2034
- {
2035
- "type": "ref_text",
2036
- "bbox": [
2037
- 0.088,
2038
- 0.845,
2039
- 0.476,
2040
- 0.905
2041
- ],
2042
- "angle": 0,
2043
- "content": "Umeda, N., Matsui, E., Suzuki, T., and Omura, H. Synthesis of fairy tales using an analog vocal tract. In Proceedings of 6th International Congress on Acoustics, pp. B159-162, 1968."
2044
- },
2045
- {
2046
- "type": "list",
2047
- "bbox": [
2048
- 0.088,
2049
- 0.085,
2050
- 0.476,
2051
- 0.905
2052
- ],
2053
- "angle": 0,
2054
- "content": null
2055
- },
2056
- {
2057
- "type": "ref_text",
2058
- "bbox": [
2059
- 0.5,
2060
- 0.085,
2061
- 0.885,
2062
- 0.13
2063
- ],
2064
- "angle": 0,
2065
- "content": "Valle, R., Li, J., Prenger, R., and Catanzaro, B. Mellotron github repo, 2019a. URL https://github.com/NVIDIA/mellotron."
2066
- },
2067
- {
2068
- "type": "ref_text",
2069
- "bbox": [
2070
- 0.5,
2071
- 0.141,
2072
- 0.887,
2073
- 0.2
2074
- ],
2075
- "angle": 0,
2076
- "content": "Valle, R., Li, J., Prenger, R., and Catanzaro, B. Mellotron: Multispeaker expressive voice synthesis by conditioning on rhythm, pitch and global style tokens. arXiv preprint arXiv:1910.11997, 2019b."
2077
- },
2078
- {
2079
- "type": "ref_text",
2080
- "bbox": [
2081
- 0.5,
2082
- 0.211,
2083
- 0.887,
2084
- 0.27
2085
- ],
2086
- "angle": 0,
2087
- "content": "Vinyals, O., Kaiser, L., Koo, T., Petrov, S., Sutskever, I., and Hinton, G. Grammar as a foreign language. In Advances in neural information processing systems, pp. 2773-2781, 2015."
2088
- },
2089
- {
2090
- "type": "ref_text",
2091
- "bbox": [
2092
- 0.5,
2093
- 0.282,
2094
- 0.887,
2095
- 0.342
2096
- ],
2097
- "angle": 0,
2098
- "content": "Wang, Y., Skerry-Ryan, R., Stanton, D., Wu, Y., Weiss, R. J., Jaitly, N., Yang, Z., Xiao, Y., Chen, Z., Bengio, S., et al. Tacotron: A fully end-to-end text-to-speech synthesis model. arXiv preprint arXiv:1703.10135, 2017."
2099
- },
2100
- {
2101
- "type": "ref_text",
2102
- "bbox": [
2103
- 0.5,
2104
- 0.352,
2105
- 0.887,
2106
- 0.428
2107
- ],
2108
- "angle": 0,
2109
- "content": "Wang, Y., Stanton, D., Zhang, Y., Skerry-Ryan, R., Battenberg, E., Shor, J., Xiao, Y., Ren, F., Jia, Y., and Saurous, R. A. Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis. arXiv preprint arXiv:1803.09017, 2018."
2110
- },
2111
- {
2112
- "type": "ref_text",
2113
- "bbox": [
2114
- 0.5,
2115
- 0.438,
2116
- 0.887,
2117
- 0.468
2118
- ],
2119
- "angle": 0,
2120
- "content": "Weide, R. L. The cmu pronouncing dictionary. URL: http://wwwspeech.cs.cmu.edu/cgi-bin/cmudict, 1998."
2121
- },
2122
- {
2123
- "type": "ref_text",
2124
- "bbox": [
2125
- 0.5,
2126
- 0.478,
2127
- 0.887,
2128
- 0.537
2129
- ],
2130
- "angle": 0,
2131
- "content": "Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019."
2132
- },
2133
- {
2134
- "type": "list",
2135
- "bbox": [
2136
- 0.5,
2137
- 0.085,
2138
- 0.887,
2139
- 0.537
2140
- ],
2141
- "angle": 0,
2142
- "content": null
2143
- }
2144
- ]
2145
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9edd700799e72190df95b9146103a096b8d5959bffcce7c7dcb999e3ba8b6bfd
3
+ size 87047
 
 
data/2020/2005_05xxx/2005.05957/full.md CHANGED
@@ -1,322 +1,3 @@
1
- # Flowtron: an Autoregressive Flow-based Generative Network for Text-to-Speech Synthesis
2
-
3
- Rafael Valle<sup>1</sup> Kevin Shih<sup>1</sup> Ryan Prenger<sup>1</sup> Bryan Catanzaro<sup>1</sup>
4
-
5
- # Abstract
6
-
7
- In this paper we propose Flowtron: an autoregressive flow-based generative network for text-to-speech synthesis with control over speech variation and style transfer. Flowtron borrows insights from IAF and revamps Tacotron in order to provide high-quality and expressive mel-spectrogram synthesis. Flowtron is optimized by maximizing the likelihood of the training data, which makes training simple and stable. Flowtron learns an invertible mapping of data to a latent space that can be manipulated to control many aspects of speech synthesis (pitch, tone, speech rate, cadence, accent). Our mean opinion scores (MOS) show that Flowtron matches state-of-the-art TTS models in terms of speech quality. In addition, we provide results on control of speech variation, interpolation between samples and style transfer between speakers seen and unseen during training. Code and pretrained models will be made publicly available at https://github.com/NVIDIA/flowtron.
8
-
9
- # 1. Introduction
10
-
11
- Current speech synthesis methods do not give the user enough control over how speech actually sounds. Automatically converting text to audio that successfully communicates the text was achieved a long time ago (Umeda et al., 1968; Badham et al., 1983). However, communicating only the text information leaves out all of the acoustic properties of the voice that convey much of the meaning and human expressiveness. Nearly all the research into speech synthesis since the 1960s has focused on adding that non-textual information to synthesized speech. But in spite of this, the typical speech synthesis problem is formulated as a text to speech problem in which the user inputs only text.
12
-
13
- Taming the non-textual information in speech is difficult
14
-
15
- because the non-textual information is unlabeled. A voice actor may speak the same text with different emphasis or emotion based on context, but it is unclear how to label a particular reading. Without labels for the non-textual information, models have fallen back to unsupervised learning. Recent models have achieved nearly human-level quality, despite treating the non-textual information as a black box. The model's only goal is to match the patterns in the training data (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017). Despite these models' excellent ability to recreate the non-textual information in the training set, the user has no insight into or control over the non-textual information.
16
-
17
- It is possible to formulate an unsupervised learning problem in such a way that the user can gain insights into the structure of a data set. One way is to formulate the problem such that the data is assumed to have a representation in some latent space, and have the model learn that representation. This latent space can then be investigated and manipulated to give the user more control over the generative model's output. Such approaches have been popular in image generation for some time now, allowing users to interpolate smoothly between images and to identify portions of the latent space that correlate with various features (Radford et al., 2015; Kingma & Dhariwal, 2018).
18
-
19
- In audio, however, approaches have focused on embeddings that remove a large amount of information and are obtained from assumptions about what is interesting. Recent approaches that utilize deep learning for expressive speech synthesis combine text and a learned latent embedding for prosody or global style (Wang et al., 2018; Skerry-Ryan et al., 2018). A variation of this approach is proposed by (Hsu et al., 2018), wherein a Gaussian mixture model (GMM) encoding the audio is added to Tacotron to learn a latent embedding. These approaches control the nontextual information by learning a bank of embeddings or by providing the target output as an input to the model and compressing it. However, these approaches require making assumptions about the dimensionality of the embeddings before hand and are not guaranteed to contain all the nontextual information it takes to reconstruct speech, including the risk of having dummy dimensions or not enough capacity, as the appendix sections in (Wang et al., 2018;
20
-
21
- Skerry-Ryan et al., 2018; Hsu et al., 2018) confirm. They also require finding an encoder and embedding that prevents the model from simply learning a complex identity function that ignores other inputs. Furthermore, these approaches focus on fixed-length embeddings under the assumption that variable-length embeddings are not robust to text and speaker perturbations. Finally, most of these approaches do not give the user control over the degree of variability in the synthesized speech.
22
-
23
- In this paper we propose Flowtron: an autoregressive flow-based generative network for mel-spectrogram synthesis with control over acoustics and speech. Flowtron learns an invertible function that maps a distribution over mel-spectrograms to a latent $z$ space parameterized by a spherical Gaussian. With this formalization, we can generate samples containing specific speech characteristics manifested in mel-space by finding and sampling the corresponding region in $z$ -space. In the basic approach, we generate samples by sampling a zero mean spherical Gaussian prior and control the amount of variation by adjusting its variance. Despite its simplicity, this approach offers more speech variation and control than Tacotron.
24
-
25
- In Flowtron, we can access specific regions of mel-spectrogram space by sampling a posterior distribution conditioned on prior evidence from existing samples (Kingma & Dhariwal, 2018; Gambardella et al., 2019). This approach allows us to make a monotonous speaker more expressive by computing the region in $z$ -space associated with expressive speech as it is manifested in the prior evidence. Finally, our formulation also allows us to impose a structure to the $z$ -space and parametrize it with a Gaussian mixture, for example. In this approach related to (Hsu et al., 2018), speech characteristics in mel-spectrogram space can be associated with individual components. Hence, it is possible to generate samples with specific speech characteristics by selecting a component or a mixture thereof<sup>1</sup>.
26
-
27
- Although VAEs and GANs (Hsu et al., 2018; Binkowski et al., 2019; Akuzawa et al., 2018) based models also provide a latent prior that can be easily manipulated, in Flowtron this comes at no cost in speech quality nor optimization challenges.
28
-
29
- We find that Flowtron is able to generalize and produce sharp mel-spectrograms by simply maximizing the likelihood of the data while not requiring any additional Prenet or Postnet layer (Wang et al., 2017), nor compound loss functions required by most state of the art models like (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017; Skerry-Ryan et al., 2018; Wang et al., 2018; Binkowski et al., 2019).
30
-
31
- Flowtron is optimized by maximizing the likelihood of the training data, which makes training simple and stable. It
32
-
33
- learns an invertible mapping of the data to a latent space that can be manipulated to control many aspects of speech synthesis. Our mean opinion scores (MOS) show that Flowtron matches state-of-the-art TTS models in terms of speech quality. In addition, we provide results on control of speech variation, interpolation between samples, and style transfer between seen and unseen speakers with similar and different sentences. To our knowledge, this work is the first to show evidence that normalizing flow models can also be used for text-to-speech synthesis. We hope this will further stimulate developments in normalizing flows.
34
-
35
- # 2. Related Work
36
-
37
- Earlier approaches to text-to-speech synthesis that achieve human-like results focus on synthesizing acoustic features from text, treating the non-textual information as a black box (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017). Approaches like (Wang et al., 2017; Shen et al., 2017) require adding a critical Prenet layer to help with convergence and improve generalization (Wang et al., 2017). Furthermore, such models require an additional Postnet residual layer and modified loss to produce "better resolved harmonics and high frequency formant structures, which reduces synthesis artifacts."
38
-
39
- One approach to dealing with this lack of labels for underlying non-textual information is to look for hand engineered statistics based on the audio that we believe are correlated with this underlying information.
40
-
41
- This is the approach taken by models like (Nishimura et al., 2016; Lee et al., 2019), wherein utterances are conditioned on audio statistics that can be calculated directly from the training data such as $F_{0}$ (fundamental frequency). However, in order to use such models, the statistics we hope to approximate must be decided upon a-priori, and the target value of these statistics must be determined before synthesis.
42
-
43
- Another approach to dealing with the issue of unlabeled non-textual information is to learn a latent embedding for prosody or global style. This is the approach taken by models like (Skerry-Ryan et al., 2018; Wang et al., 2018), wherein in a bank of embeddings or a latent embedding space of prosody is learned from unlabelled data. While these approaches have shown promise, manipulating such latent variables only offers a coarse control over expressive characteristics of speech.
44
-
45
- A mixed approach consists of combining engineered statistics with latent embeddings learned in an unsupervised fashion. This is the approach taken by models like Mellotron (Valle et al., 2019b). In Mellotron, utterances are conditioned on both audio statistics and a latent embedding of acoustic features derived from a reference acoustic representation. Despite its advantages, this approach still requires
46
-
47
- determining these statistics before synthesis.
48
-
49
- # 3. Flowtron
50
-
51
- Flowtron is an autoregressive generative model that generates a sequence of mel-spectrogram frames with probability $p(x)$ by producing each mel-spectrogram frame conditioned on the previous frames, $p(x) = \prod p(x_{t}|x_{1:t - 1})$ . Our setup uses a neural network as a generative model by sampling from a simple distribution $p(z)$ . We consider two simple distributions with the same number of dimensions as our desired mel-spectrogram: a zero-mean spherical Gaussian and a mixture of spherical Gaussians with fixed or learnable parameters.
52
-
53
- $$
54
- \boldsymbol {z} \sim \mathcal {N} (\boldsymbol {z}; 0, \boldsymbol {I}) \tag {1}
55
- $$
56
-
57
- $$
58
- \boldsymbol {z} \sim \sum_ {k} \hat {\phi} _ {k} \mathcal {N} (\boldsymbol {z}; \boldsymbol {\mu} _ {k}, \boldsymbol {\Sigma} _ {k}) \tag {2}
59
- $$
60
-
61
- These samples are put through a series of invertible, parametrized transformations $\pmb{f}$ , in our case affine transformations that transform $p(\pmb{z})$ into $p(x)$ .
62
-
63
- $$
64
- \boldsymbol {x} = \boldsymbol {f} _ {0} \circ \boldsymbol {f} _ {1} \circ \dots \boldsymbol {f} _ {k} (z) \tag {3}
65
- $$
66
-
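- As a rough illustration of how the priors in Eqs. (1)-(2) are used, the sketch below draws a latent $z$ tensor that would then be pushed through the chain of invertible transformations in Eq. (3). This is hedged, illustrative code rather than the released Flowtron implementation; the function names and tensor shapes are assumptions.
-
- ```python
- import torch
-
- def sample_spherical_gaussian(n_frames, n_mel, sigma=1.0):
-     # Eq. (1): z ~ N(0, sigma^2 I), one latent vector per mel-spectrogram frame.
-     return sigma * torch.randn(n_frames, n_mel)
-
- def sample_gaussian_mixture(n_frames, n_mel, weights, means, sigmas):
-     # Eq. (2): z ~ sum_k phi_k N(mu_k, Sigma_k) with spherical (diagonal) covariances.
-     # weights: (K,) mixture probabilities; means, sigmas: (K, n_mel) tensors.
-     comp = torch.multinomial(weights, n_frames, replacement=True)  # one component per frame
-     eps = torch.randn(n_frames, n_mel)
-     return means[comp] + sigmas[comp] * eps
- ```
-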
67
- As it is illustrated in (Kingma et al., 2016), in autoregressive normalizing flows the $t$ -th variable $\boldsymbol{z}_t^\prime$ only depends on previous timesteps $\boldsymbol{z}_{1:t - 1}$ :
68
-
69
- $$
70
- \boldsymbol {z} _ {t} ^ {\prime} = \boldsymbol {f} _ {k} \left(\boldsymbol {z} _ {1: t - 1}\right) \tag {4}
71
- $$
72
-
73
- By using parametrized affine transformations for $f$ and due to the autoregressive structure, the Jacobian determinant of each of the transformations $f$ is lower triangular, hence easy to compute. With this setup we can train Flowtron by maximizing the log-likelihood of the data, which can be done using the change of variables:
74
-
75
- $$
76
- \log p _ {\theta} (\boldsymbol {x}) = \log p _ {\theta} (\boldsymbol {z}) + \sum_ {i = 1} ^ {k} \log | \det (\boldsymbol {J} (\boldsymbol {f} _ {i} ^ {- 1} (\boldsymbol {x}))) | \tag {5}
77
- $$
78
-
79
- $$
80
- \boldsymbol {z} = \boldsymbol {f} _ {k} ^ {- 1} \circ \boldsymbol {f} _ {k - 1} ^ {- 1} \circ \dots \boldsymbol {f} _ {0} ^ {- 1} (\boldsymbol {x}) \tag {6}
81
- $$
82
-
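- The training objective in Eqs. (5)-(6) is compact enough to sketch directly. The snippet below is an illustration under the assumption that the inverse pass returns the latent $z$ together with the per-step $\log s$ tensors; it is not the authors' training code, and it follows the sign convention of Eq. (9).
-
- ```python
- import math
- import torch
-
- def flow_nll(z, log_s_list, sigma=1.0):
-     # First term of Eq. (5): log-density of z under a zero-mean spherical Gaussian prior.
-     log_p_z = -0.5 * (z ** 2).sum() / sigma ** 2 \
-               - 0.5 * z.numel() * math.log(2 * math.pi * sigma ** 2)
-     # Change-of-variables term: each affine step contributes sum(log|s|), as in Eq. (9).
-     log_det = sum(log_s.sum() for log_s in log_s_list)
-     return -(log_p_z + log_det) / z.numel()  # mean negative log-likelihood per element
- ```
-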
83
- For the forward pass through the network, we take the mel-spectrograms as vectors and process them through several "steps of flow" conditioned on the text and speaker ids. A step of flow here consists of an affine coupling layer, described below.
84
-
85
- # 3.1. Affine Coupling Layer
86
-
87
- Invertible neural networks are typically constructed using coupling layers (Dinh et al., 2014; 2016; Kingma &
88
-
89
- Dhariwal, 2018). In our case, we use an affine coupling layer (Dinh et al., 2016). Every input $\boldsymbol{x}_{t-1}$ produces scale and bias terms, $s$ and $b$ respectively, that affine-transform the succeeding input $\boldsymbol{x}_t$ :
90
-
91
- $$
92
- \left(\log \boldsymbol{s}_{t}, \boldsymbol{b}_{t}\right) = NN\left(\boldsymbol{x}_{1:t-1}, \text{text}, \text{speaker}\right) \tag{7}
93
- $$
94
-
95
- $$
96
- \boldsymbol {x} _ {t} ^ {\prime} = \boldsymbol {s} _ {t} \odot \boldsymbol {x} _ {t} + \boldsymbol {b} _ {t} \tag {8}
97
- $$
98
-
99
- Here $NN()$ can be any autoregressive causal transformation. This can be achieved by time-wise concatenation of a 0-valued vector to the input provided to $NN()$ . The affine coupling layer preserves invertibility for the overall network, even though $NN()$ does not need to be invertible. This follows because the first input of $NN()$ is a constant and due to the autoregressive nature of the model the scaling and translation terms $s_t$ and $b_t$ only depend on $x_{1:t-1}$ and the fixed text and speaker vectors. Accordingly, when inverting the network, we can compute $s_t$ and $b_t$ from the preceding input $x_{1:t-1}$ , and then invert $x_t'$ to compute $x_t$ , by simply recomputing $NN(x_{1:t-1}, text, speaker)$ .
100
-
101
- With an affine coupling layer, only the $s_t$ term changes the volume of the mapping and adds a change of variables term to the loss. This term also serves to penalize the model for non-invertible affine mappings.
102
-
103
- $$
104
- \log |\det(\boldsymbol{J}(\boldsymbol{f}_{\text{coupling}}^{-1}(\boldsymbol{x})))| = \log |\boldsymbol{s}| \tag{9}
105
- $$
106
-
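- To make the mechanics of Eqs. (7)-(9) concrete, here is a minimal sketch of one affine coupling step and its inversion. `nn_fn` is a placeholder for the autoregressive, causal $NN()$ above, not the network used in the paper, and the inverse trades efficiency for clarity by re-running `nn_fn` over the recovered prefix at every frame.
-
- ```python
- import torch
-
- def coupling_forward(x, text, speaker, nn_fn):
-     # x: (T, n_mel) mel frames. Prepend a zero frame so step t conditions only on x_{1:t-1}.
-     shifted = torch.cat([torch.zeros_like(x[:1]), x[:-1]], dim=0)
-     log_s, b = nn_fn(shifted, text, speaker)   # Eq. (7)
-     y = torch.exp(log_s) * x + b               # Eq. (8)
-     return y, log_s                            # sum of log_s enters the loss via Eq. (9)
-
- def coupling_inverse(y, text, speaker, nn_fn):
-     # Recover x frame by frame: s_t and b_t depend only on frames already recovered.
-     frames = []
-     for t in range(y.shape[0]):
-         context = torch.cat([torch.zeros_like(y[:1])] + frames, dim=0)
-         log_s, b = nn_fn(context, text, speaker)  # causal, so the last position is step t
-         frames.append((y[t:t + 1] - b[-1:]) * torch.exp(-log_s[-1:]))
-     return torch.cat(frames, dim=0)
- ```
-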
107
- With this setup, it is also possible to revert the ordering of the input $x$ without loss of generality. Hence, we choose to revert the order of the input at every even step of flow and to maintain the original order on odd steps of flow. This allows the model to learn dependencies both forward and backwards in time while remaining causal and invertible.
108
-
109
- # 3.2. Model architecture
110
-
111
- Our text encoder modifies Tacotron's by replacing batchnorm with instance-norm. Our decoder and $NN$ architecture, depicted in Figure 1, removes the essential Prenet and Postnet layers from Tacotron. We use the content-based tanh attention described in (Vinyals et al., 2015). We use the Mel Encoder described in (Hsu et al., 2018) for Flowtron models that predict the parameters of the Gaussian mixture.
112
-
113
- Unlike (Ping et al., 2017; Gibiansky et al., 2017), where site-specific speaker embeddings are used, we use a single speaker embedding that is channel-wise concatenated with the encoder outputs at every token. We use a fixed dummy speaker embedding for models not conditioned on speaker id. Finally, we add a dense layer with a sigmoid output to the flow step closest to $z$ . This provides the model with a gating mechanism as early as possible during inference to avoid extra computation.
114
-
115
- ![](images/829dc9169a7f7f045a4d5475447794c6bfc08282e8364637c340292bd9ae2c77.jpg)
116
- Figure 1: Flowtron network. Text and speaker embeddings are channel-wise concatenated. A 0-valued vector is concatenated with $x$ in the time dimension.
117
-
118
- # 3.3. Inference
119
-
120
- Once the network is trained, doing inference is simply a matter of randomly sampling $z$ values from a spherical Gaussian, or Gaussian mixture, and running them through the network, reverting the order of the input when necessary. During training we used $\sigma^2 = 1$. The parameters of the Gaussian mixture are either fixed or predicted by Flowtron. In Section 4.3 we explore the effects of different values for $\sigma^2$. In general, we found that sampling $z$ values from a Gaussian with a lower standard deviation than that assumed during training resulted in mel-spectrograms that sounded better, as found in (Kingma & Dhariwal, 2018) and earlier work on likelihood-based generative models (Parmar et al., 2018). During inference we sampled $z$ values from a Gaussian with $\sigma^2 = 0.5$, unless otherwise specified. The text and speaker embeddings are included at each of the coupling layers as before, but now the affine transforms are inverted in time, and these inverses are also guaranteed by the loss.
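A minimal sketch of this sampling procedure follows; the `DummyFlowtron` class and the input tensors are placeholders standing in for a trained model and its encoded text/speaker inputs:

```python
import torch

class DummyFlowtron:
    def infer(self, z, text, speaker):
        # A trained model would run the inverse flow conditioned on text and speaker.
        return z

flowtron = DummyFlowtron()
text = torch.randint(0, 100, (1, 32))          # assumed encoded character/ARPAbet ids
speaker = torch.tensor([0])

sigma_sq = 0.5                                 # training used sigma^2 = 1, inference 0.5
z = torch.randn(1, 400, 80) * sigma_sq ** 0.5  # (batch, frames, mel bins)
mel = flowtron.infer(z, text, speaker)
```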
121
-
122
- # 4. Experiments
123
-
124
- This section describes our training setup and provides quantitative and qualitative results. Our quantitative results show that Flowtron has mean opinion scores (MOS) that are comparable to those of state-of-the-art models for text-to-mel-spectrogram synthesis such as Tacotron 2. Our qualitative results display many features that are not possible or not efficient with Tacotron and Tacotron 2 GST. These features include control of the amount of variation in speech, interpolation between samples, and style transfer between speakers seen and unseen during training.
125
-
126
- We decode all mel-spectrograms into waveforms by using a single pre-trained WaveGlow (Prenger et al., 2019) model trained on a single speaker and available on GitHub (Valle et al., 2019a). During inference we used $\sigma^2 = 0.7$. In consonance with (Valle et al., 2019b), our results suggest that WaveGlow can be used as a universal decoder.
127
-
128
- Although we provide images to illustrate our results, they can best be appreciated by listening. Hence, we ask the readers to visit our website ${}^{2}$ to listen to Flowtron samples.
129
-
130
- # 4.1. Training setup
131
-
132
- We train our Flowtron, Tacotron 2 and Tacotron 2 GST models using a dataset that combines the LJSpeech (LJS) dataset (Ito et al., 2017) with two proprietary single-speaker datasets with 20 and 10 hours respectively (Sally and Helen). We will refer to this combined dataset as LSH. We also train a Flowtron model on the train-clean-100 subset of LibriTTS (Zen et al., 2019) with 123 speakers and 25 minutes on average per speaker. Speakers with less than 5 minutes of data and files longer than 10 seconds are filtered out. For each dataset we use at least 180 randomly chosen samples for the validation set and the remainder for the training set.
133
-
134
- The models are trained on uniformly sampled normalized text and ARPAbet encodings obtained from the CMU Pronouncing Dictionary (Weide, 1998). We do not perform any data augmentation. We adapt the public Tacotron 2 and Tacotron 2 GST repos to include speaker embeddings as described in Section 3.
135
-
136
- We use a sampling rate of $22050\mathrm{Hz}$ and mel-spectrograms with 80 bins using librosa mel filter defaults. We apply the STFT with an FFT size of 1024, a window size of 1024 samples and a hop size of 256 samples $(\sim 12ms)$.
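For reference, roughly equivalent features can be computed with librosa (which provides the cited mel-filter defaults); the file path and the log-compression step are assumptions, not the authors' exact pipeline:

```python
import librosa
import numpy as np

y, sr = librosa.load("sample.wav", sr=22050)
mel = librosa.feature.melspectrogram(
    y=y, sr=sr, n_fft=1024, win_length=1024, hop_length=256, n_mels=80)
log_mel = np.log(np.clip(mel, 1e-5, None))   # assumed dynamic-range compression
```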
137
-
138
- We use the ADAM (Kingma & Ba, 2014) optimizer with default parameters, a 1e-4 learning rate and 1e-6 weight decay for Flowtron, and a 1e-3 learning rate and 1e-5 weight decay for the other models, following guidelines in (Wang et al., 2017). We anneal the learning rate once the generalization error starts to plateau and stop training once the generalization error stops significantly decreasing or starts increasing. The Flowtron models with 2 steps of flow were trained on the LSH dataset for approximately 1000 epochs and then fine-tuned on LibriTTS for 500 epochs. Tacotron 2 and Tacotron 2 GST are trained for approximately 500 epochs. Each model is trained on a single NVIDIA DGX-1 with 8 GPUs.
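A sketch of the corresponding optimizer setup in PyTorch; the model is a placeholder, and the plateau-based annealing schedule is one possible realization of the procedure described above:

```python
import torch

model = torch.nn.Linear(80, 80)   # placeholder for the Flowtron network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-6)

# Anneal the learning rate once the validation (generalization) error plateaus.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=10)
val_loss = 1.0                    # placeholder; computed on the validation set in practice
scheduler.step(val_loss)
```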
139
-
140
- We find it faster to first learn to attend on a Flowtron model with a single step of flow and large amounts of data than with multiple steps of flow and less data. After the model has learned to attend, we transfer its parameters to models with more steps of flow and speakers with less data. Thus, we first train a Flowtron model with a single step of flow on the LSH dataset with many hours per speaker. Then we fine-tune this model to Flowtron models with more steps of flow. Finally, these models are fine-tuned on LibriTTS with an optional new speaker embedding.
141
-
142
- # 4.2. Mean Opinion Score comparison
143
-
144
- We provide results that compare mean opinion scores (MOS) from real data from the LJS dataset, samples from a Flowtron with 2 steps of flow and samples from our implementation of Tacotron 2, both trained on LSH. Although the models evaluated are multi-speaker, we only compute mean opinion scores on LJS. In addition, we use the mean opinion scores provided in (Prenger et al., 2019) for ground truth data from the LJS dataset.
145
-
146
- We crowd-sourced mean opinion score (MOS) tests on Amazon Mechanical Turk. Raters first had to pass a hearing test to be eligible. Then they listened to an utterance, after which they rated pleasantness on a five-point scale. We used 30 volume normalized utterances from all speakers disjoint from the training set for evaluation, and randomly chose the utterances for each subject.
147
-
148
- The mean opinion scores are shown in Table 1 with $95\%$ confidence intervals computed over approximately 250 scores per source. The results roughly match our subjective qualitative assessment. The larger advantage of Flowtron is in the control over the amount of speech variation and the manipulation of the latent space.
149
-
150
- <table><tr><td>Source</td><td>Flows</td><td>Mean Opinion Score (MOS)</td></tr><tr><td>Real</td><td>-</td><td>4.274 ± 0.1340</td></tr><tr><td>Flowtron</td><td>3</td><td>3.665 ± 0.1634</td></tr><tr><td>Tacotron 2</td><td>-</td><td>3.521 ± 0.1721</td></tr></table>
151
-
152
- Table 1: Mean Opinion Score (MOS) evaluations with $95\%$ confidence intervals for various sources.
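For reference, confidence intervals of this kind can be computed with a normal approximation over the individual ratings; the scores below are random placeholders, not the study's data:

```python
import numpy as np

scores = np.random.randint(1, 6, size=250).astype(float)   # placeholder 1-5 ratings
mean = scores.mean()
ci95 = 1.96 * scores.std(ddof=1) / np.sqrt(len(scores))
print(f"MOS = {mean:.3f} ± {ci95:.4f}")
```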
153
-
154
- # 4.3. Sampling the prior
155
-
156
- The simplest approach to generate samples with Flowtron is to sample from a prior distribution $z \sim \mathcal{N}(0, \sigma^2)$ and adjust $\sigma^2$ to control the amount of variation. Whereas $\sigma^2 = 0$ completely removes variation and produces outputs based on the model bias, increasing the value of $\sigma^2$ will increase the amount of variation in speech.
157
-
158
- # 4.3.1. SPEECH VARIATION
159
-
160
- To showcase the amount of variation and control thereof in Flowtron, we synthesize 10 mel-spectrograms and sample the Gaussian prior with $\sigma^2 \in \{0.0, 0.5, 1.0\}$ . All samples are generated conditioned on a fixed speaker Sally and text "How much variation is there?" to illustrate the relationship between $\sigma^2$ and variability.
161
-
162
- Our results show that despite all the variability added by increasing $\sigma^2$ , all the samples synthesized with Flowtron still produce high quality speech.
163
-
164
- Figure 2 also shows that unlike most SOTA models (Shen et al., 2017; Arik et al., 2017b;a; Ping et al., 2017; Skerry-Ryan et al., 2018; Wang et al., 2018; Binkowski et al., 2019), Flowtron generates sharp harmonics and well-resolved formants without a compound loss or Prenet and Postnet layers.
167
-
168
- ![](images/f531ff4688197dedc3f519712b69c73640420e32e215aa3fa17b6321580ddb03.jpg)
169
- (a) $\sigma^2 = 0$
170
-
171
- ![](images/c824fec24fc000d559eeb6f5034a1b6de248c08750f99912a3663c785251a8cd.jpg)
172
- (b) $\sigma^2 = 0.5$
173
-
174
- ![](images/687bb83515ae804a699ca82ebfa8cbad946bacbf8437f26a739c6c524fb58773.jpg)
175
- (c) $\sigma^2 = 1$
176
- Figure 2: Mel-spectrograms generated with Flowtron using different $\sigma^2$ . This parameter can be adjusted to control mel-spectrogram variability during inference.
177
-
178
- Now we show that adjusting $\sigma^2$ is a simple and valuable approach that provides more variation and control than Tacotron, without sacrificing speech quality. For this, we synthesize 10 samples with Tacotron 2 using different values for the Prenet dropout probability $p\in \{0.45,0.5,0.55\}$. We scale the dropout outputs such that the mean of the output remains equal to the mean with $p = 0.5$, the value used during training. Although we also provide samples computed on values of $p \in [0,1]$ in our supplemental material, we do not include them in our results because they are unintelligible.
181
-
182
- In Figure 3 below we provide scatter plots of sample duration in seconds. Our results show that whereas $\sigma^2 = 0$ produces samples with no variation in duration, larger values of $\sigma^2$ produce samples with more variation in duration. Humans manipulate word and sentence length to express themselves, hence this variability is valuable.
183
-
184
- ![](images/2dc6d966f1aa35365fc598f41080e3eb94de0570fd245b4c3708e026191c12c4.jpg)
185
- Figure 3: Sample duration in seconds given parameters $\sigma^2$ and $p$ . These results show that Flowtron provides more variation in sample duration than Tacotron 2.
186
-
187
- In Figure 4 we provide scatter plots of $F_{0}$ contours extracted with the YIN algorithm (De Cheveigné & Kawahara, 2002), with minimum $F_{0}$ , maximum $F_{0}$ and harmonicity threshold equal to $80\mathrm{Hz}$ , $400\mathrm{Hz}$ and 0.3 respectively. Our results show a behavior similar to the previous sample duration analysis. As expected, $\sigma^{2} = 0$ provides no variation in $F_{0}$ contour<sup>3</sup>, while increasing the value of $\sigma^{2}$ will increase the amount of variation in $F_{0}$ contours.
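A sketch of a comparable F0 extraction using librosa's YIN implementation; treating the paper's harmonicity threshold as librosa's `trough_threshold` parameter is an assumption, and the file path is a placeholder:

```python
import librosa

y, sr = librosa.load("sample.wav", sr=22050)
f0 = librosa.yin(y, fmin=80, fmax=400, sr=sr,
                 frame_length=1024, hop_length=256,
                 trough_threshold=0.3)   # assumed analogue of the harmonicity threshold
```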
188
-
189
- Our results in Figure 4 also show that the samples produced with Flowtron are considerably less monotonous than the samples produced with Tacotron 2. Whereas increasing $\sigma^2$ considerably increases variation in $F_0$ , modifying $p$ barely produces any variation. This is valuable because expressive speech is associated with non-monotonic $F_0$ contours.
190
-
191
- # 4.3.2. INTERPOLATION BETWEEN SAMPLES
192
-
193
- With Flowtron we can perform interpolation in $z$ -space to achieve interpolation in mel-spectrogram space. For this experiment we evaluate Flowtron models with and without speaker embeddings. For the experiment with speaker embeddings we choose the Sally speaker and the phrase "It is well known that deep generative models have a rich latent space". We generate mel-spectrograms by sampling $z \sim \mathcal{N}(0, 0.8)$ twice and interpolating between them over 100 steps.
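The interpolation itself is a simple linear blend in $z$-space; a minimal sketch follows, where each interpolated $z$ would then be decoded by the inverse flow as in the inference sketch above:

```python
import torch

z_a = torch.randn(1, 400, 80) * 0.8 ** 0.5   # two draws from N(0, 0.8)
z_b = torch.randn(1, 400, 80) * 0.8 ** 0.5

# 100 interpolation steps between the two latent codes.
zs = [(1 - a) * z_a + a * z_b for a in torch.linspace(0, 1, steps=100)]
```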
194
-
195
- ![](images/74b6f6d178ebe9e6a7f06a082d3ae60945991a1c3dd318cbb8938c05c77fbc18.jpg)
196
- (a) Flowtron $\sigma^2 = 0$
197
-
198
- ![](images/4cc52763d45a27a041fed184dce63e422f3e853380bbd5b7d7d37b4adf7b7739.jpg)
199
- (b) Flowtron $\sigma^2 = 0.5$
200
-
201
- ![](images/9e21bd90e7eb1bf44033713012683b9ff5c70e0e6b6f81daccf04388c2a0d555.jpg)
202
- (c) Flowtron $\sigma^2 = 1$
203
-
204
- ![](images/902ebaafea4e8691cb8f9576c354a86d410508db35a2b7f090901babc1fa1d0a.jpg)
205
- (d) Tacotron 2, $p\in \{0.45,0.5,0.55\}$
206
- Figure 4: $F_{0}$ contours obtained from samples generated by Flowtron and Tacotron 2 with different values for $\sigma^{2}$ and $p$ . Flowtron provides more expressivity than Tacotron 2.
207
-
208
- For the experiment without speaker embeddings we interpolate between Sally and Helen using the phrase "We are testing this model". First, we perform inference by sampling $z \sim \mathcal{N}(0, 0.5)$ until we find two $z$ values, $z_h$ and $z_s$ , that produce mel-spectrograms with Helen's and Sally's voice respectively. We then generate samples by performing inference while linearly interpolating between $z_h$ and $z_s$ .
209
-
210
- Our same-speaker interpolation samples show that Flowtron is able to interpolate between multiple samples while producing correct alignment maps. In addition, our different-speaker interpolation samples show that Flowtron is able to blur the boundaries between two speakers, creating a speaker that combines the characteristics of both.
211
-
212
- # 4.4. Sampling the posterior
213
-
214
- In this approach we generate samples with Flowtron by sampling a posterior distribution conditioned on prior evidence containing speech characteristics of interest, as described in (Gambardella et al., 2019; Kingma & Dhariwal, 2018). In this experiment, we collect prior evidence $z_{e}$ by performing a forward pass with the speaker id to be used during inference$^4$, the observed mel-spectrograms and the text from a set of samples with characteristics of interest. If necessary, we time-concatenate each $z_{e}$ with itself to fulfill minimum length requirements defined according to the text length to be spoken during inference.
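A minimal sketch of collecting and length-padding the evidence $z_{e}$; the forward-pass function, the evidence tensor and the minimum length are hypothetical stand-ins:

```python
import torch

def flowtron_forward(mel, text, speaker):
    # Placeholder: a trained model maps (mel, text, speaker) to a latent z of the same shape.
    return mel

mel_evidence = torch.randn(1, 200, 80)        # an observed sample with the desired style
z_e = flowtron_forward(mel_evidence, None, None)

min_len = 600                                 # length implied by the inference text (assumed)
reps = -(-min_len // z_e.size(1))             # ceiling division
z_e = z_e.repeat(1, reps, 1)[:, :min_len]     # time-concatenate z_e with itself
```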
217
-
218
- Tacotron 2 GST (Wang et al., 2018) has an equivalent posterior sampling approach, in which during inference the model is conditioned on a weighted sum of global style tokens (posterior) queried through an embedding of existing audio samples (prior). For Tacotron 2 GST, we evaluate two approaches: in one we use a single sample to query a style token; in the other we use an average style token computed over multiple samples.
219
-
220
- # 4.4.1. SEEN SPEAKER WITHOUT ALIGNMENTS
221
-
222
- In this experiment we compare Sally samples from Flowtron and Tacotron 2 GST generated by conditioning on the posterior computed over 30 Helen samples with the highest variance in fundamental frequency. The goal is to make a monotonic speaker sound expressive. Our experiments show that by sampling from the posterior or interpolating between the posterior and a standard Gaussian prior, Flowtron is able to make a monotonic speaker gradually sound more expressive. On the other hand, Tacotron 2 GST is barely able to alter characteristics of the monotonic speaker.
223
-
224
- # 4.4.2. SEEN SPEAKER WITH ALIGNMENTS
225
-
226
- We use a Flowtron model with speaker embeddings to illustrate Flowtron's ability to learn and transfer acoustic characteristics that are hard to express algorithmically but easy to perceive acoustically. We select a female speaker from LibriTTS with a distinctive nasal voice and oscillation in $F_{0}$ as our source speaker and transfer her style to a male speaker, also from LibriTTS, with acoustic characteristics that sound different from the female speaker. Unlike the previous experiment, this time the text and the alignment maps are transferred from the female to the male speaker.
227
-
228
- Figure 5 is an attempt to visualize the transfer of these acoustic qualities we described. It shows that after the transfer, the lower partials of the male speaker oscillate more and become more similar to the female speaker.
229
-
230
- # 4.4.3. UNSEEN SPEAKER STYLE
231
-
232
- We compare samples generated with Flowtron and Tacotron 2 GST with speaker embeddings in which we modify a speaker's style by using data from the same speaker but from a style not seen during training. Whereas Sally's data used during training consists of news article readings, the evaluation samples contain Sally's interpretation of the somber and vampiresque novel Born of Darkness.
233
-
234
- ![](images/feec29abeea58716be55d2721b04e418886a161f073d96450d46164a62d783f9.jpg)
235
- (a) Female
236
-
237
- ![](images/7d7ac4054e134c7de78153892277aed2809297beababa032997272ff97d8540a.jpg)
238
- (b) Transfer
239
-
240
- ![](images/c76aca10d669e5ca92748ed0f83584de62c03c1b11a9e5405b933e737cbe7643.jpg)
241
- (c) Male
242
- Figure 5: Mel-spectrograms from a female speaker, male speaker and a sample where we transfer the acoustic characteristics from the female speaker to the male speaker. It shows that the transferred sample is more similar to the female speaker than the male speaker.
243
-
244
- Our samples show that Tacotron 2 GST fails to emulate the somber style from Born of Darkness's data. We show that Flowtron succeeds in transferring not only the somber style of the evaluation data but also the long pauses associated with the narrative style.
245
-
246
- # 4.4.4. UNSEEN SPEAKER
247
-
248
- In this experiment we compare Flowtron and Tacotron 2 GST samples in which we transfer the speaking style of a speaker not seen during training. Both models use speaker embeddings.
249
-
250
- For these experiments, we consider two speakers. The first comes from speaker ID 03 from RAVDESS, a dataset with emotion labels. We focus on the label "surprised". The second speaker is Richard Feynman, using a set of 10 audio samples collected from the web.
251
-
252
- For each experiment, we use the Sally speaker and the sentences "Humans are walking on the street?" and "Surely you are joking mister Feynman," which do not exist in RAVDESS nor in the audio samples from Richard Feynman.
253
-
254
- The samples generated with Tacotron 2 GST are not able to emulate the surprised style from RAVDESS nor Feynman's prosody and acoustic characteristics. Flowtron, on the other hand, is able to make Sally sound surprised, which is drastically different from the monotonous baseline. Likewise, Flowtron is able to pick up on the prosody and articulation details particular to Feynman's speaking style, and transfer them to Sally.
255
-
256
- # 4.5. Sampling the Gaussian Mixture
257
-
258
- In this last section we showcase visualizations and samples from Flowtron Gaussian Mixture (GM). First we investigate how different mixture components and speakers are correlated. Then we provide sound examples in which we modulate speech characteristics by translating one of the dimensions of an individual component.
259
-
260
- # 4.5.1. VISUALIZING ASSIGNMENTS
261
-
262
- For the first experiment, we train a Flowtron Gaussian Mixture on LSH with 2 steps of flow, speaker embeddings and fixed mean and covariance (Flowtron GM-A). We obtain mixture component assignments per mel-spectrogram by performing a forward pass and averaging the component assignment over time and samples. Figure 6 shows that whereas most speakers are equally assigned to all components, component 7 is almost exclusively assigned to Helen's data.
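A sketch of how such assignments could be computed for a fixed spherical mixture; the latent codes and component means below are random placeholders, not the trained model's parameters:

```python
import torch

z = torch.randn(16, 400, 80)        # latents from forward passes (samples, frames, dims)
means = torch.randn(8, 80)          # fixed component means

# Responsibilities: softmax over components of log N(z | mu_k, I), then average
# over time and samples to obtain one assignment vector.
sq_dist = ((z.unsqueeze(-2) - means) ** 2).sum(-1)   # (samples, frames, components)
resp = torch.softmax(-0.5 * sq_dist, dim=-1)
assignment = resp.mean(dim=(0, 1))                   # (components,)
```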
263
-
264
- ![](images/95dc28151d1802e27b0464f8d320b4f457eeeaefe0f77711babf55383ef0537b.jpg)
265
- Figure 6: Component assignments for Flowtron GM-A. Unlike LJS and Sally, Helen is almost exclusively assigned to component 7.
266
-
267
- In the second experiment, we train a Flowtron Gaussian Mixture on LibriTTS with 1 step of flow, without speaker embeddings and predicted mean and covariance (Flowtron GM-B). Figure 7 shows that Flowtron GM assigns more probability to component 7 when the speaker is male than when it's female. Conversely, the model assigns more probability to component 6 when the speaker is female than when it's male.
268
-
269
- ![](images/3679a60d11546426354812b28643ceab8d77f27c5b05a6af502cd335db17ba09.jpg)
270
- Figure 7: Component assignments for Flowtron GM-B. Components 7 and 8 are assigned different probabilities according to gender, suggesting that the information stored in the components is gender dependent.
271
-
272
- # 4.5.2. TRANSLATING DIMENSIONS
273
-
274
- In this subsection, we use the model Flowtron GM-A described previously. We focus on selecting a single mixture component and translating one of its dimensions by adding an offset.
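A minimal sketch of this manipulation; the component index, dimension and offset are illustrative, and the means are placeholders for the fixed mixture parameters:

```python
import torch

means = torch.zeros(8, 80)            # placeholder mixture means (components, dims)
k, dim, offset = 7, 12, 2.0           # chosen component, dimension and offset

mu = means[k].clone()
mu[dim] += offset                     # translate one dimension of the selected component
z = mu + torch.randn(1, 400, 80)      # sample around the shifted mean; decode with the inverse flow
```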
275
-
276
- The samples in our supplementary material show that we are able to modulate specific speech characteristics like pitch and word duration. Although the samples generated by translating one of the dimensions associated with pitch height have different pitch contours, they have the same duration. Similarly, our samples show that translating the dimension associated with the length of the first word does not modulate the pitch of the first word. This provides evidence that we can modulate these attributes by manipulating these dimensions and that the model is able to learn a disentangled representation of these speech attributes.
277
-
278
- # 5. Discussion
279
-
280
- In this paper we propose a new text-to-mel-spectrogram synthesis model based on autoregressive flows that is optimized by maximizing the likelihood and allows for control of speech variation and style transfer. Our results show that samples generated with Flowtron achieve mean opinion scores that are similar to those of samples generated with state-of-the-art text-to-speech synthesis models. In addition, we demonstrate that at no extra cost and without a compound loss term, our model learns a latent space that stores non-textual information. Our experiments show that Flowtron gives the user the possibility to transfer characteristics from a source sample or speaker to a target speaker, for example making a monotonic speaker sound more expressive.
281
-
282
- Our results show that despite all the variability added by increasing $\sigma^2$, the samples synthesized with Flowtron still produce high quality speech. They also show that Flowtron learns a latent space over non-textual features that can be investigated and manipulated to give the user more control over the generative model's output. We provide many examples that showcase this, including increasing variation in mel-spectrograms in a controllable manner, transferring the style from speakers seen and unseen during training to another speaker using sentences with similar or different text, and making a monotonic speaker sound more expressive.
285
-
286
- Flowtron produces expressive speech without labeled data or ever seeing expressive data. It pushes text-to-speech synthesis beyond the expressive limits of personal assistants. It opens new avenues for speech synthesis in human-computer interaction and the arts, where realism and expressivity are of utmost importance. To our knowledge, this work is the first to demonstrate the advantages of using normalizing flow models in text to mel-spectrogram synthesis.
287
-
288
- # References
289
-
290
- Akuzawa, K., Iwasawa, Y., and Matsuo, Y. Expressive speech synthesis via modeling expressions with variational autoencoder. arXiv preprint arXiv:1804.02135, 2018.
291
- Arik, S., Diamos, G., Gibiansky, A., Miller, J., Peng, K., Ping, W., Raiman, J., and Zhou, Y. Deep voice 2: Multi-speaker neural text-to-speech. arXiv preprint arXiv:1705.08947, 2017a.
292
- Arik, S. O., Chrzanowski, M., Coates, A., Diamos, G., Gibiansky, A., Kang, Y., Li, X., Miller, J., Ng, A., Raiman, J., et al. Deep voice: Real-time neural text-to-speech. arXiv preprint arXiv:1702.07825, 2017b.
293
- Badham, J., Lasker, L., Parkes, W. F., Rubinstein, A. B., Broderick, M., Coleman, D., and Wood, J. Wargames, 1983.
294
- Binkowski, M., Donahue, J., Dieleman, S., Clark, A., Elsen, E., Casagrande, N., Cobo, L. C., and Simonyan, K. High fidelity speech synthesis with adversarial networks. arXiv preprint arXiv:1909.11646, 2019.
295
- De Cheveigné, A. and Kawahara, H. Yin, a fundamental frequency estimator for speech and music. The Journal of the Acoustical Society of America, 111(4):1917-1930, 2002.
296
- Dinh, L., Krueger, D., and Bengio, Y. Nice: Non-linear independent components estimation. arXiv preprint arXiv:1410.8516, 2014.
297
- Dinh, L., Sohl-Dickstein, J., and Bengio, S. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016.
298
- Gambardella, A., Baydin, A. G., and Torr, P. H. Transflow learning: Repurposing flow models without retraining. arXiv preprint arXiv:1911.13270, 2019.
299
- Gibiansky, A., Arik, S., Diamos, G., Miller, J., Peng, K., Ping, W., Raiman, J., and Zhou, Y. Deep voice 2: Multi-speaker neural text-to-speech. In Advances in neural information processing systems, pp. 2962-2970, 2017.
300
- Hsu, W.-N., Zhang, Y., Weiss, R. J., Zen, H., Wu, Y., Wang, Y., Cao, Y., Jia, Y., Chen, Z., Shen, J., et al. Hierarchical generative modeling for controllable speech synthesis. arXiv preprint arXiv:1810.07217, 2018.
301
- Ito, K. et al. The LJ speech dataset, 2017.
302
- Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
303
-
304
- Kingma, D. P. and Dhariwal, P. Glow: Generative flow with invertible 1x1 convolutions. arXiv preprint arXiv:1807.03039, 2018.
305
- Kingma, D. P., Salimans, T., Jozefowicz, R., Chen, X., Sutskever, I., and Welling, M. Improved variational inference with inverse autoregressive flow. In Advances in Neural Information Processing Systems, pp. 4743-4751, 2016.
306
- Lee, J., Choi, H.-S., Jeon, C.-B., Koo, J., and Lee, K. Adversarially trained end-to-end korean singing voice synthesis system. arXiv preprint arXiv:1908.01919, 2019.
307
- Nishimura, M., Hashimoto, K., Oura, K., Nankaku, Y., and Tokuda, K. Singing voice synthesis based on deep neural networks. In Interspeech 2016, pp. 2478-2482, 2016. doi: 10.21437/Interspeech.2016-1027. URL http://dx.doi.org/10.21437/Interspeech.2016-1027.
308
- Parmar, N., Vaswani, A., Uszkoreit, J., Kaiser, L., Shazeer, N., Ku, A., and Tran, D. Image transformer. arXiv preprint arXiv:1802.05751, 2018.
309
- Ping, W., Peng, K., Gibiansky, A., Arik, S. O., Kannan, A., Narang, S., Raiman, J., and Miller, J. Deep voice 3: 2000-speaker neural text-to-speech. arXiv preprint arXiv:1710.07654, 2017.
310
- Prenger, R., Valle, R., and Catanzaro, B. Waveglow: A flow-based generative network for speech synthesis. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3617-3621. IEEE, 2019.
311
- Radford, A., Metz, L., and Chintala, S. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015.
312
- Shen, J., Pang, R., Weiss, R. J., Schuster, M., Jaitly, N., Yang, Z., Chen, Z., Zhang, Y., Wang, Y., Skerry-Ryan, R., et al. Natural tts synthesis by conditioning wavenet on mel spectrogram predictions. arXiv preprint arXiv:1712.05884, 2017.
313
- Skerry-Ryan, R., Battenberg, E., Xiao, Y., Wang, Y., Stanton, D., Shor, J., Weiss, R. J., Clark, R., and Saurous, R. A. Towards end-to-end prosody transfer for expressive speech synthesis with tacotron. arXiv preprint arXiv:1803.09047, 2018.
314
- Umeda, N., Matsui, E., Suzuki, T., and Omura, H. Synthesis of fairy tales using an analog vocal tract. In Proceedings of 6th International Congress on Acoustics, pp. B159-162, 1968.
315
-
316
- Valle, R., Li, J., Prenger, R., and Catanzaro, B. Mellotron github repo, 2019a. URL https://github.com/NVIDIA/mellotron.
317
- Valle, R., Li, J., Prenger, R., and Catanzaro, B. Mellotron: Multispeaker expressive voice synthesis by conditioning on rhythm, pitch and global style tokens. arXiv preprint arXiv:1910.11997, 2019b.
318
- Vinyals, O., Kaiser, L., Koo, T., Petrov, S., Sutskever, I., and Hinton, G. Grammar as a foreign language. In Advances in neural information processing systems, pp. 2773-2781, 2015.
319
- Wang, Y., Skerry-Ryan, R., Stanton, D., Wu, Y., Weiss, R. J., Jaitly, N., Yang, Z., Xiao, Y., Chen, Z., Bengio, S., et al. Tacotron: A fully end-to-end text-to-speech synthesis model. arXiv preprint arXiv:1703.10135, 2017.
320
- Wang, Y., Stanton, D., Zhang, Y., Skerry-Ryan, R., Battenberg, E., Shor, J., Xiao, Y., Ren, F., Jia, Y., and Saurous, R. A. Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis. arXiv preprint arXiv:1803.09017, 2018.
321
- Weide, R. L. The cmu pronouncing dictionary. URL: http://wwwspeech.cs.cmu.edu/cgi-bin/cmudict, 1998.
322
- Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019.
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9dcbbd486d7d13769de29fb12c23301138493bdd28eb5fd8a78004620519068c
3
+ size 42593
 
data/2020/2005_05xxx/2005.05957/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e0f8aba387f7970f94d3bc605bb9303b0045ca388aafa1135b9ec571e53d2bb1
3
  size 290324
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e37af170217839acbebaf932d21879f7e89a2a2860a3276c559d78a1787ca6f2
3
  size 290324
data/2020/2005_05xxx/2005.05957/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05960/4c5c873d-7788-4d67-afc4-75f6ae3012da_content_list.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05960/4c5c873d-7788-4d67-afc4-75f6ae3012da_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05960/full.md CHANGED
@@ -1,454 +1,3 @@
1
- Ramanan Sekar $^{1*}$ Oleh Rybkin $^{1*}$ Kostas Daniilidis $^{1}$ Pieter Abbeel $^{2}$ Danijar Hafner $^{3,4}$ Deepak Pathak $^{5,6}$
2
-
3
- # Abstract
4
-
5
- Reinforcement learning allows solving complex tasks; however, the learning tends to be task-specific and the sample efficiency remains a challenge. We present Plan2Explore, a self-supervised reinforcement learning agent that tackles both these challenges through a new approach to self-supervised exploration and fast adaptation to new tasks, which need not be known during exploration. During exploration, unlike prior methods which retrospectively compute the novelty of observations after the agent has already reached them, our agent acts efficiently by leveraging planning to seek out expected future novelty. After exploration, the agent quickly adapts to multiple downstream tasks in a zero or a few-shot manner. We evaluate on challenging control tasks from high-dimensional image inputs. Without any training supervision or task-specific interaction, Plan2Explore outperforms prior self-supervised exploration methods, and in fact almost matches the performance of an oracle which has access to rewards. Videos and code: https://ramanans1.github.io/plan2explore/
6
-
7
- # 1. Introduction
8
-
9
- The dominant approach in sensorimotor control is to train the agent on one or more pre-specified tasks either via rewards in reinforcement learning, or via demonstrations in imitation learning. However, learning each task from scratch is often inefficient, requiring a large amount of task-specific environment interaction for solving each task. How can an agent quickly generalize to unseen tasks it has never experienced before in a zero or few-shot manner?
10
-
11
- *Equal contribution 1University of Pennsylvania 2UC Berkeley 3Google Research, Brain Team 4University of Toronto 5Carnegie Mellon University 6Facebook AI Research. Correspondence to: Oleh Rybkin <oleh@seas.upenn.edu>.
12
-
13
- Proceedings of the $37^{th}$ International Conference on Machine Learning, Vienna, Austria, PMLR 119, 2020. Copyright 2020 by the author(s).
14
-
15
- ![](images/73a14ad489e547e72b7e382c2ec126b53d90e7ae17069f59df51dd7465424666.jpg)
16
- Figure 1. The agent first leverages planning to explore in a self-supervised manner, without task-specific rewards, to efficiently learn a global world model. After the exploration phase, it receives reward functions at test time to adapt to multiple downstream tasks, such as standing, walking, running, and flipping, using either zero or few task-specific interactions.
17
-
18
- Task-agnostic RL Because data collection is often expensive, it would be ideal to not keep collecting data for each new task. Hence, we explore the environment once without reward to collect a diverse dataset for later solving any downstream task, as shown in Figure 1. After the task-agnostic exploration phase, the agent is provided with downstream reward functions and needs to solve the tasks with limited or no further environment interaction. Such a self-supervised approach would allow solving various tasks without having to repeat the expensive data collection for each new task.
19
-
20
- Intrinsic motivation To explore complex environments in the absence of rewards, the agent needs to follow a form of intrinsic motivation that is computed from inputs that could be high-dimensional images. For example, an agent could seek inputs that it cannot yet predict accurately (Schmidhuber, 1991b; Oudeyer et al., 2007; Pathak et al., 2017), maximally influence its inputs (Klyubin et al., 2005; Eysenbach et al., 2018), or visit rare states (Poupart et al., 2006; Lehman & Stanley, 2011; Bellemare et al., 2016; Burda et al., 2018). However, most prior methods learn a model-free exploration policy to act in the environment which needs large amounts of samples for finetuning or adaptation when presented with rewards for downstream tasks.
21
-
22
- Retrospective novelty Model-free exploration methods not only require large amounts of experience to adapt to downstream tasks, they can also be inefficient during exploration. These agents usually first act in the environment, collect trajectories, and then calculate an intrinsic reward as the agent's current estimate of novelty.
23
-
24
- ![](images/145d1644d095a19dfae4dca9497196e132801302d7788cb017958e9056988883.jpg)
25
- Figure 2. Overview of Plan2Explore. Each observation $o_{t}$ at time $t$ is first encoded into features $h_{t}$ which are then used to infer a recurrent latent state $s_{t}$ . At each training step, the agent leverages planning to explore by imagining the consequences of the actions of policy $\pi_{\phi}$ using the current world model. The planning objective is to maximize expected novelty $r_{t}^{i}$ over all future time steps, computed as the disagreement in the predicted next image embedding $h_{t + 1}$ from an ensemble of learned transition dynamics $w_{k}$ . This planning objective is backpropagated all the way through the imagined rollout states to improve the exploration policy $\pi_{\phi}$ . The learned model is used for planning to explore in latent space, and the data collected during exploration is in turn used to improve the model. This world model is then later used to plan for novel tasks at test time by replacing novelty reward with task reward.
26
-
27
- This approach misses out on efficiency by operating retrospectively, that is, the novelty of inputs is computed after the agent has already reached them. For instance, in curiosity (Pathak et al., 2017), novelty is measured by computing the error between the prediction of the next state and the ground truth only after the agent has visited the next state. Hence, it seeks out previously novel inputs that have already been visited and would not be novel anymore. Instead, one should directly seek out future inputs that are expected to be novel.
28
-
29
- Planning to explore We address both of these challenges – quick adaptation and expected future novelty – within a common framework while learning directly from high-dimensional image inputs. Instead of maximizing intrinsic rewards in retrospect, we learn a world model to plan ahead and seek out the expected novelty of future situations. This lets us learn the exploration policy purely from imagined model states, without causing additional environment interaction (Sun et al., 2011; Shyam et al., 2019). The exploration policy is optimized purely from trajectories imagined under the model to maximize the intrinsic rewards computed by the model itself. After the exploration phase, the learned world model is used to train downstream task policies in imagination via offline reinforcement learning, without any further environment interaction.
30
-
31
- Challenges The key challenges for planning to explore are to train an accurate world model from high-dimensional inputs and to define an effective exploration objective. We focus on world models that predict ahead in a compact latent space and have recently been shown to solve challenging control tasks from images (Hafner et al., 2019; Zhang et al., 2019). Predicting future compact representations facilitates accurate long-term predictions and lets us efficiently predict thousands of future sequences in parallel for policy learning.
34
-
35
- An ideal exploration objective should seek out inputs that the agent can learn the most from (epistemic uncertainty) while being robust to stochastic parts of the environment that cannot be learned accurately (aleatoric uncertainty). This is formalized in the expected information gain (Lindley, 1956), which we approximate as the disagreement in predictions of an ensemble of one-step models. These one-step models are trained alongside the world model and mimic its transition function. The disagreement is positive for novel states, but given enough samples, it eventually reduces to zero even for stochastic environments because all one-step predictions converge to the mean of the next input (Pathak et al., 2019).
36
-
37
- Contributions We introduce Plan2Explore, a self-supervised reinforcement learning agent that leverages planning to efficiently explore visual environments without rewards. Across 20 challenging control tasks without access to proprioceptive states or rewards, Plan2Explore achieves state-of-the-art zero-shot and adaptation performance. Moreover, we empirically study the questions:
38
-
39
- - How does planning to explore via latent disagreement compare to a supervised oracle and other model-free and model-based intrinsic reward objectives?
40
- - How much task-specific experience is enough to fine-tune a self-supervised model to reach the performance of a task-specific agent?
41
- - To what degree does a self-supervised model generalize to unseen tasks compared to a task-specific model trained on a different task in the same environment?
42
- - What is the advantage of maximizing expected future novelty in comparison to retrospective novelty?
43
-
44
- # 2. Control with Latent Dynamics Models
45
-
46
- World models summarize past experience into a representation of the environment that enables predicting imagined future sequences (Sutton, 1991; Watter et al., 2015; Ha & Schmidhuber, 2018). When sensory inputs are high-dimensional observations, predicting compact latent states $s_t$ lets us predict many future sequences in parallel due to memory efficiency. Specifically, we use the latent dynamics model of PlaNet (Hafner et al., 2019), which consists of the following key components, illustrated in Figure 2:
47
-
48
- Image encoder: $h_t = e_\theta (o_t)$
49
-
50
- Posterior dynamics: $q_{\theta}(s_t \mid s_{t-1}, a_{t-1}, h_t)$
51
-
52
- Prior dynamics: $p_{\theta}(s_t \mid s_{t-1}, a_{t-1})$ (1)
53
-
54
- Reward predictor: $p_{\theta}(r_t \mid s_t)$
55
-
56
- Image decoder: $p_{\theta}(o_t \mid s_t)$ .
57
-
58
- The image encoder is implemented as a CNN, and the posterior and prior dynamics share an RSSM (Hafner et al., 2019). The temporal prior predicts forward without access to the corresponding image. The reward predictor and image decoder provide a rich learning signal to the dynamics. The distributions are parameterized as diagonal Gaussians. All model components are trained jointly similar to a variational autoencoder (VAE) (Kingma & Welling, 2013; Rezende et al., 2014) by maximizing the evidence lower bound (ELBO).
59
-
60
- Given this learned world model, we need to derive behaviors from it. Instead of online planning, we use Dreamer (Hafner et al., 2020) to efficiently learn a parametric policy inside the world model that considers long-term rewards. Specifically, we learn two neural networks that operate on latent states of the model. The state-value network estimates the sum of future rewards and the actor tries to maximize these predicted values,
61
-
62
- Actor: $\pi (a_{t}\mid s_{t})$ Value: $V(s_{t})$ (2)
63
-
64
- The learned world model is used to predict the sequences of future latent states under the current actor starting from the latent states obtained by encoding images from the replay buffer. The value function is computed at each latent state and the actor policy is trained to maximize the predicted values by propagating their gradients through the neural network dynamics model as shown in Figure 2.
65
-
66
- # 3. Planning to Explore
67
-
68
- We consider a learning setup with two phases, as illustrated in Figure 1. During self-supervised exploration, the agent gathers information about the environment and summarizes this past experience in the form of a parametric world model.
69
-
70
- # Algorithm 1 Planning to Explore via Latent Disagreement
71
-
72
- 1: initialize: Dataset D from a few random episodes.
73
- 2: World model M.
74
- 3: Latent disagreement ensemble E.
75
- 4: Exploration actor-critic $\pi_{\mathrm{LD}}$
76
- 5: while exploring do
77
- 6: Train M on D.
78
- 7: Train E on D.
79
- 8: Train $\pi_{\mathrm{LD}}$ on LD reward in imagination of M.
80
- 9: Execute $\pi_{\mathrm{LD}}$ in the environment to expand D.
81
-
82
- 10: end while
83
-
84
- 11: return Task-agnostic D and M.
85
-
86
- # Algorithm 2 Zero and Few-Shot Task Adaptation
87
-
88
- 1: input: World model M.
89
- 2: Dataset D without rewards.
90
- 3: Reward function R.
91
- 4: initialize: Latent-space reward predictor $\hat{\mathbf{R}}$ .
92
- 5: Task actor-critic $\pi_{\mathrm{R}}$
93
- 6: while adapting do
94
- 7: Distill R into $\hat{\mathbf{R}}$ for sequences in D.
95
- 8: Train $\pi_{\mathbb{R}}$ on $\hat{\mathbf{R}}$ in imagination of M.
96
- 9: Execute $\pi_{\mathrm{R}}$ for the task and report performance.
97
-
98
- 10: Optionally, add task-specific episode to D and repeat.
99
-
100
- 11: end while
101
-
102
- 12: return Task actor-critic $\pi_{\mathrm{R}}$
103
-
104
- After exploration, the agent is given a downstream task in the form of a reward function that it should adapt to with no or limited additional environment interaction.
105
-
106
- During exploration, the agent begins by learning a global world model using data collected so far, and then this model is in turn used to direct agent's exploration to collect more data, as described in Algorithm 1. This is achieved by training an exploration policy inside of the world model to seek out novel states. Novelty is estimated by ensemble disagreement in latent predictions made by 1-step transition models trained alongside the global recurrent world model. More details to follow in Section 3.1.
107
-
108
- During adaptation, we can efficiently optimize a task policy by imagination inside of the world model, as shown in Algorithm 2. Since our self-supervised model is trained without being biased toward a specific task, a single trained model can be used to solve multiple downstream tasks.
109
-
110
- # 3.1. Latent Disagreement
111
-
112
- To efficiently learn a world model of an unknown environment, a successful strategy should explore the environment so as to collect new experience that improves the model the most. For this, we quantify the model's uncertainty about its predictions for different latent states. An exploration policy then seeks out states with high uncertainty. The model is then trained on the newly acquired trajectories, its uncertainty about them decreases, and the process is repeated.
115
-
116
- Quantifying uncertainty is a long-standing open challenge in deep learning (MacKay, 1992; Gal, 2016). In this paper, we use ensemble disagreement as an empirically successful method for quantifying uncertainty (Lakshminarayanan et al., 2017; Osband et al., 2018). As shown in Figure 2, we train a bootstrap ensemble (Breiman, 1996) to predict, from each model state, the next encoder features. The variance of the ensemble serves as an estimate of uncertainty.
117
-
118
- Intuitively, because the ensemble models have different initialization and observe data in a different order, their predictions differ for unseen inputs. Once the data is added to the training set, however, the models will converge towards more similar predictions, and the disagreement decreases. Eventually, once the whole environment is explored, the models should converge to identical predictions.
119
-
120
- Formally, we define a bootstrap ensemble of one-step predictive models with parameters $\{w_{k} \mid k \in [1;K]\}$ . Each of these models takes a model state $s_t$ and action $a_t$ as input and predicts the next image embedding $h_{t+1}$ . The models are trained with the mean squared error, which is equivalent to Gaussian log-likelihood,
121
-
122
- $$
123
- \text{Ensemble predictors:} \quad q\left(h_{t+1} \mid w_{k}, s_{t}, a_{t}\right) \tag{3}
124
- $$
125
-
126
- $$
127
- q\left(h_{t+1} \mid w_{k}, s_{t}, a_{t}\right) \triangleq \mathcal{N}\left(\mu\left(w_{k}, s_{t}, a_{t}\right), 1\right).
128
- $$
129
-
130
- We quantify model uncertainty as the variance over predicted means of the different ensemble members and use this disagreement as the intrinsic reward $\mathrm{ir}_t\triangleq \mathrm{D}(s_t,a_t)$ to train the exploration policy,
131
-
132
- $$
133
- \begin{array}{l} \mathrm{D}\left(s_{t}, a_{t}\right) \triangleq \operatorname{Var}\left(\left\{\mu\left(w_{k}, s_{t}, a_{t}\right) \mid k \in [1; K]\right\}\right) \\ = \frac{1}{K-1} \sum_{k}\left(\mu\left(w_{k}, s_{t}, a_{t}\right) - \mu^{\prime}\right)^{2}, \tag{4} \end{array}
134
- $$
135
-
136
- $$
137
- \mu^{\prime} \triangleq \frac{1}{K} \sum_{k} \mu\left(w_{k}, s_{t}, a_{t}\right).
138
- $$
139
-
140
- The intrinsic reward is non-stationary because the world model and the ensemble predictors change throughout exploration. Indeed, once certain states are visited by the agent and the model gets trained on them, these states will become less interesting for the agent and the intrinsic reward for visiting them will decrease.
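A minimal sketch of the disagreement reward of Equation 4; the ensemble members below are tiny linear stand-ins for the one-step models, and averaging the per-dimension variance is one possible reduction, not necessarily the authors' choice:

```python
import torch

def disagreement_reward(ensemble, s, a):
    """Variance of the ensemble's predicted mean embeddings (Eq. 4), averaged over dims."""
    mus = torch.stack([w(s, a) for w in ensemble], dim=0)   # (K, batch, embed_dim)
    return mus.var(dim=0, unbiased=True).mean(dim=-1)       # (batch,)

K, state_dim, act_dim, embed_dim = 5, 30, 6, 64
layers = [torch.nn.Linear(state_dim + act_dim, embed_dim) for _ in range(K)]
ensemble = [lambda s, a, m=m: m(torch.cat([s, a], dim=-1)) for m in layers]

s, a = torch.randn(8, state_dim), torch.randn(8, act_dim)
r_int = disagreement_reward(ensemble, s, a)
```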
141
-
142
- We learn the exploration policy using Dreamer (Section 2). Since the intrinsic reward is computed in the compact representation space of the latent dynamics model, we can optimize the learned actor and value from imagined latent trajectories without generating images. This lets us efficiently
143
-
144
- optimize the intrinsic reward without additional environment interaction. Furthermore, the ensemble of lightweight 1-step models adds little computational overhead as they are trained together efficiently in parallel across all time steps.
145
-
146
- # 3.2. Expected Information Gain
147
-
148
- Latent disagreement has an information-theoretic interpretation. This subsection derives our method from the amount of information gained by interacting with the environment, which has its roots in optimal Bayesian experiment design (Lindley, 1956; MacKay, 1992).
149
-
150
- Because the true dynamics are unknown, the agent treats the optimal dynamics parameters as a random variable $w$ . To explore the environment as efficiently as possible, the agent should seek out future states that are informative of our belief over the parameters.
151
-
152
- Mutual information formalizes the amount of bits that a future trajectory provides about the optimal model parameters on average. We aim to find a policy that shapes the distribution over future states to maximize the mutual information between the image embeddings $h_{1:T}$ and parameters $w$ ,
153
-
154
- $$
155
- I \left(h _ {t + 1}; w \mid s _ {t}, a _ {t}\right) \tag {5}
156
- $$
157
-
158
- We operate on latent image embeddings to save computation. To select the most promising data during exploration, the agent maximizes the expected information gain,
159
-
160
- $$
161
- a _ {t} ^ {*} \triangleq \underset {a _ {t}} {\arg \max } \mathrm {I} \left(h _ {t + 1}; w \mid s _ {t}, a _ {t}\right). \tag {6}
162
- $$
163
-
164
- This expected information gain can be rewritten as conditional entropy of trajectories subtracted from marginal entropy of trajectories, which correspond to, respectively, the aleatoric and the total uncertainty of the model,
165
-
166
- $$
167
- \begin{array}{l} \mathrm {I} \left(h _ {t + 1}; w \mid s _ {t}, a _ {t}\right) \\ = \mathrm {H} \left(h _ {t + 1} \mid s _ {t}, a _ {t}\right) - \mathrm {H} \left(h _ {t + 1} \mid w, s _ {t}, a _ {t}\right). \tag {7} \\ \end{array}
168
- $$
169
-
170
- We see that the information gain corresponds to the epistemic uncertainty, i.e. the reducible uncertainty of the model that is left after subtracting the expected amount of data noise from the total uncertainty.
171
-
172
- Trained via squared error, our ensemble members are conditional Gaussians with means produced by neural networks and fixed variances. The ensemble can be seen as a mixture distribution of parameter point masses,
173
-
174
- $$
175
- p (w) \triangleq \frac {1}{K} \sum_ {k} \delta \left(w - w _ {k}\right) \tag {8}
176
- $$
177
-
178
- $$
179
- p \left(h _ {t + 1} \mid w _ {k}, s _ {t}, a _ {t}\right) \triangleq \mathcal {N} \left(h _ {t + 1} \mid \mu \left(w _ {k}, s _ {t}, a _ {t}\right), \sigma^ {2}\right).
180
- $$
181
-
182
- Because the variance is fixed, the conditional entropy does not depend on the state or action in our case ($D$ is the dimensionality of the predicted embedding),
183
-
184
- ![](images/c3566b7a930ba96021a7ee8960a380d526219e6b52f5737a245619df60b3efc0.jpg)
185
-
186
- ![](images/235a68fb020ad795b7b384c6b3b444ffe4ca63fa8ec3556070a0a2b52d8e73fb.jpg)
187
-
188
- ![](images/bebd51b4fcebba8640c718b821ce369b5ad558f98b736cf1e71f8dd67a2b4515.jpg)
189
-
190
- ![](images/b5e5cffcb6a33b127bf8efd6a1b20fde02876b763816fd9f1d7f6d0045c7c304.jpg)
191
-
192
- Figure 3. Zero-shot RL performance from raw pixels. After training the agent without rewards, we provide it with a task by specifying the reward function at test time. Throughout the exploration, we take snapshots of the agent to train a task policy on the final task and plot its zero-shot performance. We see that Plan2Explore achieves state-of-the-art zero-shot task performance on a range of tasks, and even demonstrates competitive performance to Dreamer (Hafner et al., 2020), a state-of-the-art supervised RL agent. This indicates that Plan2Explore is able to explore and learn a global model of the environment that is useful for adapting to new tasks, demonstrating the potential of self-supervised RL. Results on all 20 tasks are in the appendix Figure 6 and videos on the project website.
193
- ![](images/5048f101c2440eb025a71a6982fa5c77c345af1b57ffad62738c356f8465a1bf.jpg)
194
- Dreamer (Hafner et al., 2020) [sup] MAX (Shyam et al., 2019) [unsup]
195
-
196
- ![](images/5849b2c73aa0ada5d14f86567ee9d5306442722e318790411f37f9ee6c126626.jpg)
197
- Plan2Explore (Ours) [unsup] Retrospective (Pathak et al., 2019) [unsup]
198
-
199
- ![](images/b212580804ed11801a3af91c73349310def308b071fe78c591e32cec4105c640.jpg)
200
- Curiosity (Pathak et al., 2017) [unsup] Random [unsup]
201
-
202
- ![](images/91eecad18cd3c2d8d326ae5e9f147a23c129da22a2625dcd719d2f11cdbeb3c3.jpg)
203
-
204
205
-
206
- $$
207
- \begin{array}{l} \mathrm{H}\left(h_{t+1} \mid w, s_{t}, a_{t}\right) = \frac{1}{K} \sum_{k} \mathrm{H}\left(h_{t+1} \mid w_{k}, s_{t}, a_{t}\right) \tag{9} \\ = \frac{D}{K} \sum_{k} \ln \sigma_{k}\left(s_{t}, a_{t}\right) + \text{const.} \end{array}
208
- $$
209
-
210
- We note that this fixed variance approach is applicable even in environments with heteroscedastic variance, where it will measure the information gain about the mean prediction.
211
-
212
- Maximizing information gain then means to simply maximize the marginal entropy of the ensemble prediction. For this, we make the following observation: the marginal entropy is maximized when the ensemble means are far apart (disagreement) so the modes overlap the least, maximally spreading out probability mass. As the marginal entropy has no closed-form expression suitable for optimization, we instead use the empirical variance over ensemble means to measure how far apart they are,
213
-
214
- $$
215
- \begin{array}{l} \mathrm {D} \left(s _ {t}, a _ {t}\right) \triangleq \frac {1}{K - 1} \sum_ {k} \left(\mu \left(w _ {k}, s _ {t}, a _ {t}\right) - \mu^ {\prime}\right) ^ {2}, \tag {10} \\ \mu^ {\prime} \triangleq \frac {1}{K} \sum_ {k} \mu \left(w _ {k}, s _ {t}, a _ {t}\right). \\ \end{array}
216
- $$
217
-
218
- To summarize, our exploration objective defined in Section 3.1, which maximizes the variance of ensemble means,
219
-
220
- approximates the information gain and thus should find trajectories that will efficiently reduce the model uncertainty.
221
-
222
- # 4. Experimental Setup
223
-
224
- Environment Details We use the DM Control Suite (Tassa et al., 2018), a standard benchmark for continuous control. All experiments use visual observations only, of size $64 \times 64 \times 3$ pixels. The episode length is 1000 steps and we apply an action repeat of $R = 2$ for all the tasks. We run every experiment with three different random seeds with standard deviation shown in the shaded region. Further details are in the appendix.
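An illustrative environment loop under this setup, using the dm_control suite API with a random policy; this is a sketch of the interface, not the agent's actual data collection code:

```python
from dm_control import suite
import numpy as np

env = suite.load(domain_name="walker", task_name="walk")   # one example task
spec = env.action_spec()

time_step = env.reset()
for _ in range(500):                                        # 1000 steps with action repeat 2
    action = np.random.uniform(spec.minimum, spec.maximum, size=spec.shape)
    for _ in range(2):                                      # action repeat R = 2
        time_step = env.step(action)
    pixels = env.physics.render(height=64, width=64, camera_id=0)  # 64x64x3 observation
```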
225
-
226
- Implementation We use (Hafner et al., 2020) with the original hyperparameters unless specified otherwise to optimize both exploration and task policies of Plan2Explore. We found that the additional capacity provided by increasing the hidden size of the GRU in the latent dynamics model to 400 and the deterministic and stochastic components of the latent space to 60 helped performance. For a fair comparison, we maintain this model size for Dreamer and the other baselines. For latent disagreement, we use an ensemble of 5 one-step prediction models, each implemented as a 2-hidden-layer MLP. Full details are in the appendix.
227
-
228
- Baselines We compare our agent to a state-of-the-art task-oriented agent that receives rewards throughout training, Dreamer (Hafner et al., 2020).
229
-
230
- ![](images/07c17656ebf69c748b4193649d67cdb45f7f35b91f7f46d04da3fffd818b5e7d.jpg)
231
-
232
- ![](images/abf5bd897b7e6ecb0ca9a15a81ee0939f391e0a4be131682a1e7411c884c3881.jpg)
233
-
234
- ![](images/c23972170253f9935c38e37ad9f9ba98fb1e9aa66460f3b3698c28a0367019d9.jpg)
235
-
236
- ![](images/3f058a7038a5129ff9ce89327e92148289612cfd09cb146babaaf192712da474.jpg)
237
-
238
- ![](images/4b69ff0460674904c75660388cfe1df72741cd74c79ea2ee3cdaf9160aab32e0.jpg)
239
- Figure 4. Performance on few-shot adaptation from raw pixels without state-space input. After the exploration phase of 1M steps (white background), during which the agent does not observe the reward and thus does not solve the task, we let the agent collect a small amount of data from the environment (shaded background). We see that Plan2Explore is able to explore the environment efficiently in only 1000 episodes, and then adapt its behavior immediately after observing the reward. Plan2Explore adapts rapidly, producing effective behavior competitive to state-of-the-art supervised reinforcement learning in just a few collected episodes.
240
-
241
- ![](images/9a8d3f2270e228427f6beb069dbcbcde3daf56e72fd54026e9d691c9381ce0da.jpg)
242
-
243
- ![](images/4e027fa5690ca624f69403aba4b57379e29bf7b00a16a7f038f7199058d241e2.jpg)
244
-
245
- ![](images/f6dcf9f40b8cb7d0a7c5b58abfd66da231aa4dc0f572db91b0fc7bd1d0a76c52.jpg)
246
-
247
- Dreamer (Hafner et al., 2020) [sup]
248
-
249
- Plan2Explore (Ours) [unsup]
250
-
251
- Curiosity (Pathak et al., 2017) [unsup]
252
-
253
- MAX (Shyam et al., 2019) [unsup]
254
-
255
- Retrospective (Pathak et al., 2019) [unsup]
256
-
257
- Random [unsup]
258
-
259
- We also compare to state-of-the-art unsupervised agents: Curiosity (Pathak et al., 2017) and Model-based Active Exploration (Shyam et al., 2019, MAX). Because Curiosity is inefficient during fine-tuning and would not be able to solve a task in a zero-shot way, we adapt it into the model-based setting. We further adapt MAX to work with image observations as (Shyam et al., 2019) only addresses learning from low-dimensional states. We use (Hafner et al., 2020) as the base agent for all methods to provide a fair comparison. We additionally compare to a random data collection policy that uniformly samples from the action space of the environment. All methods share the same model hyperparameters.
260
-
261
- # 5. Results and Analysis
262
-
263
- Our experiments evaluate whether our proposed Plan2Explore agent efficiently explores and builds a model of the world that allows quick adaptation to solve tasks in a zero-shot or few-shot manner. The following subsections are organized around the key scientific questions raised in the introduction.
264
-
265
- # 5.1. Does the model transfer to solve tasks zero-shot?
266
-
267
- To test whether Plan2Explore has learned a global model of the environment that can be used to solve new tasks, we
268
-
269
- evaluate the zero-shot performance of our agent. Our agent learns a model without using any task-specific information. After that, a separate downstream agent is trained in imagination, optimizing the task reward using only the self-supervised world model and no new interaction with the world. To specify the task, we provide the agent with the reward function, which is used to label its replay buffer with rewards and to train a reward predictor. This process is described in Algorithm 2, with step 10 omitted.
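- A minimal sketch of this relabeling step, assuming access to the provided reward function and a buffer of stored transitions: rewards are attached to past experience and a small reward head is regressed on latent features, after which the downstream agent can be optimized purely in imagination. All names and shapes below are illustrative.
-
- ```python
- # Sketch: relabel the task-agnostic replay buffer with a newly provided reward
- # function and fit a reward predictor on latent states. Names/shapes illustrative.
- import torch
- import torch.nn as nn
-
- def relabel(transitions, reward_fn):
-     for tr in transitions:                       # transitions collected without rewards
-         tr['reward'] = reward_fn(tr['env_state'], tr['action'])
-     return transitions
-
- def fit_reward_head(latents, rewards, latent_dim=260, steps=100, lr=3e-4):
-     head = nn.Sequential(nn.Linear(latent_dim, 400), nn.ELU(), nn.Linear(400, 1))
-     opt = torch.optim.Adam(head.parameters(), lr=lr)
-     for _ in range(steps):
-         loss = nn.functional.mse_loss(head(latents).squeeze(-1), rewards)
-         opt.zero_grad()
-         loss.backward()
-         opt.step()
-     return head                                  # used to predict rewards in imagination
-
- latents = torch.randn(4096, 260)                 # latents inferred from replayed frames
- rewards = torch.rand(4096)                       # rewards produced by relabeling
- reward_head = fit_reward_head(latents, rewards)
- ```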
270
-
271
- In Figure 3, we compare the zero-shot performance of our downstream agent with different amounts of exploration data. This is done by training the downstream agent in imagination at each training checkpoint. The same architecture and hyper-parameters are used for all the methods for a fair comparison. We see that Plan2Explore overall performs better than prior state-of-the-art exploration strategies from high dimensional pixel input, sometimes being the only successful unsupervised method. Moreover, the zero-shot performance of Plan2Explore is competitive to Dreamer, even outperforming it in the hopper hop task.
272
-
273
- Plan2Explore was able to successfully learn a good model of the environment and efficiently derive task-oriented behaviors from this model. We emphasize that Plan2Explore explores without task rewards, and Dreamer is the oracle as it is given task rewards during exploration. Yet, Plan2Explore almost matches the performance of this oracle.
274
-
275
- Figure 5. Do task-specific models generalize? We test Plan2Explore on zero-shot performance on four different tasks in the cheetah environment from raw pixels without state-space input. Throughout exploration, we take snapshots of the policy to plot its zero-shot performance. In addition to random exploration, we compare to an oracle agent, Dreamer, that uses the data collected when trained on the run forward task with rewards. Although Dreamer trained on 'run forward' is able to solve the task it is trained on, it struggles on the other tasks, indicating that it has not learned a global world model.
276
- ![](images/2bbe09fa783cbd4973a6f7fcb75b5923c2850805894792b14167dbdb3ca4f1d7.jpg)
277
- Dreamer (Hafner et al., 2020) [sup] (Trained on Run Forward) Plan2Explore (Ours) [unsup] Random [unsup]
278
-
279
- ![](images/5da5ffd880f783234b4470fa5179c507a4ffbeb0ff332a0b440f21f3e44e0dec.jpg)
280
-
281
- ![](images/25c69d35455b5663489e6dedc50ca8a69e297cabb62f0f89d2b59bd607a002bf.jpg)
282
-
283
- ![](images/d98fec28053e364982426c5f90ee746bd4d128103435cde81b674b24e8a824a8.jpg)
284
-
285
- # 5.2. How much task-specific interaction is needed for finetuning to reach the supervised oracle?
286
-
287
- While zero-shot learning might suffice for some tasks, in general we will want to adapt our model of the world to task-specific information. In this section, we test whether few-shot adaptation of the model to a particular task is competitive with training a fully supervised task-specific model. To adapt our model, we add only 100-150 supervised episodes, which qualifies as 'few-shot' adaptation. Furthermore, to evaluate the data efficiency of Plan2Explore in this setup, we limit exploration to only 1000 episodes.
288
-
289
- In the exploration phase of Figure 4, i.e., left of the vertical line, our agent does not aim to solve the task, as it is still unknown. However, we expect that during some period of exploration it will coincidentally achieve higher rewards as it explores the parts of the state space relevant for the task. The performance of the unsupervised methods is coincidental for the first 1000 episodes and then switches to task-oriented behavior for the remaining 150 episodes, whereas the supervised agent is task-oriented throughout. This explains the large jump for the unsupervised methods where the shaded region begins.
290
-
291
- In the few-shot learning setting, Plan2Explore eventually performs competitively with Dreamer on all tasks, significantly outperforming it on the hopper task. Plan2Explore also adapts as quickly as, or more quickly than, the other unsupervised agents on all tasks. These results show that a self-supervised agent, when presented with a task specification, can rapidly adapt its model to the task information, matching or outperforming the fully supervised agent trained only for that task. Moreover, Plan2Explore learns this general model with a small number of samples, matching the data efficiency of Dreamer, which is fully task-specific. This shows the potential of unsupervised pre-training in reinforcement learning. Please refer to the appendix for detailed quantitative results.
292
-
293
- # 5.3. Do self-supervised models generalize better than supervised task-specific models?
294
-
295
- If the quality of our learned model is good, it should be transferable to multiple tasks. In this section, we test the quality of the learned model on generalization to multiple tasks in the same environment. We devise a set of three new tasks for the Cheetah environment, specifically, running backward, flipping forward, and flipping backward. We evaluate the zero-shot performance of Plan2Explore and additionally compare it to a Dreamer agent that is only allowed to collect data on the running forward task and then tested on zero-shot performance on the three other tasks.
296
-
297
- Figure 5 shows that while Dreamer performs well on the task it is trained on, running forward, it fails to solve the other tasks, performing comparably to random exploration. It even fails to generalize to the running backward task. In contrast, Plan2Explore performs well across all tasks, outperforming Dreamer on the other three tasks. This indicates that the model learned by Plan2Explore is indeed global, while the task-oriented model learned by Dreamer fails to generalize to different tasks.
298
-
299
- # 5.4. What is the advantage of maximizing expected novelty in comparison to retrospective novelty?
300
-
301
- Our Plan2Explore agent is able to measure expected novelty by imagining future states that have not been visited yet. A model-free agent, in contrast, is only trained on the states from the replay buffer, and only gets to see the novelty in retrospect, after the state has been visited. Here, we evaluate the advantages of computing expected versus retrospective novelty by comparing Plan2Explore to a one-step planning agent. The one-step planning agent is not able to plan to visit states that are more than one step away from the replay buffer and is somewhat similar to a Q-learning agent with a particular parametrization of the Q-function. We refer to this approach as Retrospective Disagreement. Figures 3 and 4
302
-
303
- show the performance of this approach. Our agent achieves superior performance, which is consistent with our intuition about the importance of computing expected novelty.
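- The distinction can be made concrete with a toy sketch: expected novelty scores imagined future latents that were never executed, while the retrospective variant can only score transitions already stored in the replay buffer. The policy, dynamics, and disagreement function below are stand-ins, not the learned model.
-
- ```python
- # Toy contrast between expected and retrospective novelty. All components are
- # stand-ins; a real agent would use the learned latent dynamics and the
- # ensemble-disagreement reward.
- import torch
-
- def expected_novelty(latent, policy, dynamics, novelty_fn, horizon=15):
-     # Average novelty over an imagined rollout; these states are never executed.
-     total = torch.zeros(latent.shape[0])
-     for _ in range(horizon):
-         action = policy(latent)
-         total = total + novelty_fn(latent, action)
-         latent = dynamics(latent, action)
-     return total / horizon
-
- def retrospective_novelty(replay_latents, replay_actions, novelty_fn):
-     # Novelty only of transitions that were already visited and stored.
-     return novelty_fn(replay_latents, replay_actions)
-
- toy_policy = lambda s: torch.tanh(s[:, :6])
- toy_dynamics = lambda s, a: s + 0.01 * torch.randn_like(s)
- toy_novelty = lambda s, a: s.var(dim=-1)
- score = expected_novelty(torch.randn(32, 260), toy_policy, toy_dynamics, toy_novelty)
- ```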
304
-
305
- # 6. Related Work
306
-
307
- Exploration Exploration is crucial for efficient reinforcement learning (Kakade & Langford, 2002). In tabular settings, it is efficiently addressed with exploration bonuses based on state visitation counts (Strehl & Littman, 2008; Jaksch et al., 2010) or fully Bayesian approaches (Duff & Barto, 2002; Poupart et al., 2006); however, these approaches are hard to generalize to high-dimensional inputs, such as images. Recent methods scale these early ideas to high-dimensional data via pseudo-count measures of state visitation (Bellemare et al., 2016; Ostrovski et al., 2018). Osband et al. (2016) derived an efficient approximation to the Thompson sampling procedure via ensembles of Q-functions. Osband et al. (2018); Lowrey et al. (2018) use ensembles of Q-functions to track the posterior of the value functions. In contrast to these task-oriented methods, our approach uses neither reward nor state at training time.
308
-
309
- Self-Supervised RL One way to learn skills without extrinsic rewards is to use intrinsic motivation as the sole objective (Oudeyer & Kaplan, 2009). Practical examples of such approaches focus on maximizing prediction error as a curiosity bonus (Pathak et al., 2017; Burda et al., 2019; Haber et al., 2018). These approaches can also be understood as maximizing the agent's surprise (Schmidhuber, 1991a; Achiam & Sastry, 2017). Similar to our work, other recent approaches use the notion of model disagreement to encourage visiting states with the highest potential to improve the model (Burda et al., 2018; Pathak et al., 2019), motivated by the active learning literature (Seung et al., 1992; McCallum & Nigam, 1998). An alternative is to explore by generating goals from prior experience (Andrychowicz et al., 2017; Nair et al., 2018). However, most of these approaches are model-free and expensive to fine-tune to a new task, requiring millions of environment steps for fine-tuning.
310
-
311
- Model-based Control Early work on model-based reinforcement learning used Gaussian processes and time-varying linear dynamical systems and showed significant improvements in data efficiency over model-free agents (Kaelbling et al., 1996; Deisenroth & Rasmussen, 2011; Levine & Koltun, 2013) when low-dimensional state information is available. Recent work on latent dynamics models has shown that model-based agents can achieve performance competitive with model-free agents while attaining much higher data efficiency, and even scale to high-dimensional observations (Chua et al., 2018; Buesing et al., 2018; Ebert et al., 2018; Ha & Schmidhuber, 2018; Hafner et al., 2019; Nagabandi et al., 2019). We base our agent on a state-of-the-art model-based agent, Dreamer (Hafner et al.,
312
-
313
- 2020), and use it to perform self-supervised exploration in order to solve tasks in a few-shot manner.
314
-
315
- Certain prior work has considered model-based exploration (Amos et al., 2018; Sharma et al., 2019), but has not been shown to scale to complex visual observations, relying only on proprioceptive information. Other work (Ebert et al., 2018; Pathak et al., 2018) has demonstrated the possibility of self-supervised model-based learning with visual observations. However, these approaches do not integrate exploration and model learning, instead performing them in stages (Pathak et al., 2018) or relying on random exploration (Ebert et al., 2018), which makes them difficult to scale to long-horizon problems.
316
-
317
- The idea of actively exploring to collect the most informative data goes back to the formulation of the information gain (Lindley, 1956). MacKay (1992) described how a learning system might optimize Bayesian objectives for active data selection based on the information gain. Sun et al. (2011) derived a model-based reinforcement learning agent that can optimize the infinite-horizon information gain and experimented with it in tabular settings. The closest works to ours are Shyam et al. (2019) and Henaff (2019), which use a measure of disagreement or information gain computed through ensembles of neural networks to incentivize exploration. However, these approaches are restricted to setups where low-dimensional states are available, whereas we design a latent-state approach that scales to high-dimensional observations. Moreover, we provide a theoretical connection between information gain and model disagreement. Concurrently with our work, Ball et al. (2020) discuss the connection between information gain and model disagreement for task-specific exploration from low-dimensional states.
318
-
319
- # 7. Discussion
320
-
321
- We presented Plan2Explore, a self-supervised reinforcement learning method that learns a world model of its environment through unsupervised exploration and uses this model to solve tasks in a zero-shot or few-shot manner. We derived connections between our method and the expected information gain, a principled objective for exploration. Building on recent work on learning dynamics models and behaviors from images, we constructed a model-based reinforcement learning agent that achieved state-of-the-art zero-shot task performance on the DeepMind Control Suite. Moreover, the agent's zero-shot performance was competitive with Dreamer, a state-of-the-art supervised reinforcement learning agent, on some tasks, and its few-shot performance eventually matched or exceeded that of the supervised agent. By presenting a method that can learn effective behavior for many different tasks in a scalable and data-efficient manner, we hope this work constitutes a step toward building scalable real-world reinforcement learning systems.
322
-
323
- Acknowledgements We thank Rowan McAllister, Aviral Kumar, Vijay Balasubramanian, and the members of GRASP for fruitful discussions. This work was supported in part by NSF-IIS-1703319, ONR N00014-17-1-2093, ARL DCIST CRA W911NF-17-2-0181, the Curious Minded Machines grant from Honda Research, and the DARPA Machine Common Sense grant.
324
-
325
- # References
326
-
327
- Achiam, J. and Sastry, S. Surprise-based intrinsic motivation for deep reinforcement learning. arXiv:1703.01732, 2017. 8
328
- Amos, B., Dinh, L., Cabi, S., Rothrl, T., Muldal, A., Erez, T., Tassa, Y., de Freitas, N., and Denil, M. Learning awareness models. In ICLR, 2018. 8
329
- Andrychowicz, M., Wolski, F., Ray, A., Schneider, J., Fong, R., Welinder, P., McGrew, B., Tobin, J., Abbeel, P., and Zaremba, W. Hindsight experience replay. In NIPS, 2017. 8
330
- Ball, P., Parker-Holder, J., Pacchiano, A., Choromanski, K., and Roberts, S. Ready policy one: World building through active learning. arXiv preprint arXiv:2002.02693, 2020. 8
331
- Bellemare, M., Srinivasan, S., Ostrovski, G., Schaul, T., Saxton, D., and Munos, R. Unifying count-based exploration and intrinsic motivation. In NIPS, 2016. 1, 8
332
- Breiman, L. Bagging predictors. Machine learning, 24(2): 123-140, 1996. 4
333
- Buesing, L., Weber, T., Racaniere, S., Eslami, S., Rezende, D., Reichert, D. P., Viola, F., Besse, F., Gregor, K., Hassabis, D., et al. Learning and querying fast generative models for reinforcement learning. arXiv preprint arXiv:1802.03006, 2018. 8
334
- Burda, Y., Edwards, H., Storkey, A., and Klimov, O. Exploration by random network distillation. arXiv preprint arXiv:1810.12894, 2018. 1, 8
335
- Burda, Y., Edwards, H., Pathak, D., Storkey, A., Darrell, T., and Efros, A. A. Large-scale study of curiosity-driven learning. *ICLR*, 2019. 8
336
- Chua, K., Calandra, R., McAllister, R., and Levine, S. Deep reinforcement learning in a handful of trials using probabilistic dynamics models. arXiv preprint arXiv:1805.12114, 2018. 8
337
- Deisenroth, M. and Rasmussen, C. E. Pilco: A model-based and data-efficient approach to policy search. In ICML, 2011. 8
338
-
339
- Duff, M. O. and Barto, A. Optimal Learning: Computational procedures for Bayes-adaptive Markov decision processes. PhD thesis, University of Massachusetts at Amherst, 2002. 8
340
- Ebert, F., Finn, C., Dasari, S., Xie, A., Lee, A., and Levine, S. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv:1812.00568, 2018. 8
341
- Eysenbach, B., Gupta, A., Ibarz, J., and Levine, S. Diversity is all you need: Learning skills without a reward function. arXiv:1802.06070, 2018. 1
342
- Gal, Y. Uncertainty in deep learning. University of Cambridge, 1:3, 2016. 4
343
- Ha, D. and Schmidhuber, J. World models. arXiv preprint arXiv:1803.10122, 2018. 3, 8
344
- Haber, N., Mrowca, D., Wang, S., Fei-Fei, L. F., and Yamins, D. L. Learning to play with intrinsically-motivated, self-aware agents. In NeurIPS, 2018. 8
345
- Hafner, D., Lillicrap, T., Fischer, I., Villegas, R., Ha, D., Lee, H., and Davidson, J. Learning latent dynamics for planning from pixels. ICML, 2019. 2, 3, 8, 11
346
- Hafner, D., Lillicrap, T., Ba, J., and Norouzi, M. Dream to control: Learning behaviors by latent imagination. *ICLR*, 2020. 3, 5, 6, 8, 13
347
- Henaff, M. Explicit explore-exploit algorithms in continuous state spaces. In NeurIPS, 2019. 8
348
- Jaksch, T., Ortner, R., and Auer, P. Near-optimal regret bounds for reinforcement learning. JMLR, 2010. 8
349
- Kaelbling, L. P., Littman, M. L., and Moore, A. W. Reinforcement learning: A survey. Journal of artificial intelligence research, 1996. 8
350
- Kakade, S. and Langford, J. Approximately optimal approximate reinforcement learning. In ICML, volume 2, pp. 267-274, 2002. 8
351
- Kingma, D. P. and Welling, M. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 3
352
- Klyubin, A. S., Polani, D., and Nehaniv, C. L. Empowerment: A universal agent-centric measure of control. In Evolutionary Computation, 2005. 1
353
- Lakshminarayanan, B., Pritzel, A., and Blundell, C. Simple and scalable predictive uncertainty estimation using deep ensembles. In NIPS, 2017. 4
354
- Lehman, J. and Stanley, K. O. Evolving a diversity of virtual creatures through novelty search and local competition. In Proceedings of the 13th annual conference on Genetic and evolutionary computation, 2011. 1
355
-
356
- Levine, S. and Koltun, V. Guided policy search. In International Conference on Machine Learning, pp. 1-9, 2013. 8
357
- Lindley, D. V. On a measure of the information provided by an experiment. The Annals of Mathematical Statistics, pp. 986-1005, 1956. 2, 4, 8
358
- Lowrey, K., Rajeswaran, A., Kakade, S., Todorov, E., and Mordatch, I. Plan online, learn offline: Efficient learning and exploration via model-based control. arXiv preprint arXiv:1811.01848, 2018. 8
359
- MacKay, D. J. Information-based objective functions for active data selection. Neural computation, 4(4):590-604, 1992. 4, 8
360
- McCallum, A. K. and Nigam, K. Employing EM and pool-based active learning for text classification. In ICML, 1998. 8
361
- Nagabandi, A., Konolige, K., Levine, S., and Kumar, V. Deep dynamics models for learning dexterous manipulation. arXiv preprint arXiv:1909.11652, 2019. 8
362
- Nair, A. V., Pong, V., Dalal, M., Bahl, S., Lin, S., and Levine, S. Visual reinforcement learning with imagined goals. In NeurIPS, 2018. 8
363
- Osband, I., Blundell, C., Pritzel, A., and Van Roy, B. Deep exploration via bootstrapped dqn. In NIPS, 2016. 8
364
- Osband, I., Aslanides, J., and Cassirer, A. Randomized prior functions for deep reinforcement learning. In Advances in Neural Information Processing Systems, pp. 8617-8629, 2018. 4, 8
365
- Ostrovski, G., Bellemare, M. G., Oord, A. v. d., and Munos, R. Count-based exploration with neural density models. ICML, 2018. 8
366
- Oudeyer, P.-Y. and Kaplan, F. What is intrinsic motivation? a typology of computational approaches. Frontiers in neurorobotics, 2009. 8
367
- Oudeyer, P.-Y., Kaplan, F., and Hafner, V. V. Intrinsic motivation systems for autonomous mental development. Evolutionary Computation, 2007. 1
368
- Pathak, D., Agrawal, P., Efros, A. A., and Darrell, T. Curiosity-driven exploration by self-supervised prediction. In ICML, 2017. 1, 2, 6, 8, 11
369
- Pathak, D., Mahmoudieh, P., Luo, G., Agrawal, P., Chen, D., Shentu, Y., Shelhamer, E., Malik, J., Efros, A. A., and Darrell, T. Zero-shot visual imitation. In ICLR, 2018. 8
370
- Pathak, D., Gandhi, D., and Gupta, A. Self-supervised exploration via disagreement. ICML, 2019. 2, 8
371
-
372
- Poupart, P., Vlassis, N., Hoey, J., and Regan, K. An analytic solution to discrete bayesian reinforcement learning. In ICML, 2006. 1, 8
373
- Rezende, D. J., Mohamed, S., and Wierstra, D. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082, 2014. 3
374
- Schmidhuber, J. Curious model-building control systems. In 1991 IEEE International Joint Conference on Neural Networks, pp. 1458-1463. IEEE, 1991a. 8
375
- Schmidhuber, J. A possibility for implementing curiosity and boredom in model-building neural controllers. In From animals to animats: Proceedings of the first international conference on simulation of adaptive behavior, 1991b. 1
376
- Seung, H., Opper, M., and Sompolinsky, H. Query by committee. COLT, 1992. 8
377
- Sharma, A., Gu, S., Levine, S., Kumar, V., and Hausman, K. Dynamics-aware unsupervised discovery of skills. arXiv preprint arXiv:1907.01657, 2019. 8
378
- Shyam, P., Jaskowski, W., and Gomez, F. Model-Based Active Exploration. In ICML, 2019. 2, 6, 8
379
- Strehl, A. and Littman, M. An analysis of model-based interval estimation for markov decision processes. Journal of Computer and System Sciences, 2008. 8
380
- Sun, Y., Gomez, F., and Schmidhuber, J. Planning to be surprised: Optimal bayesian exploration in dynamic environments. In AGI, 2011. 2, 8
381
- Sutton, R. S. Dyna, an integrated architecture for learning, planning, and reacting. ACM SIGART Bulletin, 2(4): 160-163, 1991. 3
382
- Tassa, Y., Doron, Y., Muldal, A., Erez, T., Li, Y., de Las Casas, D., Budden, D., Abdolmaleki, A., Merel, J., Lefrancq, A., Lillicrap, T., and Riedmiller, M. DeepMind control suite. Technical report, DeepMind, January 2018. URL https://arxiv.org/abs/1801.00690. 5, 11
383
- Watter, M., Springenberg, J., Boedecker, J., and Riedmiller, M. Embed to control: A locally linear latent dynamics model for control from raw images. In NIPS, 2015. 3
384
- Zhang, M., Vikram, S., Smith, L., Abbeel, P., Johnson, M., and Levine, S. Solar: deep structured representations for model-based reinforcement learning. In ICML, 2019. 2
385
-
386
- # A. Appendix
387
-
388
- Results DM Control Suite In Figure 6, we show the performance of our agent on all 20 DM Control Suite tasks from pixels. In addition, we show videos corresponding to all the plots on the project website: https://ramanans1.github.io/plan2explore/
389
-
390
- Convention for plots We run every experiment with three different random seeds. The shaded area of the graphs shows the standard deviation in performance. All plot curves are smoothed with a moving mean over a window of the past 20 data points. Only Figure 5 was smoothed with a window of the past 5 data points to provide cleaner plots that indicate the general trend. The consistently low variance of the curves across all figures suggests that our approach is highly reproducible.
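- The smoothing described above is a trailing moving mean; a minimal numpy version (window of 20, or 5 for Figure 5) is:
-
- ```python
- # Trailing moving mean used to smooth training curves (window 20; 5 for Figure 5).
- import numpy as np
-
- def moving_mean(curve, window=20):
-     curve = np.asarray(curve, dtype=float)
-     return np.array([curve[max(0, i - window + 1):i + 1].mean() for i in range(len(curve))])
-
- smoothed = moving_mean(np.random.rand(200), window=20)
- ```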
391
-
392
- Rewards of new tasks To test the generalization performance of our agent, we define three new tasks in the Cheetah environment (a code sketch of these reward functions follows the list):
393
-
394
- - Cheetah Run Backward Analogous to the forward running task, the reward $r$ is linearly proportional to the backward velocity $v_{b}$ up to a maximum of $10\mathrm{m / s}$ , which means $r(v_{b}) = \max (0,\min (v_{b} / 10,1))$ , where $v_{b} = -v$ and $v$ is the forward velocity of the Cheetah.
395
- - Cheetah Flip Backward The reward $r$ is linearly proportional to the backward angular velocity $\omega_{b}$ up to a maximum of 5rad/s, which means $r(\omega_{b}) = \max(0, \min(\omega_{b}/5, 1))$ , where $\omega_{b} = -\omega$ and $\omega$ is the angular velocity about the positive Z-axis, as defined in DeepMind Control Suite.
396
- - Cheetah Flip Forward The reward $r$ is linearly proportional to the forward angular velocity $\omega$ up to a maximum of 5rad/s, which means $r(\omega) = \max(0, \min(\omega/5, 1))$ .
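- Written out directly, the three reward functions above are as follows; the forward and angular velocities are assumed to be read from the simulator state.
-
- ```python
- # The three new Cheetah rewards, implementing the formulas above. Velocities
- # are assumed to come from the simulator state (e.g. the Cheetah physics).
- def run_backward_reward(forward_velocity):      # v_b = -v, reward saturates at 10 m/s
-     return max(0.0, min(-forward_velocity / 10.0, 1.0))
-
- def flip_backward_reward(angular_velocity):     # omega_b = -omega, saturates at 5 rad/s
-     return max(0.0, min(-angular_velocity / 5.0, 1.0))
-
- def flip_forward_reward(angular_velocity):      # saturates at 5 rad/s
-     return max(0.0, min(angular_velocity / 5.0, 1.0))
-
- assert run_backward_reward(-10.0) == 1.0        # running backward at 10 m/s: full reward
- assert flip_forward_reward(2.5) == 0.5
- ```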
397
-
398
- Environment We use the DeepMind Control Suite (Tassa et al., 2018), a standard benchmark for continuous control agents. All experiments are performed with only visual observations; we use RGB observations at $64 \times 64$ resolution. We selected a diverse set of 8 tasks that feature sparse rewards, high-dimensional action spaces, environments with unstable equilibria, and environments that require a long planning horizon. We use an episode length of 1000 steps and a fixed action repeat of $R = 2$ for all tasks.
399
-
400
- Agent implementation For implementing latent disagreement, we use an ensemble of 5 one-step prediction models, each a 2-hidden-layer MLP that takes the RNN state of the RSSM and the action as inputs and predicts the encoder
401
-
402
- features, which have a dimension of 1024. We scale the disagreement of the predictions by 10,000 to obtain the final intrinsic reward; this was found to increase performance in some environments. We do not normalize the rewards, either extrinsic or intrinsic. This setup for the one-step model was chosen over three other variants, in which we tried predicting the deterministic, stochastic, and combined features of the RSSM, respectively. The performance benefit of this ensemble over the variants potentially comes from the larger parameterization required to predict the high-dimensional encoder features.
403
-
404
- Baselines We note that while Curiosity (Pathak et al., 2017) uses an $L_{2}$ loss to train the model, the RSSM loss is different (see Hafner et al. (2019)); we use the full RSSM loss as the intrinsic reward for the Curiosity comparison, as we found it produces the best performance. Note that this reward can only be computed when ground-truth data is available and needs a separate reward predictor to optimize it in a model-based fashion.
405
-
406
- Table 1. Zero-shot performance at 3.5 million environment steps (corresponding to 1.75 million agent steps, times 2 for action repeat). We report the average performance of the last 20 episodes before the 3.5 million steps point. The performance is computed by executing the mode of the actor without action noise. Among the agents that receive no task rewards, the highest performance on each task is highlighted. The corresponding training curves are visualized in Figure 6.
407
-
408
- <table><tr><td>Zero-shot performance</td><td>Plan2Explore</td><td>Curiosity</td><td>Random</td><td>MAX</td><td>Retrospective</td><td>Dreamer</td></tr><tr><td>Task-agnostic experience</td><td>3.5M</td><td>3.5M</td><td>3.5M</td><td>3.5M</td><td>3.5M</td><td>-</td></tr><tr><td>Task-specific experience</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>3.5M</td></tr><tr><td>Acrobot Swingup</td><td>280.23</td><td>219.55</td><td>107.38</td><td>64.30</td><td>110.84</td><td>408.27</td></tr><tr><td>Cartpole Balance</td><td>950.97</td><td>917.10</td><td>963.40</td><td>-</td><td>-</td><td>970.28</td></tr><tr><td>Cartpole Balance Sparse</td><td>860.38</td><td>695.83</td><td>764.48</td><td>-</td><td>-</td><td>926.9</td></tr><tr><td>Cartpole Swingup</td><td>759.65</td><td>747.488</td><td>516.04</td><td>144.05</td><td>700.59</td><td>855.55</td></tr><tr><td>Cartpole Swingup Sparse</td><td>602.71</td><td>324.5</td><td>94.89</td><td>9.23</td><td>180.85</td><td>789.79</td></tr><tr><td>Cheetah Run</td><td>784.45</td><td>495.55</td><td>0.78</td><td>0.76</td><td>9.11</td><td>888.84</td></tr><tr><td>Cup Catch</td><td>962.81</td><td>963.13</td><td>660.35</td><td>-</td><td>-</td><td>963.4</td></tr><tr><td>Finger Spin</td><td>655.4</td><td>661.96</td><td>676.5</td><td>-</td><td>-</td><td>333.73</td></tr><tr><td>Finger Turn Easy</td><td>401.64</td><td>266.96</td><td>495.21</td><td>-</td><td>-</td><td>551.31</td></tr><tr><td>Finger Turn Hard</td><td>270.83</td><td>289.65</td><td>464.01</td><td>-</td><td>-</td><td>435.56</td></tr><tr><td>Hopper Hop</td><td>432.58</td><td>389.64</td><td>12.11</td><td>17.39</td><td>41.32</td><td>336.57</td></tr><tr><td>Hopper Stand</td><td>841.53</td><td>889.87</td><td>180.86</td><td>-</td><td>-</td><td>923.74</td></tr><tr><td>Pendulum Swingup</td><td>792.71</td><td>56.80</td><td>16.96</td><td>748.53</td><td>1.383</td><td>829.21</td></tr><tr><td>Quadruped Run</td><td>223.96</td><td>164.02</td><td>139.53</td><td>-</td><td>-</td><td>373.25</td></tr><tr><td>Quadruped Walk</td><td>182.87</td><td>368.45</td><td>129.73</td><td>-</td><td>-</td><td>921.25</td></tr><tr><td>Reacher Easy</td><td>530.56</td><td>416.31</td><td>229.23</td><td>242.13</td><td>230.68</td><td>544.15</td></tr><tr><td>Reacher Hard</td><td>66.76</td><td>123.5</td><td>4.10</td><td>-</td><td>-</td><td>438.34</td></tr><tr><td>Walker Run</td><td>429.30</td><td>446.45</td><td>318.61</td><td>-</td><td>-</td><td>783.95</td></tr><tr><td>Walker Stand</td><td>331.20</td><td>459.29</td><td>301.65</td><td>-</td><td>-</td><td>655.80</td></tr><tr><td>Walker Walk</td><td>911.04</td><td>889.17</td><td>766.41</td><td>148.02</td><td>538.84</td><td>965.51</td></tr><tr><td>Task Average</td><td>563.58</td><td>489.26</td><td>342.11</td><td>-</td><td>-</td><td>694.77</td></tr></table>
409
-
410
- Table 2. Adaptation performance after 1M task-agnostic environment steps, followed by 150K task-specific environment steps (agent steps are half as many due to the action repeat of 2). We report the average performance of the last 20 episodes before the 1.15M steps point. The performance is computed by executing the mode of the actor without action noise. Among the self-supervised agents, the highest performance on each task is highlighted. The corresponding training curves are visualized in Figure 4.
411
-
412
- <table><tr><td>Adaptation performance</td><td>Plan2Explore</td><td>Curiosity</td><td>Random</td><td>MAX</td><td>Retrospective</td><td>Dreamer</td></tr><tr><td>Task-agnostic experience</td><td>1M</td><td>1M</td><td>1M</td><td>1M</td><td>1M</td><td>-</td></tr><tr><td>Task-specific experience</td><td>150K</td><td>150K</td><td>150K</td><td>150K</td><td>150K</td><td>1.15M</td></tr><tr><td>Acrobot Swingup</td><td>312.03</td><td>163.71</td><td>27.54</td><td>108.39</td><td>76.92</td><td>345.51</td></tr><tr><td>Cartpole Swingup</td><td>803.53</td><td>747.10</td><td>416.82</td><td>501.93</td><td>725.81</td><td>826.07</td></tr><tr><td>Cartpole Swingup Sparse</td><td>516.56</td><td>456.8</td><td>104.88</td><td>82.06</td><td>211.81</td><td>758.45</td></tr><tr><td>Cheetah Run</td><td>697.80</td><td>572.67</td><td>18.91</td><td>0.76</td><td>79.90</td><td>852.03</td></tr><tr><td>Hopper Hop</td><td>307.16</td><td>159.45</td><td>5.21</td><td>64.95</td><td>29.97</td><td>163.32</td></tr><tr><td>Pendulum Swingup</td><td>771.51</td><td>377.51</td><td>1.45</td><td>284.53</td><td>21.23</td><td>781.36</td></tr><tr><td>Reacher Easy</td><td>848.65</td><td>894.29</td><td>358.56</td><td>611.65</td><td>104.03</td><td>918.86</td></tr><tr><td>Walker Walk</td><td>892.63</td><td>932.03</td><td>308.51</td><td>29.39</td><td>820.54</td><td>956.53</td></tr><tr><td>Task Average</td><td>643.73</td><td>537.95</td><td>155.23</td><td>210.46</td><td>258.78</td><td>700.27</td></tr></table>
413
-
414
- ![](images/66bb028be9b6ea6d2d20d9c4e8f0aadc48a2659139fb427341aacc7686f2706d.jpg)
415
-
416
- ![](images/886bfae2255f166aeee151cd8cb14b67eeff5d2dce33c53d66b4c700aed78df2.jpg)
417
-
418
- ![](images/5a5f0f7f4132a24a569824e70b2a71bb59eb8cfb04591e624aec006a91707d96.jpg)
419
-
420
- ![](images/ee9c88f763516c94a9e50d01a174a7961ece53843fe726b26a538f4faf6ba3c9.jpg)
421
-
422
- ![](images/707009379d574e3860ce66fb3619c3e8b0c65ec7a5a3d46c15f14bfd997779be.jpg)
423
-
424
- ![](images/48e5db0fa0ccf6a05c34ce295f16de32f228faaa9cda2e407a1bd86d9ef800c9.jpg)
425
-
426
- ![](images/cc8bdcd271a4c0a4c800abd298b062c612229a368096f2644a4eb7d6f641782c.jpg)
427
-
428
- ![](images/55122b49e405c931b556cabc887a00244651a1f50e7e407dd213025241123afc.jpg)
429
-
430
- ![](images/88a3839b0c065c1411bc5aea8008bc2eff6bda77bfa8f2341e8e9d96f460ac4e.jpg)
431
-
432
- ![](images/00fe8003ebbf6c72ea1a78724f94d91ac21eb9963d65bb4e139d5d3d69fe9eae.jpg)
433
-
434
- ![](images/4582df33bc1ce3ea314185037e724a26a2e38dd76c47d971521ea5e3315d64f4.jpg)
435
-
436
- ![](images/01d59f6bdc0f4e737f60d75464c73292b3b06bb8faae889fd7354bf23d91fb6d.jpg)
437
-
438
- ![](images/0679f63fae4b78c28f40c94fa3ee91e30b704edecc8c5422d396d5bcca381052.jpg)
439
-
440
- ![](images/dd14680c9e34c779bdecd2d1c3a5c00e0f84c2020b3ee3eb98baf1d6dd3b38dc.jpg)
441
-
442
- ![](images/94623d00be38c3899bc444355a97723e45fecc1792525fb9a2ec737176ab8da9.jpg)
443
-
444
- ![](images/8fb933d986196fe281a013188e080ec1c5aa9905bc28aa011aefeff7fcfaa559.jpg)
445
-
446
- Figure 6. We evaluate the zero-shot performance of the self-supervised agents as well as the supervised performance of Dreamer on all tasks from the DM Control Suite. All agents operate from raw pixels. The experimental protocol is the same as in Figure 3 of the main paper. To produce this plot, we take snapshots of the agent throughout exploration to train a task policy on the downstream task and plot its zero-shot performance. We use the same hyperparameters for all environments. We see that Plan2Explore achieves state-of-the-art zero-shot task performance on a range of tasks. Moreover, even though Plan2Explore is a self-supervised agent, it demonstrates performance competitive with Dreamer (Hafner et al., 2020), a state-of-the-art supervised reinforcement learning agent. This shows that self-supervised exploration is competitive with task-specific approaches in these continuous control tasks.
447
- ![](images/426b849ef07429a7e7c382f11800bd6dbfe69e011f059c10b849089bdf86dbc9.jpg)
448
- Dreamer (Hafner et al., 2020) [sup] Plan2Explore (Ours) [unsup] Curiosity (Pathak et al., 2017) [unsup] Random [unsup]
449
-
450
- ![](images/f3863ef963c7b0ae6e4666826a6365f659479758a429cc77a202ee616c52c51d.jpg)
451
-
452
- ![](images/2e65b1ceeaab1f9104a7c053cbfdc443000f82801cec3d942e05b95f7de9e9c9.jpg)
453
-
454
- ![](images/86b0a939f850f261ce1ef4767a11a2a215cdb5038f49efd9cb7fd0423b19f761.jpg)
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:377aee1030966c6c9246989c9626ae61ca6085ef19265118c6fac3a9548ae262
3
+ size 58948
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/2020/2005_05xxx/2005.05960/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1d92d46670f0712669458757e77fb1ab834870e0a8710214e5295eee8c8ed377
3
  size 979994
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:184f0b802e92a6c78fecfb53f5ade39277573a604cafd4516dfca6defbc26181
3
  size 979994
data/2020/2005_05xxx/2005.05960/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_05xxx/2005.05999/55470d3a-eba0-4dac-8f5a-e101cace3cb0_content_list.json CHANGED
@@ -1,1601 +1,3 @@
1
- [
2
- {
3
- "type": "text",
4
- "text": "Fast Deep Multi-patch Hierarchical Network for Nonhomogeneous Image Dehazing",
5
- "text_level": 1,
6
- "bbox": [
7
- 114,
8
- 128,
9
- 854,
10
- 175
11
- ],
12
- "page_idx": 0
13
- },
14
- {
15
- "type": "text",
16
- "text": "Sourya Dipta Das* \nJadavpur University \nKolkata, India",
17
- "bbox": [
18
- 261,
19
- 204,
20
- 421,
21
- 255
22
- ],
23
- "page_idx": 0
24
- },
25
- {
26
- "type": "text",
27
- "text": "dipta.juetce@gmail.com",
28
- "bbox": [
29
- 243,
30
- 260,
31
- 441,
32
- 273
33
- ],
34
- "page_idx": 0
35
- },
36
- {
37
- "type": "text",
38
- "text": "Saikat Dutta* IIT Madras",
39
- "bbox": [
40
- 557,
41
- 204,
42
- 668,
43
- 237
44
- ],
45
- "page_idx": 0
46
- },
47
- {
48
- "type": "text",
49
- "text": "Chennai, India",
50
- "bbox": [
51
- 555,
52
- 239,
53
- 674,
54
- 255
55
- ],
56
- "page_idx": 0
57
- },
58
- {
59
- "type": "text",
60
- "text": "cs18s016@smail.iitm.ac.in",
61
- "bbox": [
62
- 501,
63
- 260,
64
- 725,
65
- 272
66
- ],
67
- "page_idx": 0
68
- },
69
- {
70
- "type": "text",
71
- "text": "Abstract",
72
- "text_level": 1,
73
- "bbox": [
74
- 233,
75
- 309,
76
- 313,
77
- 324
78
- ],
79
- "page_idx": 0
80
- },
81
- {
82
- "type": "text",
83
- "text": "Recently, CNN based end-to-end deep learning methods achieve superiority in Image Dehazing but they tend to fail drastically in Non-homogeneous dehazing. Apart from that, existing popular Multi-scale approaches are runtime intensive and memory inefficient. In this context, we proposed a fast Deep Multi-patch Hierarchical Network to restore Non-homogeneous hazed images by aggregating features from multiple image patches from different spatial sections of the hazed image with fewer number of network parameters. Our proposed method is quite robust for different environments with various density of the haze or fog in the scene and very lightweight as the total size of the model is around 21.7 MB. It also provides faster runtime compared to current multi-scale methods with an average runtime of 0.0145s to process $1200 \\times 1600$ HD quality image. Finally, we show the superiority of this network on Dense Haze Removal to other state-of-the-art models.",
84
- "bbox": [
85
- 75,
86
- 340,
87
- 472,
88
- 598
89
- ],
90
- "page_idx": 0
91
- },
92
- {
93
- "type": "text",
94
- "text": "1. Introduction",
95
- "text_level": 1,
96
- "bbox": [
97
- 76,
98
- 625,
99
- 209,
100
- 638
101
- ],
102
- "page_idx": 0
103
- },
104
- {
105
- "type": "text",
106
- "text": "Outdoor images are often deteriorated due to the extreme weather, such as fog and haze, which influences visibility issues in the scene because of the degradation of color, contrast and textures for different distant objects, selective attenuation of the light spectrum. Restoring such hazed images has become an important problem in many computer vision applications like visual surveillance, remote sensing, and Autonomous transportation etc. Most of early methods proposed for image dehazing are based on the classic atmospheric scattering model which is shown as the following equation. 1.",
107
- "bbox": [
108
- 75,
109
- 648,
110
- 468,
111
- 815
112
- ],
113
- "page_idx": 0
114
- },
115
- {
116
- "type": "equation",
117
- "text": "\n$$\nI (x) = J (x) t (x) + A (1 - t (x)) \\tag {1}\n$$\n",
118
- "text_format": "latex",
119
- "bbox": [
120
- 163,
121
- 824,
122
- 468,
123
- 840
124
- ],
125
- "page_idx": 0
126
- },
127
- {
128
- "type": "text",
129
- "text": "where, $x$ represents pixel locations, $I(x)$ is the observed hazy image, $J(x)$ is the dehazed image, $t(x)$ is called",
130
- "bbox": [
131
- 75,
132
- 848,
133
- 468,
134
- 878
135
- ],
136
- "page_idx": 0
137
- },
138
- {
139
- "type": "text",
140
- "text": "medium transmission function and $A$ is the global atmospheric light. Recently, Deep learning based methods have shown remarkable improvements though those methods suffer from degradation of colour, texture in image, halo artifacts, haze residuals and distortions. In our problem statement, Non-homogeneous haze in the scene can be seen in the real world situation where different spatial domains of the image can be affected by different levels of haze. The degradation level also varies a lot for objects at different scene depth due to non-uniform haze distribution in the image. Few example images of such Non-homogeneous haze are shown in figure 4. Dehazing model should put more effort to handle non-uniform haze and different degradation between different scene depth jointly. Multi-scale and scale-recurrent models can be a viable solution in this type of problem because of its coarse-to-fine learning scheme by hierarchical integration of features from different spatial scale of the image. This type of methods is inefficient because of high runtime and large model size due to a lot of convolution and Deconvolution layers. Apart from that, increasing depth of layers at fine scale levels may not always improve the perceptual quality of the output dehazed image. On the contrary, main goal of our model is to aggregate features multiple image patches from different spatial sections of the image for better performance. The parameters of our encoder and decoder are very less due to residual links in our model which helps in fast dehazing inference. The main intuition behind our idea is to make the lower level network portion focus on local information by extracting local features from the finer grid to produce residual information for the upper level part of the network to get more global information from both finer and coarser grid which is achieved by concatenating convolutional features.",
141
- "bbox": [
142
- 500,
143
- 310,
144
- 893,
145
- 809
146
- ],
147
- "page_idx": 0
148
- },
149
- {
150
- "type": "text",
151
- "text": "2. Related Work",
152
- "text_level": 1,
153
- "bbox": [
154
- 500,
155
- 828,
156
- 640,
157
- 843
158
- ],
159
- "page_idx": 0
160
- },
161
- {
162
- "type": "text",
163
- "text": "Most early work of image dehazing methods is developed on atmosphere scattering model as it's physical model. In that respect, previous works on image dehazing can be",
164
- "bbox": [
165
- 500,
166
- 854,
167
- 892,
168
- 900
169
- ],
170
- "page_idx": 0
171
- },
172
- {
173
- "type": "aside_text",
174
- "text": "arXiv:2005.05999v1 [cs.CV] 12 May 2020",
175
- "bbox": [
176
- 22,
177
- 255,
178
- 57,
179
- 705
180
- ],
181
- "page_idx": 0
182
- },
183
- {
184
- "type": "page_footnote",
185
- "text": "*Equal contribution.",
186
- "bbox": [
187
- 94,
188
- 887,
189
- 205,
190
- 898
191
- ],
192
- "page_idx": 0
193
- },
194
- {
195
- "type": "page_number",
196
- "text": "1",
197
- "bbox": [
198
- 480,
199
- 924,
200
- 488,
201
- 936
202
- ],
203
- "page_idx": 0
204
- },
205
- {
206
- "type": "text",
207
- "text": "segregated into two classes which are traditional image prior-based methods and end to end deep learning based methods. Traditional image prior based methods relies on hand-crafted statistics from the images to leverage extra mathematical constraints to compensate for the information lost during reconstruction. On contrary, deep learning based methods learn the direct relationship between haze and haze-free image by utilizing multistage, attention mechanisms etc. Here, we discussed some recent deep learning based methods with state-of-the-art results.",
208
- "bbox": [
209
- 80,
210
- 90,
211
- 467,
212
- 239
213
- ],
214
- "page_idx": 1
215
- },
216
- {
217
- "type": "text",
218
- "text": "Zhang et al.[23] proposed a dehazing network with edge-preserving densely connected encoder-decoder architecture that jointly learns the dehazed image, transmission map and atmosphere light all together based on the scattering model for dehazing. In their encoder-decoder architecture, they use a multilevel pyramid pooling module and to improve their results further, joint-discriminator based on GAN is used to incorporate the correlation between estimated transmission map and dehazed image. Deng et al.[8] presents a multi-model fusion network to combine multiple models in its different levels of layers and enhance the overall performance of image dehazing. They generate the multi-model attention integrated feature from various CNN features at different levels and fed it to their fusion model to predict dehazed image for an atmospheric scattering model and four haze-layer separation models altogether. After that, they fused the corresponding results together to generate the final dehazed image. Qin et al.[17] proposed a novel Feature Attention module which fuses Channel Attention with Pixel Attention while considering different weighted information of different channel-wise features and uneven haze distribution on different pixels of the hazed image. For Outdoor hazy images, their work proves superiority though it didn't work well in case of dense dehazing. Liu et al.[14] proposed a grid network with attention-based multi-scale estimation which overcomes the bottleneck problems found in general multi-scale approach. Apart from that, their method also consists of pre-processing and post-processing modules. The pre-processing module used in this method is trainable to get more relevant features from diversified pre-processed image inputs and it outperforms the other hand picked classical pre-process techniques. The post-processing module is finally used on intermediate dehazed image to get more finer dehazed image. Their study shows how their method works quite independently and does not take any advantage from atmosphere scattering model for image dehazing.",
219
- "bbox": [
220
- 80,
221
- 246,
222
- 467,
223
- 789
224
- ],
225
- "page_idx": 1
226
- },
227
- {
228
- "type": "text",
229
- "text": "Unlike other multi-stage methods, Li et al.[13] used a level aware progressive deep network to learn different levels of haze from its different stages of the network by different supervision. Their network tends to progressively learn gradually more intense haze from image by focusing on a specific part of image with a certain haze level. They have also devised a adaptive hierarchical integration tech",
230
- "bbox": [
231
- 80,
232
- 795,
233
- 467,
234
- 898
235
- ],
236
- "page_idx": 1
237
- },
238
- {
239
- "type": "text",
240
- "text": "nique by cooperating with the it's memory network component and domain information of dehazing to emphasize the well-reconstructed parts of the image in it's each stage of the network. Liu et al.[15] suggests a method to learn a haze relevant image priors by using a iteration algorithm with deep CNNs. They achieve this by using gradient descent method to optimize a variational model with image fidelity terms and proper regularization. this method indeed a great combination of properties from classical deep learning based method and physical hazed image formation model. Sharma et al.[19] explored the application of Laplacians of Gaussian (LoG) of the images to reattain the edge and intensity variation information. They optimize their end-to-end deep model by per-pixel difference between Laplacians of Gaussians of the dehazed and ground truth images. they additionally do adversarial training with a perceptual loss to enhance their results. Apart from other physical scattering model based methods, GAN , multiscale or multistage deep networks, Image dehazing can also be posed as image to image translation problem. Qu et al.(2019)[18] proposed their solution as an enhanced Pix2Pix Model which is widely used in image style transfer, image to image translation etc. problems. Their method consists of a GAN with a Enhancer modules to support the dehazing process to get more detailed, vivid image with less artifacts. Their work also proved superiority over other methods in the aspect of the perceptual quality of the dehazed images.",
241
- "bbox": [
242
- 503,
243
- 90,
244
- 890,
245
- 497
246
- ],
247
- "page_idx": 1
248
- },
249
- {
250
- "type": "text",
251
- "text": "3. Proposed Method",
252
- "text_level": 1,
253
- "bbox": [
254
- 503,
255
- 515,
256
- 669,
257
- 532
258
- ],
259
- "page_idx": 1
260
- },
261
- {
262
- "type": "text",
263
- "text": "We use a Multi-patch and a Multi-scale network for Nonhomogeneous Image Dehazing. In this section, we describe these two architectures in detail.",
264
- "bbox": [
265
- 503,
266
- 542,
267
- 890,
268
- 585
269
- ],
270
- "page_idx": 1
271
- },
272
- {
273
- "type": "text",
274
- "text": "3.1. Multi-patch Architecture:",
275
- "text_level": 1,
276
- "bbox": [
277
- 503,
278
- 599,
279
- 733,
280
- 614
281
- ],
282
- "page_idx": 1
283
- },
284
- {
285
- "type": "text",
286
- "text": "We use Deep Multi-patch Hierarchical Network(DMPHN). DMPHN is originally used for Single Image Deblurring[22]. We use (1-2-4) variant of DMPHN in this paper. For the sake of completeness, we will discuss the architecture in the following.",
287
- "bbox": [
288
- 503,
289
- 623,
290
- 890,
291
- 698
292
- ],
293
- "page_idx": 1
294
- },
295
- {
296
- "type": "text",
297
- "text": "DMPHN is a multi-level architecture. There is an encoder-decoder pair in each level. Each level works on different number of patches. In DMPHN(1-2-4), the number of patches used is 1,2 and 4 from top to bottom levels respectively. The top-most level (level-1) considers only one patch per image. In the next level (level-2), the image is divided into two patches vertically. In the bottom-most level (level-3) the patches from previous level are further divided horizontally, resulting in total 4 patches.",
298
- "bbox": [
299
- 503,
300
- 699,
301
- 890,
302
- 835
303
- ],
304
- "page_idx": 1
305
- },
306
- {
307
- "type": "text",
308
- "text": "Let us consider an input hazy image $I^H$ . We denote $j$ -th patch in $i$ -th level as $I_{i,j}^H$ . In level-1, $I^H$ is not divided into any patches. In level-2, $I^H$ is divided vertically into $I_{2,1}^H$ and $I_{2,2}^H$ . In level-3, $I_{2,1}^H$ and $I_{2,2}^H$ are divided horizontally",
309
- "bbox": [
310
- 503,
311
- 835,
312
- 890,
313
- 901
314
- ],
315
- "page_idx": 1
316
- },
317
- {
318
- "type": "image",
319
- "img_path": "images/260479b9b27b6d3d8b6234b30b23d67b18488524106be37a4bd3c54eb4414065.jpg",
320
- "image_caption": [
321
- "Figure 1: Architecture diagram of Deep Multi-Patch Hierarchical Network. $\\{^{\\prime}\\}$ denotes spatial concatenation and $\\bigoplus$ denotes residual addition."
322
- ],
323
- "image_footnote": [],
324
- "bbox": [
325
- 205,
326
- 90,
327
- 764,
328
- 334
329
- ],
330
- "page_idx": 2
331
- },
332
- {
333
- "type": "text",
334
- "text": "to create 4 patches, $I_{3,1}^{H}, I_{3,2}^{H}, I_{3,3}^{H}$ and $I_{3,4}^{H}$ . Encoders and Decoders at $i$ -th level is denoted as $Enc_{i}$ and $Dec_{i}$ respectively.",
335
- "bbox": [
336
- 75,
337
- 402,
338
- 468,
339
- 448
340
- ],
341
- "page_idx": 2
342
- },
343
- {
344
- "type": "text",
345
- "text": "The information flow in DMPHN is bottom-up. Patches in the lowest level are fed to encoder $Enc_3$ to generate corresponding feature maps.",
346
- "bbox": [
347
- 75,
348
- 449,
349
- 468,
350
- 494
351
- ],
352
- "page_idx": 2
353
- },
354
- {
355
- "type": "equation",
356
- "text": "\n$$\nF _ {3, j} = E n c _ {i} \\left(I _ {3, j} ^ {H}\\right), \\forall j \\in [ 1, 4 ] \\tag {2}\n$$\n",
357
- "text_format": "latex",
358
- "bbox": [
359
- 169,
360
- 503,
361
- 468,
362
- 525
363
- ],
364
- "page_idx": 2
365
- },
366
- {
367
- "type": "text",
368
- "text": "We concatenate spatially adjacent feature maps to obtain a new feature representation.",
369
- "bbox": [
370
- 76,
371
- 532,
372
- 468,
373
- 564
374
- ],
375
- "page_idx": 2
376
- },
377
- {
378
- "type": "equation",
379
- "text": "\n$$\nP _ {3, j} = \\left[ F _ {3, 2 j - 1}, F _ {3, 2 j} \\right], \\forall j \\in [ 1, 2 ] \\tag {3}\n$$\n",
380
- "text_format": "latex",
381
- "bbox": [
382
- 156,
383
- 573,
384
- 468,
385
- 592
386
- ],
387
- "page_idx": 2
388
- },
389
- {
390
- "type": "text",
391
- "text": "where $[\\ldots]$ stands for concatenation.",
392
- "bbox": [
393
- 76,
394
- 599,
395
- 316,
396
- 614
397
- ],
398
- "page_idx": 2
399
- },
400
- {
401
- "type": "text",
402
- "text": "The new concatenated features are passed through decoder $Dec_{3}$ .",
403
- "bbox": [
404
- 75,
405
- 614,
406
- 468,
407
- 646
408
- ],
409
- "page_idx": 2
410
- },
411
- {
412
- "type": "equation",
413
- "text": "\n$$\nQ _ {3, j} = \\operatorname {D e c} _ {3} \\left(P _ {3, j}\\right), \\forall j \\in [ 1, 2 ] \\tag {4}\n$$\n",
414
- "text_format": "latex",
415
- "bbox": [
416
- 166,
417
- 656,
418
- 468,
419
- 674
420
- ],
421
- "page_idx": 2
422
- },
423
- {
424
- "type": "text",
425
- "text": "The decoder output is added with patches in the next level and fed to encoder.",
426
- "bbox": [
427
- 75,
428
- 683,
429
- 468,
430
- 712
431
- ],
432
- "page_idx": 2
433
- },
434
- {
435
- "type": "equation",
436
- "text": "\n$$\nF _ {2, j} = \\operatorname {E n c} _ {2} \\left(I _ {2, j} ^ {H} + Q _ {3, j}\\right), \\forall j \\in [ 1, 2 ] \\tag {5}\n$$\n",
437
- "text_format": "latex",
438
- "bbox": [
439
- 145,
440
- 723,
441
- 468,
442
- 743
443
- ],
444
- "page_idx": 2
445
- },
446
- {
447
- "type": "text",
448
- "text": "The encoder outputs are added with respective decoder inputs from previous level. Then the resulting feature maps are spatially concatenated.",
449
- "bbox": [
450
- 75,
451
- 751,
452
- 468,
453
- 797
454
- ],
455
- "page_idx": 2
456
- },
457
- {
458
- "type": "equation",
459
- "text": "\n$$\nF _ {2, j} ^ {*} = F _ {2, j} + P _ {3, j}, \\forall j \\in [ 1, 2 ] \\tag {6}\n$$\n",
460
- "text_format": "latex",
461
- "bbox": [
462
- 169,
463
- 806,
464
- 468,
465
- 825
466
- ],
467
- "page_idx": 2
468
- },
469
- {
470
- "type": "equation",
471
- "text": "\n$$\nP _ {2} = \\left[ F _ {2, 1} ^ {*}, F _ {2, 2} ^ {*} \\right] \\tag {7}\n$$\n",
472
- "text_format": "latex",
473
- "bbox": [
474
- 259,
475
- 828,
476
- 468,
477
- 845
478
- ],
479
- "page_idx": 2
480
- },
481
- {
482
- "type": "text",
483
- "text": "$P_{2}$ is then fed to $Dec_{2}$ to generate residual feature maps for level-2.",
484
- "bbox": [
485
- 76,
486
- 854,
487
- 468,
488
- 883
489
- ],
490
- "page_idx": 2
491
- },
492
- {
493
- "type": "equation",
494
- "text": "\n$$\nQ _ {2} = \\operatorname {D e c} _ {2} (P _ {2}) \\tag {8}\n$$\n",
495
- "text_format": "latex",
496
- "bbox": [
497
- 215,
498
- 885,
499
- 468,
500
- 902
501
- ],
502
- "page_idx": 2
503
- },
504
- {
505
- "type": "text",
506
- "text": "Decoder output at level-2 is added to input image and passed through $Enc_1$ . Encoder output $F_1$ is added with decoder output at level-2, $Q_2$ .",
507
- "bbox": [
508
- 498,
509
- 402,
510
- 892,
511
- 450
512
- ],
513
- "page_idx": 2
514
- },
515
- {
516
- "type": "equation",
517
- "text": "\n$$\nF _ {1} = \\operatorname {E n c} _ {1} \\left(I ^ {H} + Q _ {2}\\right) \\tag {9}\n$$\n",
518
- "text_format": "latex",
519
- "bbox": [
520
- 617,
521
- 459,
522
- 890,
523
- 479
524
- ],
525
- "page_idx": 2
526
- },
527
- {
528
- "type": "text",
529
- "text": "$F_{1}$ is added with $P_{2}$ and fed to $Dec_{1}$ to produce the final dehazed output $\\hat{I}$ .",
530
- "bbox": [
531
- 498,
532
- 488,
533
- 890,
534
- 520
535
- ],
536
- "page_idx": 2
537
- },
538
- {
539
- "type": "equation",
540
- "text": "\n$$\nP _ {1} = F _ {1} + P _ {2} \\tag {10}\n$$\n",
541
- "text_format": "latex",
542
- "bbox": [
543
- 645,
544
- 531,
545
- 890,
546
- 547
547
- ],
548
- "page_idx": 2
549
- },
550
- {
551
- "type": "equation",
552
- "text": "\n$$\n\\hat {I} = D e c _ {1} \\left(P _ {1}\\right) \\tag {11}\n$$\n",
553
- "text_format": "latex",
554
- "bbox": [
555
- 645,
556
- 551,
557
- 890,
558
- 570
559
- ],
560
- "page_idx": 2
561
- },
562
- {
563
- "type": "text",
564
- "text": "3.2. Multi-scale Architecture:",
565
- "text_level": 1,
566
- "bbox": [
567
- 500,
568
- 590,
569
- 730,
570
- 606
571
- ],
572
- "page_idx": 2
573
- },
574
- {
575
- "type": "text",
576
- "text": "We also experiment with a multi-scale architecture. We name this architecture Deep Multi-scale Hierarchical Network(DMSHN). The details of the architecture are described as follows.",
577
- "bbox": [
578
- 498,
579
- 613,
580
- 890,
581
- 672
582
- ],
583
- "page_idx": 2
584
- },
585
- {
586
- "type": "text",
587
- "text": "Input hazy image $I^H$ is downsampled by factor of 2 and 4 to create an image pyramid. We call these downsampled images $I_{0.5}^H$ and $I_{0.25}^H$ respectively. The architecture consists of 3 levels where each level has a pair of encoder and decoder. Encoder and decoder at level $i$ is denoted as $Enc_i$ and $Dec_i$ respectively.",
588
- "bbox": [
589
- 498,
590
- 672,
591
- 890,
592
- 763
593
- ],
594
- "page_idx": 2
595
- },
596
- {
597
- "type": "text",
598
- "text": "At the lowest level $I_{0.25}^{H}$ is fed to encoder $Enc_3$ to obtain feature map $F_3$ and is further passed through decoder $Dec_3$ to feature representation $P_3$ .",
599
- "bbox": [
600
- 498,
601
- 763,
602
- 890,
603
- 810
604
- ],
605
- "page_idx": 2
606
- },
607
- {
608
- "type": "equation",
609
- "text": "\n$$\nF _ {3} = \\operatorname {E n c} _ {3} \\left(I _ {0. 2 5} ^ {H}\\right) \\tag {12}\n$$\n",
610
- "text_format": "latex",
611
- "bbox": [
612
- 632,
613
- 820,
614
- 890,
615
- 840
616
- ],
617
- "page_idx": 2
618
- },
619
- {
620
- "type": "equation",
621
- "text": "\n$$\nP _ {3} = \\operatorname {D e c} _ {3} \\left(F _ {3}\\right) \\tag {13}\n$$\n",
622
- "text_format": "latex",
623
- "bbox": [
624
- 648,
625
- 840,
626
- 890,
627
- 857
628
- ],
629
- "page_idx": 2
630
- },
631
- {
632
- "type": "text",
633
- "text": "$P_{3}$ is upscaled by factor of 2 and added to $I_{0.5}^{H}$ and passed through encoder Enc2 to generate $F_{2}^{*}$ . Encoder output from",
634
- "bbox": [
635
- 500,
636
- 869,
637
- 892,
638
- 902
639
- ],
640
- "page_idx": 2
641
- },
642
- {
643
- "type": "image",
644
- "img_path": "images/0342445da3bc03bee3acb95c0b949ec7c2b25382f8143b73abcebee7abd69fc2.jpg",
645
- "image_caption": [
646
- "Figure 2: Architecture diagram of Deep Multi-Scale Hierarchical Network. $\\updownarrow$ denotes Upsampling by factor of 2 and $\\bigoplus$ denotes residual addition."
647
- ],
648
- "image_footnote": [],
649
- "bbox": [
650
- 200,
651
- 85,
652
- 761,
653
- 314
654
- ],
655
- "page_idx": 3
656
- },
657
- {
658
- "type": "text",
659
- "text": "previous level is upscaled and added to intermediate feature map $F_{2}^{*}$ and fed to the decoder $Dec_{2}$ .",
660
- "bbox": [
661
- 75,
662
- 388,
663
- 468,
664
- 421
665
- ],
666
- "page_idx": 3
667
- },
668
- {
669
- "type": "equation",
670
- "text": "\n$$\nF _ {2} ^ {*} = \\operatorname {E n c} _ {2} \\left(I _ {0. 5} ^ {H} + u p \\left(P _ {3}\\right)\\right) \\tag {14}\n$$\n",
671
- "text_format": "latex",
672
- "bbox": [
673
- 178,
674
- 429,
675
- 467,
676
- 449
677
- ],
678
- "page_idx": 3
679
- },
680
- {
681
- "type": "equation",
682
- "text": "\n$$\nF _ {2} = F _ {2} ^ {*} + u p \\left(F _ {3}\\right) \\tag {15}\n$$\n",
683
- "text_format": "latex",
684
- "bbox": [
685
- 236,
686
- 450,
687
- 467,
688
- 465
689
- ],
690
- "page_idx": 3
691
- },
692
- {
693
- "type": "equation",
694
- "text": "\n$$\nP _ {2} = \\operatorname {D e c} _ {2} \\left(F _ {2}\\right) \\tag {16}\n$$\n",
695
- "text_format": "latex",
696
- "bbox": [
697
- 256,
698
- 469,
699
- 467,
700
- 484
701
- ],
702
- "page_idx": 3
703
- },
704
- {
705
- "type": "text",
706
- "text": "where $up(.)$ denotes Upsampling operation by a factor of 2. Residual feature map $P_{2}$ from level-2 is added to the input hazy image and fed to encoder $Enc_{1}$ . Encoder output is added with upscaled $F_{2}$ and passed through decoder to synthesize the dehazed output $\\hat{I}$ .",
707
- "bbox": [
708
- 75,
709
- 496,
710
- 468,
711
- 571
712
- ],
713
- "page_idx": 3
714
- },
715
- {
716
- "type": "equation",
717
- "text": "\n$$\nF _ {1} ^ {*} = \\operatorname {E n c} _ {2} \\left(I ^ {H} + u p \\left(P _ {2}\\right)\\right) \\tag {17}\n$$\n",
718
- "text_format": "latex",
719
- "bbox": [
720
- 179,
721
- 580,
722
- 467,
723
- 599
724
- ],
725
- "page_idx": 3
726
- },
727
- {
728
- "type": "equation",
729
- "text": "\n$$\nF _ {1} = F _ {1} ^ {*} + u p \\left(F _ {2}\\right) \\tag {18}\n$$\n",
730
- "text_format": "latex",
731
- "bbox": [
732
- 233,
733
- 601,
734
- 467,
735
- 618
736
- ],
737
- "page_idx": 3
738
- },
739
- {
740
- "type": "equation",
741
- "text": "\n$$\n\\hat {I} = D e c _ {1} \\left(F _ {1}\\right) \\tag {19}\n$$\n",
742
- "text_format": "latex",
743
- "bbox": [
744
- 264,
745
- 622,
746
- 467,
747
- 640
748
- ],
749
- "page_idx": 3
750
- },
751
- {
752
- "type": "text",
753
- "text": "3.3. Encoder and Decoder Architecture:",
754
- "text_level": 1,
755
- "bbox": [
756
- 76,
757
- 648,
758
- 387,
759
- 666
760
- ],
761
- "page_idx": 3
762
- },
763
- {
764
- "type": "text",
765
- "text": "We use the same encoder and decoder architecture at all levels of DMPHN and DMSHN. The encoder consists of 15 convolutional layers, 6 residual connections and 6 ReLU units. The layers in the decoder and encoder are similar except that 2 convolutional layers are replaced by deconvolutional layers to generate dehazed images as output.",
766
- "bbox": [
767
- 75,
768
- 672,
769
- 468,
770
- 763
771
- ],
772
- "page_idx": 3
773
- },
774
- {
775
- "type": "text",
776
- "text": "4. Experiments",
777
- "text_level": 1,
778
- "bbox": [
779
- 76,
780
- 777,
781
- 209,
782
- 792
783
- ],
784
- "page_idx": 3
785
- },
786
- {
787
- "type": "text",
788
- "text": "4.1. Dataset Description:",
789
- "text_level": 1,
790
- "bbox": [
791
- 76,
792
- 801,
793
- 269,
794
- 816
795
- ],
796
- "page_idx": 3
797
- },
798
- {
799
- "type": "text",
800
- "text": "We used NH-HAZE dataset[3] provided for NTIRE 2020 Nonhomogeneous Image Dehazing challenge in our experiments. This dataset contains a total of 55 hazy and clear image pairs, divided into trainset of 45 image pairs, validation set of 5 image pairs and test set of 5 image pairs. Val",
801
- "bbox": [
802
- 75,
803
- 824,
804
- 467,
805
- 901
806
- ],
807
- "page_idx": 3
808
- },
809
- {
810
- "type": "text",
811
- "text": "idation and test ground truth images are not publicly available at this moment. Resolution of images in this dataset is $1200 \\times 1600$ . This dataset contains hazed and hazefree images of various outdoor scenes. A few hazefree and hazed image pairs from this dataset is shown in Figure-4.",
812
- "bbox": [
813
- 498,
814
- 388,
815
- 892,
816
- 465
817
- ],
818
- "page_idx": 3
819
- },
820
- {
821
- "type": "text",
822
- "text": "4.2. Training data preparation:",
823
- "text_level": 1,
824
- "bbox": [
825
- 500,
826
- 474,
827
- 743,
828
- 492
829
- ],
830
- "page_idx": 3
831
- },
832
- {
833
- "type": "text",
834
- "text": "Due to the small amount of available data, we divide each image into 100 non-overlapping patches. Thus we obtain a training set of 4500 image-pairs of resolution $120 \\times 160$ . No data augmentation techniques were used.",
835
- "bbox": [
836
- 498,
837
- 498,
838
- 890,
839
- 560
840
- ],
841
- "page_idx": 3
842
- },
843
- {
844
- "type": "text",
845
- "text": "4.3. Loss functions:",
846
- "text_level": 1,
847
- "bbox": [
848
- 500,
849
- 569,
850
- 651,
851
- 584
852
- ],
853
- "page_idx": 3
854
- },
855
- {
856
- "type": "text",
857
- "text": "We use a linear combination of the following loss functions as our optimization objective.",
858
- "bbox": [
859
- 498,
860
- 593,
861
- 888,
862
- 622
863
- ],
864
- "page_idx": 3
865
- },
866
- {
867
- "type": "text",
868
- "text": "Reconstruction loss: Reconstruction loss helps the network to generate dehazed frames close to the ground truth. Our reconstruction loss is a weighted sum of MAE or $L_{1}$ loss and MSE or $L_{2}$ loss. The reconstruction loss is given by,",
869
- "bbox": [
870
- 498,
871
- 623,
872
- 890,
873
- 698
874
- ],
875
- "page_idx": 3
876
- },
877
- {
878
- "type": "equation",
879
- "text": "\n$$\nL _ {r} = \\lambda_ {1} L _ {1} + \\lambda_ {2} L _ {2} \\tag {20}\n$$\n",
880
- "text_format": "latex",
881
- "bbox": [
882
- 627,
883
- 700,
884
- 890,
885
- 715
886
- ],
887
- "page_idx": 3
888
- },
889
- {
890
- "type": "text",
891
- "text": "where $L_{1} = \\left\\| \\hat{I} - I\\right\\|_{1}$ and $L_{2} = \\left\\| \\hat{I} - I\\right\\|_{2}$",
892
- "bbox": [
893
- 500,
894
- 720,
895
- 784,
896
- 746
897
- ],
898
- "page_idx": 3
899
- },
900
- {
901
- "type": "text",
902
- "text": "Perceptual loss: $\\bar{L}_2$ distance between features extracted from conv4_3 layer of VGGNet[20] of predicted and ground truth images are used as Perceptual loss[11]. Perceptual loss is given by,",
903
- "bbox": [
904
- 498,
905
- 746,
906
- 890,
907
- 804
908
- ],
909
- "page_idx": 3
910
- },
911
- {
912
- "type": "equation",
913
- "text": "\n$$\nL _ {p} = \\left\\| \\phi (\\hat {I}) - \\phi (I) \\right\\| _ {2} \\tag {21}\n$$\n",
914
- "text_format": "latex",
915
- "bbox": [
916
- 617,
917
- 804,
918
- 890,
919
- 830
920
- ],
921
- "page_idx": 3
922
- },
923
- {
924
- "type": "text",
925
- "text": "TV loss: We use Total Variation(TV) loss[11] makes predictions smooth. TV loss is given by,",
926
- "bbox": [
927
- 498,
928
- 835,
929
- 890,
930
- 867
931
- ],
932
- "page_idx": 3
933
- },
934
- {
935
- "type": "equation",
936
- "text": "\n$$\nL _ {t v} = \\left\\| \\nabla_ {x} \\hat {I} \\right\\| _ {2} + \\left\\| \\nabla_ {y} \\hat {I} \\right\\| _ {2} \\tag {22}\n$$\n",
937
- "text_format": "latex",
938
- "bbox": [
939
- 604,
940
- 877,
941
- 890,
942
- 905
943
- ],
944
- "page_idx": 3
945
- },
946
- {
947
- "type": "image",
948
- "img_path": "images/b75b6ea7b7f05d654b8199a035bb11b5db37f9dab5e2bcf280a0a6a9b5e269ce.jpg",
949
- "image_caption": [
950
- "Figure 3: Encoder and Decoder architecture. Within brackets written values are Input Channel, Output Channel, Kernel and Stride respectively."
951
- ],
952
- "image_footnote": [],
953
- "bbox": [
954
- 83,
955
- 84,
956
- 890,
957
- 232
958
- ],
959
- "page_idx": 4
960
- },
961
- {
962
- "type": "image",
963
- "img_path": "images/53783e9f932eacc09c4fbf0cb7681c251c52803869c27e2992aba00126d9d1c0.jpg",
964
- "image_caption": [
965
- "Figure 4: A snapshot of Training Dataset. Top row contains hazy images and bottom row contains corresponding ground truth images."
966
- ],
967
- "image_footnote": [],
968
- "bbox": [
969
- 161,
970
- 289,
971
- 807,
972
- 476
973
- ],
974
- "page_idx": 4
975
- },
976
- {
977
- "type": "text",
978
- "text": "Our final loss function is given by,",
979
- "bbox": [
980
- 96,
981
- 547,
982
- 326,
983
- 563
984
- ],
985
- "page_idx": 4
986
- },
987
- {
988
- "type": "equation",
989
- "text": "\n$$\nL = \\lambda_ {r} L _ {r} + \\lambda_ {p} L _ {p} + \\lambda_ {t v} L _ {t v} \\tag {23}\n$$\n",
990
- "text_format": "latex",
991
- "bbox": [
992
- 174,
993
- 578,
994
- 468,
995
- 595
996
- ],
997
- "page_idx": 4
998
- },
999
- {
1000
- "type": "text",
1001
- "text": "In our experiments we choose $\\lambda_r = 1$ , $\\lambda_p = 6e - 3$ , $\\lambda_{tv} = 2e - 8$ . $\\lambda_1$ and $\\lambda_2$ is chosen to be 0.6 and 0.4 respectively.",
1002
- "bbox": [
1003
- 76,
1004
- 611,
1005
- 468,
1006
- 656
1007
- ],
1008
- "page_idx": 4
1009
- },
1010
- {
1011
- "type": "text",
1012
- "text": "4.4. Training details:",
1013
- "text_level": 1,
1014
- "bbox": [
1015
- 76,
1016
- 670,
1017
- 238,
1018
- 686
1019
- ],
1020
- "page_idx": 4
1021
- },
1022
- {
1023
- "type": "text",
1024
- "text": "We developed our models using Pytorch[16] on a system with AMD Ryzen 1600X CPU and NVIDIA GTX 1080 GPU. We use Adam optimizer[12] to train our networks with values of $\\beta_{1}$ and $\\beta_{2}$ 0.9 and 0.99 respectively. We use batchsize of 8. Initial learning rate is set to be 1e-4 which is gradually decreased to 5e-5. We train our models until convergence.",
1025
- "bbox": [
1026
- 75,
1027
- 695,
1028
- 468,
1029
- 801
1030
- ],
1031
- "page_idx": 4
1032
- },
1033
- {
1034
- "type": "text",
1035
- "text": "4.5. Testing details:",
1036
- "text_level": 1,
1037
- "bbox": [
1038
- 76,
1039
- 815,
1040
- 228,
1041
- 830
1042
- ],
1043
- "page_idx": 4
1044
- },
1045
- {
1046
- "type": "text",
1047
- "text": "We test our models' performance on the given full resolution images of validation data. Please note that, our models are fully convolutional, hence difference between train and test image size should not matter.",
1048
- "bbox": [
1049
- 75,
1050
- 839,
1051
- 468,
1052
- 901
1053
- ],
1054
- "page_idx": 4
1055
- },
1056
- {
1057
- "type": "text",
1058
- "text": "4.6. Results:",
1059
- "text_level": 1,
1060
- "bbox": [
1061
- 500,
1062
- 547,
1063
- 598,
1064
- 561
1065
- ],
1066
- "page_idx": 4
1067
- },
1068
- {
1069
- "type": "text",
1070
- "text": "4.6.1 Quantitative and Qualitative Results:",
1071
- "text_level": 1,
1072
- "bbox": [
1073
- 500,
1074
- 582,
1075
- 815,
1076
- 598
1077
- ],
1078
- "page_idx": 4
1079
- },
1080
- {
1081
- "type": "text",
1082
- "text": "As ground truth for validation set is not publicly available, we submit our validation results to Codalab server. We compare performance of our models with three state-of-the-art dehazing models namely AtJ-DH[10], 123-CEDH[9] and FFA-Net[17]. The quantitative results on Validation set are given in Table-1. DMPHN is performing better than the rest of the models. It can be observed that our Multi-patch network is performing better than our Multi-scale network in terms of both PSNR and SSIM. At lower levels of DMPHN, the network works on patch level, so the network learns local features compared to global features learnt by DMSHN, which explains the performance gain in DMPHN.",
1083
- "bbox": [
1084
- 496,
1085
- 631,
1086
- 890,
1087
- 813
1088
- ],
1089
- "page_idx": 4
1090
- },
1091
- {
1092
- "type": "text",
1093
- "text": "Apart from decent dehazing results, it is to be noted that both DMPHN and DMSHN are lightweight and efficient models. Checkpoints of both the networks take 21.7 MB on disk. GPU processing times for DMPHN and DMPSN make them suitable for real-time applications.",
1094
- "bbox": [
1095
- 496,
1096
- 825,
1097
- 892,
1098
- 901
1099
- ],
1100
- "page_idx": 4
1101
- },
1102
- {
1103
- "type": "image",
1104
- "img_path": "images/322dacf900c71f0650b47c7ba42723702ad7bcb3a68df5d94e63217f1a833931.jpg",
1105
- "image_caption": [],
1106
- "image_footnote": [],
1107
- "bbox": [
1108
- 80,
1109
- 88,
1110
- 210,
1111
- 166
1112
- ],
1113
- "page_idx": 5
1114
- },
1115
- {
1116
- "type": "image",
1117
- "img_path": "images/39333b7d0708f08b06a14fa0512445b94d2d73c027eb8ae01443ef63137e219e.jpg",
1118
- "image_caption": [],
1119
- "image_footnote": [],
1120
- "bbox": [
1121
- 215,
1122
- 88,
1123
- 346,
1124
- 166
1125
- ],
1126
- "page_idx": 5
1127
- },
1128
- {
1129
- "type": "image",
1130
- "img_path": "images/277e66f5bcdb4b1e35284a505aac004d7c384ad1fc1e260886bdabd0ec185d92.jpg",
1131
- "image_caption": [],
1132
- "image_footnote": [],
1133
- "bbox": [
1134
- 351,
1135
- 88,
1136
- 482,
1137
- 166
1138
- ],
1139
- "page_idx": 5
1140
- },
1141
- {
1142
- "type": "image",
1143
- "img_path": "images/aaf822e5068e3e78eb973d79d4a5bcd09b9523ab3e2ae0537fef5f072bf312b3.jpg",
1144
- "image_caption": [],
1145
- "image_footnote": [],
1146
- "bbox": [
1147
- 485,
1148
- 88,
1149
- 617,
1150
- 166
1151
- ],
1152
- "page_idx": 5
1153
- },
1154
- {
1155
- "type": "image",
1156
- "img_path": "images/ff69bb4f9d7194a2314db839708d935bcac1716ff8cdad75fda7b2a35e29ff0e.jpg",
1157
- "image_caption": [],
1158
- "image_footnote": [],
1159
- "bbox": [
1160
- 620,
1161
- 88,
1162
- 754,
1163
- 166
1164
- ],
1165
- "page_idx": 5
1166
- },
1167
- {
1168
- "type": "image",
1169
- "img_path": "images/1bb380d079fc7923dce507e6a8946b04ccb45777a59c4233e36f1ea1e316716d.jpg",
1170
- "image_caption": [],
1171
- "image_footnote": [],
1172
- "bbox": [
1173
- 758,
1174
- 88,
1175
- 890,
1176
- 166
1177
- ],
1178
- "page_idx": 5
1179
- },
1180
- {
1181
- "type": "image",
1182
- "img_path": "images/16d30bb661b01669099c1ede79200b942585bef680dd454051c1007c9be9c9e4.jpg",
1183
- "image_caption": [],
1184
- "image_footnote": [],
1185
- "bbox": [
1186
- 80,
1187
- 169,
1188
- 210,
1189
- 247
1190
- ],
1191
- "page_idx": 5
1192
- },
1193
- {
1194
- "type": "image",
1195
- "img_path": "images/036bcca9b8745e703c2d6eac42130e0b525c864130a462aaecef840f4ce0f6e5.jpg",
1196
- "image_caption": [],
1197
- "image_footnote": [],
1198
- "bbox": [
1199
- 215,
1200
- 169,
1201
- 346,
1202
- 247
1203
- ],
1204
- "page_idx": 5
1205
- },
1206
- {
1207
- "type": "image",
1208
- "img_path": "images/bafb1f94a9ac9de7be51d8f4512f4f0c8b56eb3af01e231d03d806991f552286.jpg",
1209
- "image_caption": [],
1210
- "image_footnote": [],
1211
- "bbox": [
1212
- 351,
1213
- 169,
1214
- 482,
1215
- 247
1216
- ],
1217
- "page_idx": 5
1218
- },
1219
- {
1220
- "type": "image",
1221
- "img_path": "images/289763fc9035f88d730e8708b8d3f27a9745a44a66c6d51f6ff8ab2fc71bedea.jpg",
1222
- "image_caption": [],
1223
- "image_footnote": [],
1224
- "bbox": [
1225
- 485,
1226
- 169,
1227
- 619,
1228
- 247
1229
- ],
1230
- "page_idx": 5
1231
- },
1232
- {
1233
- "type": "image",
1234
- "img_path": "images/d90a9801e197ed345da27802b6006a8df7d898a2db6d33e1d6d76ab764b074fd.jpg",
1235
- "image_caption": [],
1236
- "image_footnote": [],
1237
- "bbox": [
1238
- 622,
1239
- 169,
1240
- 754,
1241
- 247
1242
- ],
1243
- "page_idx": 5
1244
- },
1245
- {
1246
- "type": "image",
1247
- "img_path": "images/7818add7126c6691210bea765f9cc152e3eba05730675ffedb012559fb48d6e3.jpg",
1248
- "image_caption": [],
1249
- "image_footnote": [],
1250
- "bbox": [
1251
- 758,
1252
- 169,
1253
- 890,
1254
- 247
1255
- ],
1256
- "page_idx": 5
1257
- },
1258
- {
1259
- "type": "image",
1260
- "img_path": "images/e2ae1cbb238ff65555c1591fed18aea688b6dc8e7691db70aa05cd5ed6dbd5f1.jpg",
1261
- "image_caption": [
1262
- "(a) Input Image"
1263
- ],
1264
- "image_footnote": [],
1265
- "bbox": [
1266
- 80,
1267
- 251,
1268
- 210,
1269
- 327
1270
- ],
1271
- "page_idx": 5
1272
- },
1273
- {
1274
- "type": "image",
1275
- "img_path": "images/dace9d1bc36ebadd1dff706d809d3f44a6a6e725b1f09da206b960864a88ffdf.jpg",
1276
- "image_caption": [
1277
- "(b) AtJ-DH"
1278
- ],
1279
- "image_footnote": [],
1280
- "bbox": [
1281
- 215,
1282
- 251,
1283
- 346,
1284
- 327
1285
- ],
1286
- "page_idx": 5
1287
- },
1288
- {
1289
- "type": "image",
1290
- "img_path": "images/b2c2c16f899acfe25e056b4f40bac094e6bc23ce53664757c4ac86b1297a488f.jpg",
1291
- "image_caption": [
1292
- "(c) 123-CEDH"
1293
- ],
1294
- "image_footnote": [],
1295
- "bbox": [
1296
- 351,
1297
- 251,
1298
- 482,
1299
- 327
1300
- ],
1301
- "page_idx": 5
1302
- },
1303
- {
1304
- "type": "image",
1305
- "img_path": "images/970b251056da637eef385da25e0a04f22286444008de703dcdd3a1161dd390a0.jpg",
1306
- "image_caption": [
1307
- "(d) FFA-Net"
1308
- ],
1309
- "image_footnote": [],
1310
- "bbox": [
1311
- 486,
1312
- 251,
1313
- 619,
1314
- 327
1315
- ],
1316
- "page_idx": 5
1317
- },
1318
- {
1319
- "type": "image",
1320
- "img_path": "images/99755871b7e562ce1eab3fea5b56908bac3c75b3130c6384cce7e568b559b196.jpg",
1321
- "image_caption": [
1322
- "(e) Ours (DMPHN)",
1323
- "Figure 5: Qualitative results on NH-HAZE[3] Validation dataset."
1324
- ],
1325
- "image_footnote": [],
1326
- "bbox": [
1327
- 622,
1328
- 251,
1329
- 754,
1330
- 327
1331
- ],
1332
- "page_idx": 5
1333
- },
1334
- {
1335
- "type": "image",
1336
- "img_path": "images/b0412d4b6cff0e050c3862848499c707b64400ab900b41c6bcdf12cdd084e2cb.jpg",
1337
- "image_caption": [
1338
- "(f) Ours (DMSHN)"
1339
- ],
1340
- "image_footnote": [],
1341
- "bbox": [
1342
- 758,
1343
- 251,
1344
- 890,
1345
- 327
1346
- ],
1347
- "page_idx": 5
1348
- },
1349
- {
1350
- "type": "table",
1351
- "img_path": "images/6f6b333cfaa48ce52604bac7057871962515e3dec4fab5e16cfc6d0a3905ee50.jpg",
1352
- "table_caption": [],
1353
- "table_footnote": [],
1354
- "table_body": "<table><tr><td></td><td>PSNR</td><td>SSIM</td><td>Runtime(s)</td></tr><tr><td>AtJ-DH[10]</td><td>15.94</td><td>0.5662</td><td>0.0775</td></tr><tr><td>123-CEDH[9]</td><td>14.59</td><td>0.5488</td><td>0.0559</td></tr><tr><td>FFA-Net[17]</td><td>10.43</td><td>0.4168</td><td>1.7472</td></tr><tr><td>DMPHN</td><td>16.94</td><td>0.6177</td><td>0.0145</td></tr><tr><td>DMPSN</td><td>16.42</td><td>0.5991</td><td>0.0210</td></tr></table>",
1355
- "bbox": [
1356
- 107,
1357
- 393,
1358
- 439,
1359
- 489
1360
- ],
1361
- "page_idx": 5
1362
- },
1363
- {
1364
- "type": "text",
1365
- "text": "Table 1: Quantitative results on NH-HAZE[3] Validation set.",
1366
- "bbox": [
1367
- 76,
1368
- 494,
1369
- 468,
1370
- 523
1371
- ],
1372
- "page_idx": 5
1373
- },
1374
- {
1375
- "type": "text",
1376
- "text": "4.6.2 NTIRE 2020 challenge on NonHomogeneous Image Dehazing:",
1377
- "text_level": 1,
1378
- "bbox": [
1379
- 76,
1380
- 554,
1381
- 468,
1382
- 585
1383
- ],
1384
- "page_idx": 5
1385
- },
1386
- {
1387
- "type": "text",
1388
- "text": "We participated in NTIRE 2020 challenge on NonHomogeneous Image Dehazing[5]. 27 teams submitted results in test phase, out of which 19 teams don't take help of extra training data like Dense-Haze[2, 6] and OHaze[4, 1]. The test results were evaluated on Fidelity measures as well as Perceptual Measures. Fidelity measures included PSNR and SSIM[21], where LPIPS[24], Perceptual Index(PI)[7] and Mean Opinion Score(MOS) were used as Perceptual metrics. For fair comparison, we note down performances of some submissions that used only NH-HAZE dataset in Table-2. Our DMPHN network produced moderate quality outputs both in Fidelity and Perceptual metrics. Our network is the fastest entry among all the submissions.",
1389
- "bbox": [
1390
- 75,
1391
- 594,
1392
- 468,
1393
- 791
1394
- ],
1395
- "page_idx": 5
1396
- },
1397
- {
1398
- "type": "text",
1399
- "text": "4.6.3 Dense Haze Removal:",
1400
- "text_level": 1,
1401
- "bbox": [
1402
- 76,
1403
- 815,
1404
- 282,
1405
- 829
1406
- ],
1407
- "page_idx": 5
1408
- },
1409
- {
1410
- "type": "text",
1411
- "text": "DMPHN is effective for dense haze removal as well. We trained our network on Dense-HAZE dataset[2]. We train on 50 images for training and use 5 images for test. We compare the performance with AtJ-DH[10], 123-CEDH[9]",
1412
- "bbox": [
1413
- 75,
1414
- 839,
1415
- 468,
1416
- 902
1417
- ],
1418
- "page_idx": 5
1419
- },
1420
- {
1421
- "type": "text",
1422
- "text": "and FFA-Net[17]. Quantitative results and GPU runtimes are shown in Table-3. We observe that DMPHN is significantly better than other models both in terms of fidelity measures and runtime. Figure-6 shows qualitative comparison with the said models.",
1423
- "bbox": [
1424
- 498,
1425
- 396,
1426
- 890,
1427
- 470
1428
- ],
1429
- "page_idx": 5
1430
- },
1431
- {
1432
- "type": "text",
1433
- "text": "4.7. Conclusion",
1434
- "text_level": 1,
1435
- "bbox": [
1436
- 500,
1437
- 481,
1438
- 622,
1439
- 496
1440
- ],
1441
- "page_idx": 5
1442
- },
1443
- {
1444
- "type": "text",
1445
- "text": "In this paper, we use a Multi-Patch and a Multi-Scale architecture for Nonhomogeneous haze removal from images. We show that DMPHN is better than DMSHN because DMPHN aggregates local features generated from a finer level to coarser level. Moreover, DMPHN is a fast algorithm and can dehaze images from a video sequence in real-time. We also show that DMPHN performs well for Dense Haze Removal. In future, the effectiveness of DMPHN with more levels can be explored for performance improvement, but the addition of more levels to architecture will subject to sacrifice in runtime.",
1446
- "bbox": [
1447
- 496,
1448
- 503,
1449
- 892,
1450
- 670
1451
- ],
1452
- "page_idx": 5
1453
- },
1454
- {
1455
- "type": "table",
1456
- "img_path": "images/6730f990ad9eab47f4f95504ea356cb074ef996c5ea24b14b006c1bc878230ad.jpg",
1457
- "table_caption": [],
1458
- "table_footnote": [],
1459
- "table_body": "<table><tr><td rowspan=\"2\">Team</td><td colspan=\"2\">Fidelity</td><td colspan=\"3\">Perceptual quality</td><td rowspan=\"2\">Runtime(s)↓</td><td rowspan=\"2\">GPU/CPU</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>PI↓</td><td>MOS↓</td></tr><tr><td>method1</td><td>21.60</td><td>0.67</td><td>0.363</td><td>3.712</td><td>3</td><td>0.21</td><td>v100</td></tr><tr><td>method2</td><td>21.91</td><td>0.69</td><td>0.361</td><td>3.700</td><td>4</td><td>0.22</td><td>v100</td></tr><tr><td>method3</td><td>19.25</td><td>0.60</td><td>0.426</td><td>5.061</td><td>12</td><td>12.88</td><td>v100</td></tr><tr><td>method4</td><td>18.51</td><td>0.68</td><td>0.308</td><td>2.988</td><td>12</td><td>13.00</td><td>n/a</td></tr><tr><td>Ours (DMPHN)</td><td>18.24</td><td>0.65</td><td>0.329</td><td>3.051</td><td>14</td><td>0.01</td><td>1080</td></tr><tr><td>method5</td><td>18.70</td><td>0.64</td><td>0.328</td><td>3.114</td><td>14</td><td>10.43</td><td>1080ti</td></tr><tr><td>method6</td><td>18.67</td><td>0.64</td><td>0.303</td><td>3.211</td><td>16</td><td>1.64</td><td>TitanXP</td></tr><tr><td>method7</td><td>17.88</td><td>0.57</td><td>0.378</td><td>2.855</td><td>16</td><td>0.06</td><td>n/a</td></tr><tr><td>no processing</td><td>11.33</td><td>0.42</td><td>0.582</td><td>2.609</td><td>20</td><td></td><td></td></tr></table>",
1460
- "bbox": [
1461
- 163,
1462
- 88,
1463
- 805,
1464
- 262
1465
- ],
1466
- "page_idx": 6
1467
- },
1468
- {
1469
- "type": "image",
1470
- "img_path": "images/59f49e8016e6d9a461754e18cd5ba8e1c37340025f581297cbe64f9cfd3d7336.jpg",
1471
- "image_caption": [
1472
- "Figure 6: Qualitative results for Dense Haze Removal."
1473
- ],
1474
- "image_footnote": [],
1475
- "bbox": [
1476
- 80,
1477
- 297,
1478
- 890,
1479
- 554
1480
- ],
1481
- "page_idx": 6
1482
- },
1483
- {
1484
- "type": "table",
1485
- "img_path": "images/89b4180fd91230bf34e21ec704bcddb81557b665fe9fdc82cab9231b5e654201.jpg",
1486
- "table_caption": [
1487
- "Table 2: NTIRE 2020 Nonhomogeneous challenge[5] Leaderboard. Submissions are sorted in ascending order of MOS."
1488
- ],
1489
- "table_footnote": [],
1490
- "table_body": "<table><tr><td></td><td>PSNR</td><td>SSIM</td><td>Runtime(s)</td></tr><tr><td>AtJ-DH[10]</td><td>22.54</td><td>0.6436</td><td>0.0775</td></tr><tr><td>123-CEDH[9]</td><td>19.63</td><td>0.5758</td><td>0.0559</td></tr><tr><td>FFA-Net[17]</td><td>11.93</td><td>0.3790</td><td>1.7472</td></tr><tr><td>Ours(DMPHN)</td><td>23.41</td><td>0.6669</td><td>0.0145</td></tr></table>",
1491
- "bbox": [
1492
- 102,
1493
- 606,
1494
- 444,
1495
- 686
1496
- ],
1497
- "page_idx": 6
1498
- },
1499
- {
1500
- "type": "text",
1501
- "text": "Table 3: Quantitative Comparison on Dense-HAZE[2].",
1502
- "bbox": [
1503
- 91,
1504
- 691,
1505
- 452,
1506
- 705
1507
- ],
1508
- "page_idx": 6
1509
- },
1510
- {
1511
- "type": "text",
1512
- "text": "References",
1513
- "text_level": 1,
1514
- "bbox": [
1515
- 80,
1516
- 733,
1517
- 171,
1518
- 748
1519
- ],
1520
- "page_idx": 6
1521
- },
1522
- {
1523
- "type": "list",
1524
- "sub_type": "ref_text",
1525
- "list_items": [
1526
- "[1] C. Ancuti, C.O. Ancuti, R. Timofte, L. Van Gool, and L. Zhang et al. NTIRE 2018 challenge on image dehazing: Methods and results. IEEE CVPR, NTIRE Workshop, 2018. 6",
1527
- "[2] Codruta O Ancuti, Cosmin Ancuti, Mateu Sbert, and Radu Timofte. Dense-haze: A benchmark for image dehazing with dense-haze and haze-free images. In 2019 IEEE International Conference on Image Processing (ICIP), pages 1014-1018. IEEE, 2019. 6, 7",
1528
- "[3] Codruta O. Ancuti, Cosmin Ancuti, and Radu Timofte. NH-"
1529
- ],
1530
- "bbox": [
1531
- 84,
1532
- 758,
1533
- 467,
1534
- 898
1535
- ],
1536
- "page_idx": 6
1537
- },
1538
- {
1539
- "type": "list",
1540
- "sub_type": "ref_text",
1541
- "list_items": [
1542
- "HAZE: An image dehazing benchmark with nonhomogeneous hazy and haze-free images. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020. 4, 6",
1543
- "[4] Codruta O Ancuti, Cosmin Ancuti, Radu Timofte, and Christophe De Vleeschouwer. O-haze: a dehazing benchmark with real hazy and haze-free outdoor images. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 754-762, 2018. 6",
1544
- "[5] Codruta O. Ancuti, Cosmin Ancuti, Florin-Alexandru Vasluianu, Radu Timofte, et al. Ntire 2020 challenge on nonhomogeneous dehazing. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020. 6, 7",
1545
- "[6] C. O. Ancuti, C. Ancuti, R. Timofte, L. Van Gool, and L. Zhang et al. NTIRE 2019 challenge on image dehazing: Methods and results. IEEE CVPR, NTIRE Workshop, 2019. 6",
1546
- "[7] Yochai Blau, Roey Mechrez, Radu Timofte, Tomer Michaeli, and Lihi Zelnik-Manor. The 2018 pirm challenge on percep"
1547
- ],
1548
- "bbox": [
1549
- 508,
1550
- 609,
1551
- 890,
1552
- 901
1553
- ],
1554
- "page_idx": 6
1555
- },
1556
- {
1557
- "type": "list",
1558
- "sub_type": "ref_text",
1559
- "list_items": [
1560
- "tual image super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 0-0, 2018. 6",
1561
- "[8] Zijun Deng, Lei Zhu, Xiaowei Hu, Chi-Wing Fu, Xuemiao Xu, Qing Zhang, Jing Qin, and Pheng-Ann Heng. Deep multi-model fusion for single-image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 2453–2462, 2019. 2",
1562
- "[9] Tiantong Guo, Venkateswararao Cherukuri, and Vishal Monga. Dense123'color enhancement dehazing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 5, 6, 7",
1563
- "[10] Tiantong Guo, Xuelu Li, Venkateswararao Cherukuri, and Vishal Monga. Dense scene information estimation network for dehazing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 0–0, 2019. 5, 6, 7",
1564
- "[11] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In European conference on computer vision, pages 694–711. Springer, 2016. 4",
1565
- "[12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5",
1566
- "[13] Yunan Li, Qiguang Miao, Wanli Ouyang, Zhenxin Ma, Huijuan Fang, Chao Dong, and Yining Quan. Lap-net: Level-aware progressive network for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 3276-3285, 2019. 2",
1567
- "[14] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Grid-dehazenet: Attention-based multi-scale network for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 2",
1568
- "[15] Yang Liu, Jinshan Pan, Jimmy Ren, and Zhixun Su. Learning deep priors for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 2492–2500, 2019. 2",
1569
- "[16] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 5",
1570
- "[17] Xu Qin, Zhilin Wang, Yuanchao Bai, Xiaodong Xie, and Huizhu Jia. Ffa-net: Feature fusion attention network for single image dehazing. arXiv preprint arXiv:1911.07559, 2019. 2, 5, 6, 7",
1571
- "[18] Yanyun Qu, Yizi Chen, Jingying Huang, and Yuan Xie. Enhanced pix2pix dehazing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8160-8168, 2019. 2",
1572
- "[19] Prasen Sharma, Priyankar Jain, and Arijit Sur. Scale-aware conditional generative adversarial network for image dehaz-"
1573
- ],
1574
- "bbox": [
1575
- 80,
1576
- 92,
1577
- 468,
1578
- 900
1579
- ],
1580
- "page_idx": 7
1581
- },
1582
- {
1583
- "type": "list",
1584
- "sub_type": "ref_text",
1585
- "list_items": [
1586
- "ing. In The IEEE Winter Conference on Applications of Computer Vision, pages 2355-2365, 2020. 2",
1587
- "[20] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 4",
1588
- "[21] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6",
1589
- "[22] Hongguang Zhang, Yuchao Dai, Hongdong Li, and Piotr Koniusz. Deep stacked hierarchical multi-patch network for image deblurring. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5978-5986, 2019. 2",
1590
- "[23] He Zhang and Vishal M Patel. Densely connected pyramid dehazing network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3194-3203, 2018. 2",
1591
- "[24] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018. 6"
1592
- ],
1593
- "bbox": [
1594
- 501,
1595
- 92,
1596
- 890,
1597
- 416
1598
- ],
1599
- "page_idx": 7
1600
- }
1601
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23b303a9bc2c37c98268abd0583577b27500a1c1d9bd784fc872333e30baf13f
3
+ size 57579
 
data/2020/2005_05xxx/2005.05999/55470d3a-eba0-4dac-8f5a-e101cace3cb0_model.json CHANGED
@@ -1,1899 +1,3 @@
1
- [
2
- [
3
- {
4
- "type": "aside_text",
5
- "bbox": [
6
- 0.023,
7
- 0.256,
8
- 0.058,
9
- 0.706
10
- ],
11
- "angle": 270,
12
- "content": "arXiv:2005.05999v1 [cs.CV] 12 May 2020"
13
- },
14
- {
15
- "type": "title",
16
- "bbox": [
17
- 0.115,
18
- 0.13,
19
- 0.856,
20
- 0.176
21
- ],
22
- "angle": 0,
23
- "content": "Fast Deep Multi-patch Hierarchical Network for Nonhomogeneous Image Dehazing"
24
- },
25
- {
26
- "type": "text",
27
- "bbox": [
28
- 0.263,
29
- 0.205,
30
- 0.422,
31
- 0.256
32
- ],
33
- "angle": 0,
34
- "content": "Sourya Dipta Das* \nJadavpur University \nKolkata, India"
35
- },
36
- {
37
- "type": "text",
38
- "bbox": [
39
- 0.245,
40
- 0.261,
41
- 0.442,
42
- 0.274
43
- ],
44
- "angle": 0,
45
- "content": "dipta.juetce@gmail.com"
46
- },
47
- {
48
- "type": "text",
49
- "bbox": [
50
- 0.558,
51
- 0.205,
52
- 0.669,
53
- 0.238
54
- ],
55
- "angle": 0,
56
- "content": "Saikat Dutta* IIT Madras"
57
- },
58
- {
59
- "type": "text",
60
- "bbox": [
61
- 0.556,
62
- 0.24,
63
- 0.675,
64
- 0.256
65
- ],
66
- "angle": 0,
67
- "content": "Chennai, India"
68
- },
69
- {
70
- "type": "text",
71
- "bbox": [
72
- 0.503,
73
- 0.261,
74
- 0.726,
75
- 0.273
76
- ],
77
- "angle": 0,
78
- "content": "cs18s016@smail.iitm.ac.in"
79
- },
80
- {
81
- "type": "title",
82
- "bbox": [
83
- 0.235,
84
- 0.31,
85
- 0.314,
86
- 0.325
87
- ],
88
- "angle": 0,
89
- "content": "Abstract"
90
- },
91
- {
92
- "type": "text",
93
- "bbox": [
94
- 0.076,
95
- 0.341,
96
- 0.473,
97
- 0.599
98
- ],
99
- "angle": 0,
100
- "content": "Recently, CNN based end-to-end deep learning methods achieve superiority in Image Dehazing but they tend to fail drastically in Non-homogeneous dehazing. Apart from that, existing popular Multi-scale approaches are runtime intensive and memory inefficient. In this context, we proposed a fast Deep Multi-patch Hierarchical Network to restore Non-homogeneous hazed images by aggregating features from multiple image patches from different spatial sections of the hazed image with fewer number of network parameters. Our proposed method is quite robust for different environments with various density of the haze or fog in the scene and very lightweight as the total size of the model is around 21.7 MB. It also provides faster runtime compared to current multi-scale methods with an average runtime of 0.0145s to process \\(1200 \\times 1600\\) HD quality image. Finally, we show the superiority of this network on Dense Haze Removal to other state-of-the-art models."
101
- },
102
- {
103
- "type": "title",
104
- "bbox": [
105
- 0.078,
106
- 0.625,
107
- 0.21,
108
- 0.64
109
- ],
110
- "angle": 0,
111
- "content": "1. Introduction"
112
- },
113
- {
114
- "type": "text",
115
- "bbox": [
116
- 0.076,
117
- 0.65,
118
- 0.47,
119
- 0.816
120
- ],
121
- "angle": 0,
122
- "content": "Outdoor images are often deteriorated due to the extreme weather, such as fog and haze, which influences visibility issues in the scene because of the degradation of color, contrast and textures for different distant objects, selective attenuation of the light spectrum. Restoring such hazed images has become an important problem in many computer vision applications like visual surveillance, remote sensing, and Autonomous transportation etc. Most of early methods proposed for image dehazing are based on the classic atmospheric scattering model which is shown as the following equation. 1."
123
- },
124
- {
125
- "type": "equation",
126
- "bbox": [
127
- 0.164,
128
- 0.825,
129
- 0.469,
130
- 0.842
131
- ],
132
- "angle": 0,
133
- "content": "\\[\nI (x) = J (x) t (x) + A (1 - t (x)) \\tag {1}\n\\]"
134
- },
135
- {
136
- "type": "text",
137
- "bbox": [
138
- 0.076,
139
- 0.849,
140
- 0.47,
141
- 0.88
142
- ],
143
- "angle": 0,
144
- "content": "where, \\( x \\) represents pixel locations, \\( I(x) \\) is the observed hazy image, \\( J(x) \\) is the dehazed image, \\( t(x) \\) is called"
145
- },
146
- {
147
- "type": "text",
148
- "bbox": [
149
- 0.5,
150
- 0.311,
151
- 0.895,
152
- 0.81
153
- ],
154
- "angle": 0,
155
- "content": "medium transmission function and \\( A \\) is the global atmospheric light. Recently, Deep learning based methods have shown remarkable improvements though those methods suffer from degradation of colour, texture in image, halo artifacts, haze residuals and distortions. In our problem statement, Non-homogeneous haze in the scene can be seen in the real world situation where different spatial domains of the image can be affected by different levels of haze. The degradation level also varies a lot for objects at different scene depth due to non-uniform haze distribution in the image. Few example images of such Non-homogeneous haze are shown in figure 4. Dehazing model should put more effort to handle non-uniform haze and different degradation between different scene depth jointly. Multi-scale and scale-recurrent models can be a viable solution in this type of problem because of its coarse-to-fine learning scheme by hierarchical integration of features from different spatial scale of the image. This type of methods is inefficient because of high runtime and large model size due to a lot of convolution and Deconvolution layers. Apart from that, increasing depth of layers at fine scale levels may not always improve the perceptual quality of the output dehazed image. On the contrary, main goal of our model is to aggregate features multiple image patches from different spatial sections of the image for better performance. The parameters of our encoder and decoder are very less due to residual links in our model which helps in fast dehazing inference. The main intuition behind our idea is to make the lower level network portion focus on local information by extracting local features from the finer grid to produce residual information for the upper level part of the network to get more global information from both finer and coarser grid which is achieved by concatenating convolutional features."
156
- },
157
- {
158
- "type": "title",
159
- "bbox": [
160
- 0.501,
161
- 0.829,
162
- 0.642,
163
- 0.844
164
- ],
165
- "angle": 0,
166
- "content": "2. Related Work"
167
- },
168
- {
169
- "type": "text",
170
- "bbox": [
171
- 0.5,
172
- 0.856,
173
- 0.893,
174
- 0.901
175
- ],
176
- "angle": 0,
177
- "content": "Most early work of image dehazing methods is developed on atmosphere scattering model as it's physical model. In that respect, previous works on image dehazing can be"
178
- },
179
- {
180
- "type": "page_footnote",
181
- "bbox": [
182
- 0.096,
183
- 0.888,
184
- 0.206,
185
- 0.9
186
- ],
187
- "angle": 0,
188
- "content": "*Equal contribution."
189
- },
190
- {
191
- "type": "page_number",
192
- "bbox": [
193
- 0.481,
194
- 0.925,
195
- 0.49,
196
- 0.937
197
- ],
198
- "angle": 0,
199
- "content": "1"
200
- }
201
- ],
202
- [
203
- {
204
- "type": "text",
205
- "bbox": [
206
- 0.081,
207
- 0.092,
208
- 0.468,
209
- 0.241
210
- ],
211
- "angle": 0,
212
- "content": "segregated into two classes which are traditional image prior-based methods and end to end deep learning based methods. Traditional image prior based methods relies on hand-crafted statistics from the images to leverage extra mathematical constraints to compensate for the information lost during reconstruction. On contrary, deep learning based methods learn the direct relationship between haze and haze-free image by utilizing multistage, attention mechanisms etc. Here, we discussed some recent deep learning based methods with state-of-the-art results."
213
- },
214
- {
215
- "type": "text",
216
- "bbox": [
217
- 0.081,
218
- 0.247,
219
- 0.468,
220
- 0.79
221
- ],
222
- "angle": 0,
223
- "content": "Zhang et al.[23] proposed a dehazing network with edge-preserving densely connected encoder-decoder architecture that jointly learns the dehazed image, transmission map and atmosphere light all together based on the scattering model for dehazing. In their encoder-decoder architecture, they use a multilevel pyramid pooling module and to improve their results further, joint-discriminator based on GAN is used to incorporate the correlation between estimated transmission map and dehazed image. Deng et al.[8] presents a multi-model fusion network to combine multiple models in its different levels of layers and enhance the overall performance of image dehazing. They generate the multi-model attention integrated feature from various CNN features at different levels and fed it to their fusion model to predict dehazed image for an atmospheric scattering model and four haze-layer separation models altogether. After that, they fused the corresponding results together to generate the final dehazed image. Qin et al.[17] proposed a novel Feature Attention module which fuses Channel Attention with Pixel Attention while considering different weighted information of different channel-wise features and uneven haze distribution on different pixels of the hazed image. For Outdoor hazy images, their work proves superiority though it didn't work well in case of dense dehazing. Liu et al.[14] proposed a grid network with attention-based multi-scale estimation which overcomes the bottleneck problems found in general multi-scale approach. Apart from that, their method also consists of pre-processing and post-processing modules. The pre-processing module used in this method is trainable to get more relevant features from diversified pre-processed image inputs and it outperforms the other hand picked classical pre-process techniques. The post-processing module is finally used on intermediate dehazed image to get more finer dehazed image. Their study shows how their method works quite independently and does not take any advantage from atmosphere scattering model for image dehazing."
224
- },
225
- {
226
- "type": "text",
227
- "bbox": [
228
- 0.081,
229
- 0.796,
230
- 0.468,
231
- 0.9
232
- ],
233
- "angle": 0,
234
- "content": "Unlike other multi-stage methods, Li et al.[13] used a level aware progressive deep network to learn different levels of haze from its different stages of the network by different supervision. Their network tends to progressively learn gradually more intense haze from image by focusing on a specific part of image with a certain haze level. They have also devised a adaptive hierarchical integration tech"
235
- },
236
- {
237
- "type": "text",
238
- "bbox": [
239
- 0.504,
240
- 0.092,
241
- 0.891,
242
- 0.498
243
- ],
244
- "angle": 0,
245
- "content": "nique by cooperating with the it's memory network component and domain information of dehazing to emphasize the well-reconstructed parts of the image in it's each stage of the network. Liu et al.[15] suggests a method to learn a haze relevant image priors by using a iteration algorithm with deep CNNs. They achieve this by using gradient descent method to optimize a variational model with image fidelity terms and proper regularization. this method indeed a great combination of properties from classical deep learning based method and physical hazed image formation model. Sharma et al.[19] explored the application of Laplacians of Gaussian (LoG) of the images to reattain the edge and intensity variation information. They optimize their end-to-end deep model by per-pixel difference between Laplacians of Gaussians of the dehazed and ground truth images. they additionally do adversarial training with a perceptual loss to enhance their results. Apart from other physical scattering model based methods, GAN , multiscale or multistage deep networks, Image dehazing can also be posed as image to image translation problem. Qu et al.(2019)[18] proposed their solution as an enhanced Pix2Pix Model which is widely used in image style transfer, image to image translation etc. problems. Their method consists of a GAN with a Enhancer modules to support the dehazing process to get more detailed, vivid image with less artifacts. Their work also proved superiority over other methods in the aspect of the perceptual quality of the dehazed images."
246
- },
247
- {
248
- "type": "title",
249
- "bbox": [
250
- 0.504,
251
- 0.516,
252
- 0.671,
253
- 0.533
254
- ],
255
- "angle": 0,
256
- "content": "3. Proposed Method"
257
- },
258
- {
259
- "type": "text",
260
- "bbox": [
261
- 0.504,
262
- 0.543,
263
- 0.891,
264
- 0.586
265
- ],
266
- "angle": 0,
267
- "content": "We use a Multi-patch and a Multi-scale network for Nonhomogeneous Image Dehazing. In this section, we describe these two architectures in detail."
268
- },
269
- {
270
- "type": "title",
271
- "bbox": [
272
- 0.504,
273
- 0.6,
274
- 0.734,
275
- 0.616
276
- ],
277
- "angle": 0,
278
- "content": "3.1. Multi-patch Architecture:"
279
- },
280
- {
281
- "type": "text",
282
- "bbox": [
283
- 0.504,
284
- 0.624,
285
- 0.891,
286
- 0.699
287
- ],
288
- "angle": 0,
289
- "content": "We use Deep Multi-patch Hierarchical Network(DMPHN). DMPHN is originally used for Single Image Deblurring[22]. We use (1-2-4) variant of DMPHN in this paper. For the sake of completeness, we will discuss the architecture in the following."
290
- },
291
- {
292
- "type": "text",
293
- "bbox": [
294
- 0.504,
295
- 0.7,
296
- 0.891,
297
- 0.836
298
- ],
299
- "angle": 0,
300
- "content": "DMPHN is a multi-level architecture. There is an encoder-decoder pair in each level. Each level works on different number of patches. In DMPHN(1-2-4), the number of patches used is 1,2 and 4 from top to bottom levels respectively. The top-most level (level-1) considers only one patch per image. In the next level (level-2), the image is divided into two patches vertically. In the bottom-most level (level-3) the patches from previous level are further divided horizontally, resulting in total 4 patches."
301
- },
302
- {
303
- "type": "text",
304
- "bbox": [
305
- 0.504,
306
- 0.837,
307
- 0.891,
308
- 0.902
309
- ],
310
- "angle": 0,
311
- "content": "Let us consider an input hazy image \\( I^H \\). We denote \\( j \\)-th patch in \\( i \\)-th level as \\( I_{i,j}^H \\). In level-1, \\( I^H \\) is not divided into any patches. In level-2, \\( I^H \\) is divided vertically into \\( I_{2,1}^H \\) and \\( I_{2,2}^H \\). In level-3, \\( I_{2,1}^H \\) and \\( I_{2,2}^H \\) are divided horizontally"
312
- }
313
- ],
314
- [
315
- {
316
- "type": "image",
317
- "bbox": [
318
- 0.207,
319
- 0.092,
320
- 0.765,
321
- 0.335
322
- ],
323
- "angle": 0,
324
- "content": null
325
- },
326
- {
327
- "type": "image_caption",
328
- "bbox": [
329
- 0.076,
330
- 0.347,
331
- 0.895,
332
- 0.378
333
- ],
334
- "angle": 0,
335
- "content": "Figure 1: Architecture diagram of Deep Multi-Patch Hierarchical Network. \\(\\{^{\\prime}\\}\\) denotes spatial concatenation and \\(\\bigoplus\\) denotes residual addition."
336
- },
337
- {
338
- "type": "text",
339
- "bbox": [
340
- 0.076,
341
- 0.403,
342
- 0.47,
343
- 0.449
344
- ],
345
- "angle": 0,
346
- "content": "to create 4 patches, \\( I_{3,1}^{H}, I_{3,2}^{H}, I_{3,3}^{H} \\) and \\( I_{3,4}^{H} \\). Encoders and Decoders at \\( i \\)-th level is denoted as \\( Enc_{i} \\) and \\( Dec_{i} \\) respectively."
347
- },
348
- {
349
- "type": "text",
350
- "bbox": [
351
- 0.076,
352
- 0.45,
353
- 0.47,
354
- 0.496
355
- ],
356
- "angle": 0,
357
- "content": "The information flow in DMPHN is bottom-up. Patches in the lowest level are fed to encoder \\( Enc_3 \\) to generate corresponding feature maps."
358
- },
359
- {
360
- "type": "equation",
361
- "bbox": [
362
- 0.171,
363
- 0.504,
364
- 0.47,
365
- 0.526
366
- ],
367
- "angle": 0,
368
- "content": "\\[\nF _ {3, j} = E n c _ {i} \\left(I _ {3, j} ^ {H}\\right), \\forall j \\in [ 1, 4 ] \\tag {2}\n\\]"
369
- },
370
- {
371
- "type": "text",
372
- "bbox": [
373
- 0.077,
374
- 0.533,
375
- 0.47,
376
- 0.565
377
- ],
378
- "angle": 0,
379
- "content": "We concatenate spatially adjacent feature maps to obtain a new feature representation."
380
- },
381
- {
382
- "type": "equation",
383
- "bbox": [
384
- 0.157,
385
- 0.574,
386
- 0.47,
387
- 0.593
388
- ],
389
- "angle": 0,
390
- "content": "\\[\nP _ {3, j} = \\left[ F _ {3, 2 j - 1}, F _ {3, 2 j} \\right], \\forall j \\in [ 1, 2 ] \\tag {3}\n\\]"
391
- },
392
- {
393
- "type": "text",
394
- "bbox": [
395
- 0.077,
396
- 0.601,
397
- 0.318,
398
- 0.615
399
- ],
400
- "angle": 0,
401
- "content": "where \\([\\ldots]\\) stands for concatenation."
402
- },
403
- {
404
- "type": "text",
405
- "bbox": [
406
- 0.076,
407
- 0.616,
408
- 0.47,
409
- 0.647
410
- ],
411
- "angle": 0,
412
- "content": "The new concatenated features are passed through decoder \\(Dec_{3}\\)."
413
- },
414
- {
415
- "type": "equation",
416
- "bbox": [
417
- 0.168,
418
- 0.657,
419
- 0.47,
420
- 0.675
421
- ],
422
- "angle": 0,
423
- "content": "\\[\nQ _ {3, j} = \\operatorname {D e c} _ {3} \\left(P _ {3, j}\\right), \\forall j \\in [ 1, 2 ] \\tag {4}\n\\]"
424
- },
425
- {
426
- "type": "text",
427
- "bbox": [
428
- 0.076,
429
- 0.684,
430
- 0.47,
431
- 0.713
432
- ],
433
- "angle": 0,
434
- "content": "The decoder output is added with patches in the next level and fed to encoder."
435
- },
436
- {
437
- "type": "equation",
438
- "bbox": [
439
- 0.146,
440
- 0.724,
441
- 0.47,
442
- 0.744
443
- ],
444
- "angle": 0,
445
- "content": "\\[\nF _ {2, j} = \\operatorname {E n c} _ {2} \\left(I _ {2, j} ^ {H} + Q _ {3, j}\\right), \\forall j \\in [ 1, 2 ] \\tag {5}\n\\]"
446
- },
447
- {
448
- "type": "text",
449
- "bbox": [
450
- 0.076,
451
- 0.752,
452
- 0.47,
453
- 0.799
454
- ],
455
- "angle": 0,
456
- "content": "The encoder outputs are added with respective decoder inputs from previous level. Then the resulting feature maps are spatially concatenated."
457
- },
458
- {
459
- "type": "equation",
460
- "bbox": [
461
- 0.17,
462
- 0.808,
463
- 0.469,
464
- 0.827
465
- ],
466
- "angle": 0,
467
- "content": "\\[\nF _ {2, j} ^ {*} = F _ {2, j} + P _ {3, j}, \\forall j \\in [ 1, 2 ] \\tag {6}\n\\]"
468
- },
469
- {
470
- "type": "equation",
471
- "bbox": [
472
- 0.26,
473
- 0.829,
474
- 0.469,
475
- 0.846
476
- ],
477
- "angle": 0,
478
- "content": "\\[\nP _ {2} = \\left[ F _ {2, 1} ^ {*}, F _ {2, 2} ^ {*} \\right] \\tag {7}\n\\]"
479
- },
480
- {
481
- "type": "text",
482
- "bbox": [
483
- 0.077,
484
- 0.856,
485
- 0.47,
486
- 0.884
487
- ],
488
- "angle": 0,
489
- "content": "\\(P_{2}\\) is then fed to \\(Dec_{2}\\) to generate residual feature maps for level-2."
490
- },
491
- {
492
- "type": "equation",
493
- "bbox": [
494
- 0.217,
495
- 0.886,
496
- 0.47,
497
- 0.903
498
- ],
499
- "angle": 0,
500
- "content": "\\[\nQ _ {2} = \\operatorname {D e c} _ {2} (P _ {2}) \\tag {8}\n\\]"
501
- },
502
- {
503
- "type": "text",
504
- "bbox": [
505
- 0.499,
506
- 0.404,
507
- 0.893,
508
- 0.451
509
- ],
510
- "angle": 0,
511
- "content": "Decoder output at level-2 is added to input image and passed through \\( Enc_1 \\). Encoder output \\( F_1 \\) is added with decoder output at level-2, \\( Q_2 \\)."
512
- },
513
- {
514
- "type": "equation",
515
- "bbox": [
516
- 0.618,
517
- 0.46,
518
- 0.892,
519
- 0.48
520
- ],
521
- "angle": 0,
522
- "content": "\\[\nF _ {1} = \\operatorname {E n c} _ {1} \\left(I ^ {H} + Q _ {2}\\right) \\tag {9}\n\\]"
523
- },
524
- {
525
- "type": "text",
526
- "bbox": [
527
- 0.499,
528
- 0.489,
529
- 0.892,
530
- 0.521
531
- ],
532
- "angle": 0,
533
- "content": "\\(F_{1}\\) is added with \\(P_{2}\\) and fed to \\(Dec_{1}\\) to produce the final dehazed output \\(\\hat{I}\\)."
534
- },
535
- {
536
- "type": "equation",
537
- "bbox": [
538
- 0.647,
539
- 0.532,
540
- 0.892,
541
- 0.548
542
- ],
543
- "angle": 0,
544
- "content": "\\[\nP _ {1} = F _ {1} + P _ {2} \\tag {10}\n\\]"
545
- },
546
- {
547
- "type": "equation",
548
- "bbox": [
549
- 0.646,
550
- 0.552,
551
- 0.892,
552
- 0.571
553
- ],
554
- "angle": 0,
555
- "content": "\\[\n\\hat {I} = D e c _ {1} \\left(P _ {1}\\right) \\tag {11}\n\\]"
556
- },
557
- {
558
- "type": "title",
559
- "bbox": [
560
- 0.5,
561
- 0.591,
562
- 0.731,
563
- 0.607
564
- ],
565
- "angle": 0,
566
- "content": "3.2. Multi-scale Architecture:"
567
- },
568
- {
569
- "type": "text",
570
- "bbox": [
571
- 0.499,
572
- 0.614,
573
- 0.892,
574
- 0.674
575
- ],
576
- "angle": 0,
577
- "content": "We also experiment with a multi-scale architecture. We name this architecture Deep Multi-scale Hierarchical Network(DMSHN). The details of the architecture are described as follows."
578
- },
579
- {
580
- "type": "text",
581
- "bbox": [
582
- 0.499,
583
- 0.674,
584
- 0.892,
585
- 0.765
586
- ],
587
- "angle": 0,
588
- "content": "Input hazy image \\( I^H \\) is downsampled by factor of 2 and 4 to create an image pyramid. We call these downsampled images \\( I_{0.5}^H \\) and \\( I_{0.25}^H \\) respectively. The architecture consists of 3 levels where each level has a pair of encoder and decoder. Encoder and decoder at level \\( i \\) is denoted as \\( Enc_i \\) and \\( Dec_i \\) respectively."
589
- },
590
- {
591
- "type": "text",
592
- "bbox": [
593
- 0.499,
594
- 0.765,
595
- 0.892,
596
- 0.811
597
- ],
598
- "angle": 0,
599
- "content": "At the lowest level \\( I_{0.25}^{H} \\) is fed to encoder \\( Enc_3 \\) to obtain feature map \\( F_3 \\) and is further passed through decoder \\( Dec_3 \\) to feature representation \\( P_3 \\)."
600
- },
601
- {
602
- "type": "equation",
603
- "bbox": [
604
- 0.633,
605
- 0.821,
606
- 0.891,
607
- 0.841
608
- ],
609
- "angle": 0,
610
- "content": "\\[\nF _ {3} = \\operatorname {E n c} _ {3} \\left(I _ {0. 2 5} ^ {H}\\right) \\tag {12}\n\\]"
611
- },
612
- {
613
- "type": "equation",
614
- "bbox": [
615
- 0.649,
616
- 0.842,
617
- 0.891,
618
- 0.858
619
- ],
620
- "angle": 0,
621
- "content": "\\[\nP _ {3} = \\operatorname {D e c} _ {3} \\left(F _ {3}\\right) \\tag {13}\n\\]"
622
- },
623
- {
624
- "type": "text",
625
- "bbox": [
626
- 0.5,
627
- 0.87,
628
- 0.893,
629
- 0.903
630
- ],
631
- "angle": 0,
632
- "content": "\\(P_{3}\\) is upscaled by factor of 2 and added to \\(I_{0.5}^{H}\\) and passed through encoder Enc2 to generate \\(F_{2}^{*}\\). Encoder output from"
633
- }
634
- ],
635
- [
636
- {
637
- "type": "image",
638
- "bbox": [
639
- 0.202,
640
- 0.087,
641
- 0.763,
642
- 0.315
643
- ],
644
- "angle": 0,
645
- "content": null
646
- },
647
- {
648
- "type": "image_caption",
649
- "bbox": [
650
- 0.076,
651
- 0.33,
652
- 0.894,
653
- 0.364
654
- ],
655
- "angle": 0,
656
- "content": "Figure 2: Architecture diagram of Deep Multi-Scale Hierarchical Network. \\(\\updownarrow\\) denotes Upsampling by factor of 2 and \\(\\bigoplus\\) denotes residual addition."
657
- },
658
- {
659
- "type": "text",
660
- "bbox": [
661
- 0.076,
662
- 0.39,
663
- 0.47,
664
- 0.422
665
- ],
666
- "angle": 0,
667
- "content": "previous level is upscaled and added to intermediate feature map \\( F_{2}^{*} \\) and fed to the decoder \\( Dec_{2} \\)."
668
- },
669
- {
670
- "type": "equation",
671
- "bbox": [
672
- 0.179,
673
- 0.43,
674
- 0.468,
675
- 0.45
676
- ],
677
- "angle": 0,
678
- "content": "\\[\nF _ {2} ^ {*} = \\operatorname {E n c} _ {2} \\left(I _ {0. 5} ^ {H} + u p \\left(P _ {3}\\right)\\right) \\tag {14}\n\\]"
679
- },
680
- {
681
- "type": "equation",
682
- "bbox": [
683
- 0.237,
684
- 0.452,
685
- 0.468,
686
- 0.467
687
- ],
688
- "angle": 0,
689
- "content": "\\[\nF _ {2} = F _ {2} ^ {*} + u p \\left(F _ {3}\\right) \\tag {15}\n\\]"
690
- },
691
- {
692
- "type": "equation",
693
- "bbox": [
694
- 0.258,
695
- 0.47,
696
- 0.468,
697
- 0.486
698
- ],
699
- "angle": 0,
700
- "content": "\\[\nP _ {2} = \\operatorname {D e c} _ {2} \\left(F _ {2}\\right) \\tag {16}\n\\]"
701
- },
702
- {
703
- "type": "text",
704
- "bbox": [
705
- 0.076,
706
- 0.497,
707
- 0.47,
708
- 0.573
709
- ],
710
- "angle": 0,
711
- "content": "where \\(up(.)\\) denotes Upsampling operation by a factor of 2. Residual feature map \\(P_{2}\\) from level-2 is added to the input hazy image and fed to encoder \\(Enc_{1}\\). Encoder output is added with upscaled \\(F_{2}\\) and passed through decoder to synthesize the dehazed output \\(\\hat{I}\\)."
712
- },
713
- {
714
- "type": "equation",
715
- "bbox": [
716
- 0.18,
717
- 0.582,
718
- 0.468,
719
- 0.6
720
- ],
721
- "angle": 0,
722
- "content": "\\[\nF _ {1} ^ {*} = \\operatorname {E n c} _ {2} \\left(I ^ {H} + u p \\left(P _ {2}\\right)\\right) \\tag {17}\n\\]"
723
- },
724
- {
725
- "type": "equation",
726
- "bbox": [
727
- 0.235,
728
- 0.602,
729
- 0.468,
730
- 0.619
731
- ],
732
- "angle": 0,
733
- "content": "\\[\nF _ {1} = F _ {1} ^ {*} + u p \\left(F _ {2}\\right) \\tag {18}\n\\]"
734
- },
735
- {
736
- "type": "equation",
737
- "bbox": [
738
- 0.265,
739
- 0.623,
740
- 0.468,
741
- 0.641
742
- ],
743
- "angle": 0,
744
- "content": "\\[\n\\hat {I} = D e c _ {1} \\left(F _ {1}\\right) \\tag {19}\n\\]"
745
- },
746
- {
747
- "type": "title",
748
- "bbox": [
749
- 0.077,
750
- 0.65,
751
- 0.388,
752
- 0.667
753
- ],
754
- "angle": 0,
755
- "content": "3.3. Encoder and Decoder Architecture:"
756
- },
757
- {
758
- "type": "text",
759
- "bbox": [
760
- 0.076,
761
- 0.673,
762
- 0.47,
763
- 0.765
764
- ],
765
- "angle": 0,
766
- "content": "We use the same encoder and decoder architecture at all levels of DMPHN and DMSHN. The encoder consists of 15 convolutional layers, 6 residual connections and 6 ReLU units. The layers in the decoder and encoder are similar except that 2 convolutional layers are replaced by deconvolutional layers to generate dehazed images as output."
767
- },
768
- {
769
- "type": "title",
770
- "bbox": [
771
- 0.077,
772
- 0.778,
773
- 0.21,
774
- 0.794
775
- ],
776
- "angle": 0,
777
- "content": "4. Experiments"
778
- },
779
- {
780
- "type": "title",
781
- "bbox": [
782
- 0.077,
783
- 0.802,
784
- 0.27,
785
- 0.818
786
- ],
787
- "angle": 0,
788
- "content": "4.1. Dataset Description:"
789
- },
790
- {
791
- "type": "text",
792
- "bbox": [
793
- 0.076,
794
- 0.825,
795
- 0.468,
796
- 0.902
797
- ],
798
- "angle": 0,
799
- "content": "We used NH-HAZE dataset[3] provided for NTIRE 2020 Nonhomogeneous Image Dehazing challenge in our experiments. This dataset contains a total of 55 hazy and clear image pairs, divided into trainset of 45 image pairs, validation set of 5 image pairs and test set of 5 image pairs. Val"
800
- },
801
- {
802
- "type": "text",
803
- "bbox": [
804
- 0.499,
805
- 0.39,
806
- 0.893,
807
- 0.467
808
- ],
809
- "angle": 0,
810
- "content": "idation and test ground truth images are not publicly available at this moment. Resolution of images in this dataset is \\(1200 \\times 1600\\). This dataset contains hazed and hazefree images of various outdoor scenes. A few hazefree and hazed image pairs from this dataset is shown in Figure-4."
811
- },
812
- {
813
- "type": "title",
814
- "bbox": [
815
- 0.5,
816
- 0.476,
817
- 0.744,
818
- 0.493
819
- ],
820
- "angle": 0,
821
- "content": "4.2. Training data preparation:"
822
- },
823
- {
824
- "type": "text",
825
- "bbox": [
826
- 0.499,
827
- 0.499,
828
- 0.892,
829
- 0.561
830
- ],
831
- "angle": 0,
832
- "content": "Due to the small amount of available data, we divide each image into 100 non-overlapping patches. Thus we obtain a training set of 4500 image-pairs of resolution \\(120 \\times 160\\). No data augmentation techniques were used."
833
- },
834
- {
835
- "type": "title",
836
- "bbox": [
837
- 0.5,
838
- 0.57,
839
- 0.652,
840
- 0.585
841
- ],
842
- "angle": 0,
843
- "content": "4.3. Loss functions:"
844
- },
845
- {
846
- "type": "text",
847
- "bbox": [
848
- 0.499,
849
- 0.594,
850
- 0.89,
851
- 0.623
852
- ],
853
- "angle": 0,
854
- "content": "We use a linear combination of the following loss functions as our optimization objective."
855
- },
856
- {
857
- "type": "text",
858
- "bbox": [
859
- 0.499,
860
- 0.624,
861
- 0.892,
862
- 0.699
863
- ],
864
- "angle": 0,
865
- "content": "Reconstruction loss: Reconstruction loss helps the network to generate dehazed frames close to the ground truth. Our reconstruction loss is a weighted sum of MAE or \\( L_{1} \\) loss and MSE or \\( L_{2} \\) loss. The reconstruction loss is given by,"
866
- },
867
- {
868
- "type": "equation",
869
- "bbox": [
870
- 0.628,
871
- 0.701,
872
- 0.891,
873
- 0.716
874
- ],
875
- "angle": 0,
876
- "content": "\\[\nL _ {r} = \\lambda_ {1} L _ {1} + \\lambda_ {2} L _ {2} \\tag {20}\n\\]"
877
- },
878
- {
879
- "type": "text",
880
- "bbox": [
881
- 0.5,
882
- 0.721,
883
- 0.785,
884
- 0.747
885
- ],
886
- "angle": 0,
887
- "content": "where \\(L_{1} = \\left\\| \\hat{I} - I\\right\\|_{1}\\) and \\(L_{2} = \\left\\| \\hat{I} - I\\right\\|_{2}\\)"
888
- },
889
- {
890
- "type": "text",
891
- "bbox": [
892
- 0.499,
893
- 0.747,
894
- 0.892,
895
- 0.805
896
- ],
897
- "angle": 0,
898
- "content": "Perceptual loss: \\( \\bar{L}_2 \\) distance between features extracted from conv4_3 layer of VGGNet[20] of predicted and ground truth images are used as Perceptual loss[11]. Perceptual loss is given by,"
899
- },
900
- {
901
- "type": "equation",
902
- "bbox": [
903
- 0.619,
904
- 0.805,
905
- 0.891,
906
- 0.832
907
- ],
908
- "angle": 0,
909
- "content": "\\[\nL _ {p} = \\left\\| \\phi (\\hat {I}) - \\phi (I) \\right\\| _ {2} \\tag {21}\n\\]"
910
- },
911
- {
912
- "type": "text",
913
- "bbox": [
914
- 0.499,
915
- 0.837,
916
- 0.891,
917
- 0.868
918
- ],
919
- "angle": 0,
920
- "content": "TV loss: We use Total Variation(TV) loss[11] makes predictions smooth. TV loss is given by,"
921
- },
922
- {
923
- "type": "equation",
924
- "bbox": [
925
- 0.605,
926
- 0.878,
927
- 0.891,
928
- 0.906
929
- ],
930
- "angle": 0,
931
- "content": "\\[\nL _ {t v} = \\left\\| \\nabla_ {x} \\hat {I} \\right\\| _ {2} + \\left\\| \\nabla_ {y} \\hat {I} \\right\\| _ {2} \\tag {22}\n\\]"
932
- }
933
- ],
934
- [
935
- {
936
- "type": "image",
937
- "bbox": [
938
- 0.084,
939
- 0.085,
940
- 0.891,
941
- 0.233
942
- ],
943
- "angle": 0,
944
- "content": null
945
- },
946
- {
947
- "type": "image_caption",
948
- "bbox": [
949
- 0.076,
950
- 0.243,
951
- 0.893,
952
- 0.274
953
- ],
954
- "angle": 0,
955
- "content": "Figure 3: Encoder and Decoder architecture. Within brackets written values are Input Channel, Output Channel, Kernel and Stride respectively."
956
- },
957
- {
958
- "type": "image",
959
- "bbox": [
960
- 0.162,
961
- 0.29,
962
- 0.808,
963
- 0.477
964
- ],
965
- "angle": 0,
966
- "content": null
967
- },
968
- {
969
- "type": "image_caption",
970
- "bbox": [
971
- 0.076,
972
- 0.491,
973
- 0.893,
974
- 0.522
975
- ],
976
- "angle": 0,
977
- "content": "Figure 4: A snapshot of Training Dataset. Top row contains hazy images and bottom row contains corresponding ground truth images."
978
- },
979
- {
980
- "type": "text",
981
- "bbox": [
982
- 0.097,
983
- 0.549,
984
- 0.327,
985
- 0.564
986
- ],
987
- "angle": 0,
988
- "content": "Our final loss function is given by,"
989
- },
990
- {
991
- "type": "equation",
992
- "bbox": [
993
- 0.176,
994
- 0.579,
995
- 0.469,
996
- 0.596
997
- ],
998
- "angle": 0,
999
- "content": "\\[\nL = \\lambda_ {r} L _ {r} + \\lambda_ {p} L _ {p} + \\lambda_ {t v} L _ {t v} \\tag {23}\n\\]"
1000
- },
1001
- {
1002
- "type": "text",
1003
- "bbox": [
1004
- 0.077,
1005
- 0.612,
1006
- 0.47,
1007
- 0.657
1008
- ],
1009
- "angle": 0,
1010
- "content": "In our experiments we choose \\(\\lambda_r = 1\\), \\(\\lambda_p = 6e - 3\\), \\(\\lambda_{tv} = 2e - 8\\). \\(\\lambda_1\\) and \\(\\lambda_2\\) is chosen to be 0.6 and 0.4 respectively."
1011
- },
1012
- {
1013
- "type": "title",
1014
- "bbox": [
1015
- 0.077,
1016
- 0.671,
1017
- 0.24,
1018
- 0.687
1019
- ],
1020
- "angle": 0,
1021
- "content": "4.4. Training details:"
1022
- },
1023
- {
1024
- "type": "text",
1025
- "bbox": [
1026
- 0.076,
1027
- 0.696,
1028
- 0.47,
1029
- 0.803
1030
- ],
1031
- "angle": 0,
1032
- "content": "We developed our models using Pytorch[16] on a system with AMD Ryzen 1600X CPU and NVIDIA GTX 1080 GPU. We use Adam optimizer[12] to train our networks with values of \\(\\beta_{1}\\) and \\(\\beta_{2}\\) 0.9 and 0.99 respectively. We use batchsize of 8. Initial learning rate is set to be 1e-4 which is gradually decreased to 5e-5. We train our models until convergence."
1033
- },
1034
- {
1035
- "type": "title",
1036
- "bbox": [
1037
- 0.077,
1038
- 0.816,
1039
- 0.229,
1040
- 0.832
1041
- ],
1042
- "angle": 0,
1043
- "content": "4.5. Testing details:"
1044
- },
1045
- {
1046
- "type": "text",
1047
- "bbox": [
1048
- 0.076,
1049
- 0.84,
1050
- 0.47,
1051
- 0.902
1052
- ],
1053
- "angle": 0,
1054
- "content": "We test our models' performance on the given full resolution images of validation data. Please note that, our models are fully convolutional, hence difference between train and test image size should not matter."
1055
- },
1056
- {
1057
- "type": "title",
1058
- "bbox": [
1059
- 0.5,
1060
- 0.548,
1061
- 0.599,
1062
- 0.563
1063
- ],
1064
- "angle": 0,
1065
- "content": "4.6. Results:"
1066
- },
1067
- {
1068
- "type": "title",
1069
- "bbox": [
1070
- 0.5,
1071
- 0.583,
1072
- 0.816,
1073
- 0.599
1074
- ],
1075
- "angle": 0,
1076
- "content": "4.6.1 Quantitative and Qualitative Results:"
1077
- },
1078
- {
1079
- "type": "text",
1080
- "bbox": [
1081
- 0.498,
1082
- 0.632,
1083
- 0.892,
1084
- 0.814
1085
- ],
1086
- "angle": 0,
1087
- "content": "As ground truth for validation set is not publicly available, we submit our validation results to Codalab server. We compare performance of our models with three state-of-the-art dehazing models namely AtJ-DH[10], 123-CEDH[9] and FFA-Net[17]. The quantitative results on Validation set are given in Table-1. DMPHN is performing better than the rest of the models. It can be observed that our Multi-patch network is performing better than our Multi-scale network in terms of both PSNR and SSIM. At lower levels of DMPHN, the network works on patch level, so the network learns local features compared to global features learnt by DMSHN, which explains the performance gain in DMPHN."
1088
- },
1089
- {
1090
- "type": "text",
1091
- "bbox": [
1092
- 0.498,
1093
- 0.826,
1094
- 0.893,
1095
- 0.902
1096
- ],
1097
- "angle": 0,
1098
- "content": "Apart from decent dehazing results, it is to be noted that both DMPHN and DMSHN are lightweight and efficient models. Checkpoints of both the networks take 21.7 MB on disk. GPU processing times for DMPHN and DMPSN make them suitable for real-time applications."
1099
- }
1100
- ],
1101
- [
1102
- {
1103
- "type": "image",
1104
- "bbox": [
1105
- 0.081,
1106
- 0.089,
1107
- 0.212,
1108
- 0.167
1109
- ],
1110
- "angle": 0,
1111
- "content": null
1112
- },
1113
- {
1114
- "type": "image",
1115
- "bbox": [
1116
- 0.217,
1117
- 0.089,
1118
- 0.348,
1119
- 0.167
1120
- ],
1121
- "angle": 0,
1122
- "content": null
1123
- },
1124
- {
1125
- "type": "image",
1126
- "bbox": [
1127
- 0.352,
1128
- 0.089,
1129
- 0.483,
1130
- 0.167
1131
- ],
1132
- "angle": 0,
1133
- "content": null
1134
- },
1135
- {
1136
- "type": "image",
1137
- "bbox": [
1138
- 0.486,
1139
- 0.089,
1140
- 0.619,
1141
- 0.167
1142
- ],
1143
- "angle": 0,
1144
- "content": null
1145
- },
1146
- {
1147
- "type": "image",
1148
- "bbox": [
1149
- 0.622,
1150
- 0.089,
1151
- 0.755,
1152
- 0.167
1153
- ],
1154
- "angle": 0,
1155
- "content": null
1156
- },
1157
- {
1158
- "type": "image",
1159
- "bbox": [
1160
- 0.759,
1161
- 0.089,
1162
- 0.892,
1163
- 0.167
1164
- ],
1165
- "angle": 0,
1166
- "content": null
1167
- },
1168
- {
1169
- "type": "image",
1170
- "bbox": [
1171
- 0.081,
1172
- 0.17,
1173
- 0.212,
1174
- 0.248
1175
- ],
1176
- "angle": 0,
1177
- "content": null
1178
- },
1179
- {
1180
- "type": "image",
1181
- "bbox": [
1182
- 0.217,
1183
- 0.17,
1184
- 0.348,
1185
- 0.248
1186
- ],
1187
- "angle": 0,
1188
- "content": null
1189
- },
1190
- {
1191
- "type": "image",
1192
- "bbox": [
1193
- 0.352,
1194
- 0.17,
1195
- 0.483,
1196
- 0.248
1197
- ],
1198
- "angle": 0,
1199
- "content": null
1200
- },
1201
- {
1202
- "type": "image",
1203
- "bbox": [
1204
- 0.486,
1205
- 0.17,
1206
- 0.62,
1207
- 0.248
1208
- ],
1209
- "angle": 0,
1210
- "content": null
1211
- },
1212
- {
1213
- "type": "image",
1214
- "bbox": [
1215
- 0.623,
1216
- 0.17,
1217
- 0.755,
1218
- 0.248
1219
- ],
1220
- "angle": 0,
1221
- "content": null
1222
- },
1223
- {
1224
- "type": "image",
1225
- "bbox": [
1226
- 0.759,
1227
- 0.17,
1228
- 0.892,
1229
- 0.248
1230
- ],
1231
- "angle": 0,
1232
- "content": null
1233
- },
1234
- {
1235
- "type": "image",
1236
- "bbox": [
1237
- 0.081,
1238
- 0.252,
1239
- 0.212,
1240
- 0.328
1241
- ],
1242
- "angle": 0,
1243
- "content": null
1244
- },
1245
- {
1246
- "type": "image_caption",
1247
- "bbox": [
1248
- 0.105,
1249
- 0.331,
1250
- 0.179,
1251
- 0.342
1252
- ],
1253
- "angle": 0,
1254
- "content": "(a) Input Image"
1255
- },
1256
- {
1257
- "type": "image",
1258
- "bbox": [
1259
- 0.217,
1260
- 0.252,
1261
- 0.348,
1262
- 0.328
1263
- ],
1264
- "angle": 0,
1265
- "content": null
1266
- },
1267
- {
1268
- "type": "image_caption",
1269
- "bbox": [
1270
- 0.257,
1271
- 0.331,
1272
- 0.306,
1273
- 0.341
1274
- ],
1275
- "angle": 0,
1276
- "content": "(b) AtJ-DH"
1277
- },
1278
- {
1279
- "type": "image",
1280
- "bbox": [
1281
- 0.352,
1282
- 0.252,
1283
- 0.483,
1284
- 0.328
1285
- ],
1286
- "angle": 0,
1287
- "content": null
1288
- },
1289
- {
1290
- "type": "image_caption",
1291
- "bbox": [
1292
- 0.377,
1293
- 0.331,
1294
- 0.441,
1295
- 0.341
1296
- ],
1297
- "angle": 0,
1298
- "content": "(c) 123-CEDH"
1299
- },
1300
- {
1301
- "type": "image",
1302
- "bbox": [
1303
- 0.488,
1304
- 0.252,
1305
- 0.62,
1306
- 0.328
1307
- ],
1308
- "angle": 0,
1309
- "content": null
1310
- },
1311
- {
1312
- "type": "image_caption",
1313
- "bbox": [
1314
- 0.521,
1315
- 0.331,
1316
- 0.576,
1317
- 0.341
1318
- ],
1319
- "angle": 0,
1320
- "content": "(d) FFA-Net"
1321
- },
1322
- {
1323
- "type": "image",
1324
- "bbox": [
1325
- 0.623,
1326
- 0.252,
1327
- 0.755,
1328
- 0.328
1329
- ],
1330
- "angle": 0,
1331
- "content": null
1332
- },
1333
- {
1334
- "type": "image_caption",
1335
- "bbox": [
1336
- 0.649,
1337
- 0.331,
1338
- 0.735,
1339
- 0.341
1340
- ],
1341
- "angle": 0,
1342
- "content": "(e) Ours (DMPHN)"
1343
- },
1344
- {
1345
- "type": "image",
1346
- "bbox": [
1347
- 0.759,
1348
- 0.252,
1349
- 0.892,
1350
- 0.328
1351
- ],
1352
- "angle": 0,
1353
- "content": null
1354
- },
1355
- {
1356
- "type": "image_caption",
1357
- "bbox": [
1358
- 0.779,
1359
- 0.331,
1360
- 0.862,
1361
- 0.341
1362
- ],
1363
- "angle": 0,
1364
- "content": "(f) Ours (DMSHN)"
1365
- },
1366
- {
1367
- "type": "image_caption",
1368
- "bbox": [
1369
- 0.27,
1370
- 0.355,
1371
- 0.698,
1372
- 0.371
1373
- ],
1374
- "angle": 0,
1375
- "content": "Figure 5: Qualitative results on NH-HAZE[3] Validation dataset."
1376
- },
1377
- {
1378
- "type": "table",
1379
- "bbox": [
1380
- 0.108,
1381
- 0.395,
1382
- 0.441,
1383
- 0.491
1384
- ],
1385
- "angle": 0,
1386
- "content": "<table><tr><td></td><td>PSNR</td><td>SSIM</td><td>Runtime(s)</td></tr><tr><td>AtJ-DH[10]</td><td>15.94</td><td>0.5662</td><td>0.0775</td></tr><tr><td>123-CEDH[9]</td><td>14.59</td><td>0.5488</td><td>0.0559</td></tr><tr><td>FFA-Net[17]</td><td>10.43</td><td>0.4168</td><td>1.7472</td></tr><tr><td>DMPHN</td><td>16.94</td><td>0.6177</td><td>0.0145</td></tr><tr><td>DMPSN</td><td>16.42</td><td>0.5991</td><td>0.0210</td></tr></table>"
1387
- },
1388
- {
1389
- "type": "table_caption",
1390
- "bbox": [
1391
- 0.077,
1392
- 0.496,
1393
- 0.469,
1394
- 0.525
1395
- ],
1396
- "angle": 0,
1397
- "content": "Table 1: Quantitative results on NH-HAZE[3] Validation set."
1398
- },
1399
- {
1400
- "type": "title",
1401
- "bbox": [
1402
- 0.077,
1403
- 0.555,
1404
- 0.469,
1405
- 0.586
1406
- ],
1407
- "angle": 0,
1408
- "content": "4.6.2 NTIRE 2020 challenge on NonHomogeneous Image Dehazing:"
1409
- },
1410
- {
1411
- "type": "text",
1412
- "bbox": [
1413
- 0.076,
1414
- 0.595,
1415
- 0.47,
1416
- 0.792
1417
- ],
1418
- "angle": 0,
1419
- "content": "We participated in NTIRE 2020 challenge on NonHomogeneous Image Dehazing[5]. 27 teams submitted results in test phase, out of which 19 teams don't take help of extra training data like Dense-Haze[2, 6] and OHaze[4, 1]. The test results were evaluated on Fidelity measures as well as Perceptual Measures. Fidelity measures included PSNR and SSIM[21], where LPIPS[24], Perceptual Index(PI)[7] and Mean Opinion Score(MOS) were used as Perceptual metrics. For fair comparison, we note down performances of some submissions that used only NH-HAZE dataset in Table-2. Our DMPHN network produced moderate quality outputs both in Fidelity and Perceptual metrics. Our network is the fastest entry among all the submissions."
1420
- },
1421
- {
1422
- "type": "title",
1423
- "bbox": [
1424
- 0.077,
1425
- 0.816,
1426
- 0.283,
1427
- 0.83
1428
- ],
1429
- "angle": 0,
1430
- "content": "4.6.3 Dense Haze Removal:"
1431
- },
1432
- {
1433
- "type": "text",
1434
- "bbox": [
1435
- 0.076,
1436
- 0.84,
1437
- 0.47,
1438
- 0.903
1439
- ],
1440
- "angle": 0,
1441
- "content": "DMPHN is effective for dense haze removal as well. We trained our network on Dense-HAZE dataset[2]. We train on 50 images for training and use 5 images for test. We compare the performance with AtJ-DH[10], 123-CEDH[9]"
1442
- },
1443
- {
1444
- "type": "text",
1445
- "bbox": [
1446
- 0.499,
1447
- 0.397,
1448
- 0.892,
1449
- 0.472
1450
- ],
1451
- "angle": 0,
1452
- "content": "and FFA-Net[17]. Quantitative results and GPU runtimes are shown in Table-3. We observe that DMPHN is significantly better than other models both in terms of fidelity measures and runtime. Figure-6 shows qualitative comparison with the said models."
1453
- },
1454
- {
1455
- "type": "title",
1456
- "bbox": [
1457
- 0.5,
1458
- 0.482,
1459
- 0.623,
1460
- 0.497
1461
- ],
1462
- "angle": 0,
1463
- "content": "4.7. Conclusion"
1464
- },
1465
- {
1466
- "type": "text",
1467
- "bbox": [
1468
- 0.498,
1469
- 0.505,
1470
- 0.893,
1471
- 0.671
1472
- ],
1473
- "angle": 0,
1474
- "content": "In this paper, we use a Multi-Patch and a Multi-Scale architecture for Nonhomogeneous haze removal from images. We show that DMPHN is better than DMSHN because DMPHN aggregates local features generated from a finer level to coarser level. Moreover, DMPHN is a fast algorithm and can dehaze images from a video sequence in real-time. We also show that DMPHN performs well for Dense Haze Removal. In future, the effectiveness of DMPHN with more levels can be explored for performance improvement, but the addition of more levels to architecture will subject to sacrifice in runtime."
1475
- }
1476
- ],
1477
- [
1478
- {
1479
- "type": "table",
1480
- "bbox": [
1481
- 0.165,
1482
- 0.089,
1483
- 0.807,
1484
- 0.263
1485
- ],
1486
- "angle": 0,
1487
- "content": "<table><tr><td rowspan=\"2\">Team</td><td colspan=\"2\">Fidelity</td><td colspan=\"3\">Perceptual quality</td><td rowspan=\"2\">Runtime(s)↓</td><td rowspan=\"2\">GPU/CPU</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>PI↓</td><td>MOS↓</td></tr><tr><td>method1</td><td>21.60</td><td>0.67</td><td>0.363</td><td>3.712</td><td>3</td><td>0.21</td><td>v100</td></tr><tr><td>method2</td><td>21.91</td><td>0.69</td><td>0.361</td><td>3.700</td><td>4</td><td>0.22</td><td>v100</td></tr><tr><td>method3</td><td>19.25</td><td>0.60</td><td>0.426</td><td>5.061</td><td>12</td><td>12.88</td><td>v100</td></tr><tr><td>method4</td><td>18.51</td><td>0.68</td><td>0.308</td><td>2.988</td><td>12</td><td>13.00</td><td>n/a</td></tr><tr><td>Ours (DMPHN)</td><td>18.24</td><td>0.65</td><td>0.329</td><td>3.051</td><td>14</td><td>0.01</td><td>1080</td></tr><tr><td>method5</td><td>18.70</td><td>0.64</td><td>0.328</td><td>3.114</td><td>14</td><td>10.43</td><td>1080ti</td></tr><tr><td>method6</td><td>18.67</td><td>0.64</td><td>0.303</td><td>3.211</td><td>16</td><td>1.64</td><td>TitanXP</td></tr><tr><td>method7</td><td>17.88</td><td>0.57</td><td>0.378</td><td>2.855</td><td>16</td><td>0.06</td><td>n/a</td></tr><tr><td>no processing</td><td>11.33</td><td>0.42</td><td>0.582</td><td>2.609</td><td>20</td><td></td><td></td></tr></table>"
1488
- },
1489
- {
1490
- "type": "table_caption",
1491
- "bbox": [
1492
- 0.09,
1493
- 0.268,
1494
- 0.877,
1495
- 0.283
1496
- ],
1497
- "angle": 0,
1498
- "content": "Table 2: NTIRE 2020 Nonhomogeneous challenge[5] Leaderboard. Submissions are sorted in ascending order of MOS."
1499
- },
1500
- {
1501
- "type": "image",
1502
- "bbox": [
1503
- 0.081,
1504
- 0.298,
1505
- 0.891,
1506
- 0.555
1507
- ],
1508
- "angle": 0,
1509
- "content": null
1510
- },
1511
- {
1512
- "type": "image_caption",
1513
- "bbox": [
1514
- 0.305,
1515
- 0.568,
1516
- 0.663,
1517
- 0.582
1518
- ],
1519
- "angle": 0,
1520
- "content": "Figure 6: Qualitative results for Dense Haze Removal."
1521
- },
1522
- {
1523
- "type": "table",
1524
- "bbox": [
1525
- 0.104,
1526
- 0.607,
1527
- 0.445,
1528
- 0.687
1529
- ],
1530
- "angle": 0,
1531
- "content": "<table><tr><td></td><td>PSNR</td><td>SSIM</td><td>Runtime(s)</td></tr><tr><td>AtJ-DH[10]</td><td>22.54</td><td>0.6436</td><td>0.0775</td></tr><tr><td>123-CEDH[9]</td><td>19.63</td><td>0.5758</td><td>0.0559</td></tr><tr><td>FFA-Net[17]</td><td>11.93</td><td>0.3790</td><td>1.7472</td></tr><tr><td>Ours(DMPHN)</td><td>23.41</td><td>0.6669</td><td>0.0145</td></tr></table>"
1532
- },
1533
- {
1534
- "type": "table_caption",
1535
- "bbox": [
1536
- 0.092,
1537
- 0.692,
1538
- 0.454,
1539
- 0.707
1540
- ],
1541
- "angle": 0,
1542
- "content": "Table 3: Quantitative Comparison on Dense-HAZE[2]."
1543
- },
1544
- {
1545
- "type": "title",
1546
- "bbox": [
1547
- 0.081,
1548
- 0.734,
1549
- 0.173,
1550
- 0.749
1551
- ],
1552
- "angle": 0,
1553
- "content": "References"
1554
- },
1555
- {
1556
- "type": "ref_text",
1557
- "bbox": [
1558
- 0.085,
1559
- 0.759,
1560
- 0.468,
1561
- 0.814
1562
- ],
1563
- "angle": 0,
1564
- "content": "[1] C. Ancuti, C.O. Ancuti, R. Timofte, L. Van Gool, and L. Zhang et al. NTIRE 2018 challenge on image dehazing: Methods and results. IEEE CVPR, NTIRE Workshop, 2018. 6"
1565
- },
1566
- {
1567
- "type": "ref_text",
1568
- "bbox": [
1569
- 0.085,
1570
- 0.816,
1571
- 0.468,
1572
- 0.884
1573
- ],
1574
- "angle": 0,
1575
- "content": "[2] Codruta O Ancuti, Cosmin Ancuti, Mateu Sbert, and Radu Timofte. Dense-haze: A benchmark for image dehazing with dense-haze and haze-free images. In 2019 IEEE International Conference on Image Processing (ICIP), pages 1014-1018. IEEE, 2019. 6, 7"
1576
- },
1577
- {
1578
- "type": "ref_text",
1579
- "bbox": [
1580
- 0.085,
1581
- 0.887,
1582
- 0.468,
1583
- 0.9
1584
- ],
1585
- "angle": 0,
1586
- "content": "[3] Codruta O. Ancuti, Cosmin Ancuti, and Radu Timofte. NH-"
1587
- },
1588
- {
1589
- "type": "list",
1590
- "bbox": [
1591
- 0.085,
1592
- 0.759,
1593
- 0.468,
1594
- 0.9
1595
- ],
1596
- "angle": 0,
1597
- "content": null
1598
- },
1599
- {
1600
- "type": "ref_text",
1601
- "bbox": [
1602
- 0.533,
1603
- 0.611,
1604
- 0.892,
1605
- 0.666
1606
- ],
1607
- "angle": 0,
1608
- "content": "HAZE: An image dehazing benchmark with nonhomogeneous hazy and haze-free images. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020. 4, 6"
1609
- },
1610
- {
1611
- "type": "ref_text",
1612
- "bbox": [
1613
- 0.509,
1614
- 0.669,
1615
- 0.892,
1616
- 0.739
1617
- ],
1618
- "angle": 0,
1619
- "content": "[4] Codruta O Ancuti, Cosmin Ancuti, Radu Timofte, and Christophe De Vleeschouwer. O-haze: a dehazing benchmark with real hazy and haze-free outdoor images. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 754-762, 2018. 6"
1620
- },
1621
- {
1622
- "type": "ref_text",
1623
- "bbox": [
1624
- 0.509,
1625
- 0.742,
1626
- 0.892,
1627
- 0.81
1628
- ],
1629
- "angle": 0,
1630
- "content": "[5] Codruta O. Ancuti, Cosmin Ancuti, Florin-Alexandru Vasluianu, Radu Timofte, et al. Ntire 2020 challenge on nonhomogeneous dehazing. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020. 6, 7"
1631
- },
1632
- {
1633
- "type": "ref_text",
1634
- "bbox": [
1635
- 0.509,
1636
- 0.814,
1637
- 0.892,
1638
- 0.869
1639
- ],
1640
- "angle": 0,
1641
- "content": "[6] C. O. Ancuti, C. Ancuti, R. Timofte, L. Van Gool, and L. Zhang et al. NTIRE 2019 challenge on image dehazing: Methods and results. IEEE CVPR, NTIRE Workshop, 2019. 6"
1642
- },
1643
- {
1644
- "type": "ref_text",
1645
- "bbox": [
1646
- 0.509,
1647
- 0.873,
1648
- 0.892,
1649
- 0.902
1650
- ],
1651
- "angle": 0,
1652
- "content": "[7] Yochai Blau, Roey Mechrez, Radu Timofte, Tomer Michaeli, and Lihi Zelnik-Manor. The 2018 pirm challenge on percep"
1653
- },
1654
- {
1655
- "type": "list",
1656
- "bbox": [
1657
- 0.509,
1658
- 0.611,
1659
- 0.892,
1660
- 0.902
1661
- ],
1662
- "angle": 0,
1663
- "content": null
1664
- }
1665
- ],
1666
- [
1667
- {
1668
- "type": "ref_text",
1669
- "bbox": [
1670
- 0.108,
1671
- 0.093,
1672
- 0.47,
1673
- 0.12
1674
- ],
1675
- "angle": 0,
1676
- "content": "tual image super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 0-0, 2018. 6"
1677
- },
1678
- {
1679
- "type": "ref_text",
1680
- "bbox": [
1681
- 0.088,
1682
- 0.122,
1683
- 0.47,
1684
- 0.189
1685
- ],
1686
- "angle": 0,
1687
- "content": "[8] Zijun Deng, Lei Zhu, Xiaowei Hu, Chi-Wing Fu, Xuemiao Xu, Qing Zhang, Jing Qin, and Pheng-Ann Heng. Deep multi-model fusion for single-image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 2453–2462, 2019. 2"
1688
- },
1689
- {
1690
- "type": "ref_text",
1691
- "bbox": [
1692
- 0.088,
1693
- 0.19,
1694
- 0.468,
1695
- 0.258
1696
- ],
1697
- "angle": 0,
1698
- "content": "[9] Tiantong Guo, Venkateswararao Cherukuri, and Vishal Monga. Dense123'color enhancement dehazing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 5, 6, 7"
1699
- },
1700
- {
1701
- "type": "ref_text",
1702
- "bbox": [
1703
- 0.081,
1704
- 0.261,
1705
- 0.469,
1706
- 0.328
1707
- ],
1708
- "angle": 0,
1709
- "content": "[10] Tiantong Guo, Xuelu Li, Venkateswararao Cherukuri, and Vishal Monga. Dense scene information estimation network for dehazing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 0–0, 2019. 5, 6, 7"
1710
- },
1711
- {
1712
- "type": "ref_text",
1713
- "bbox": [
1714
- 0.081,
1715
- 0.33,
1716
- 0.469,
1717
- 0.385
1718
- ],
1719
- "angle": 0,
1720
- "content": "[11] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In European conference on computer vision, pages 694–711. Springer, 2016. 4"
1721
- },
1722
- {
1723
- "type": "ref_text",
1724
- "bbox": [
1725
- 0.081,
1726
- 0.386,
1727
- 0.469,
1728
- 0.425
1729
- ],
1730
- "angle": 0,
1731
- "content": "[12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5"
1732
- },
1733
- {
1734
- "type": "ref_text",
1735
- "bbox": [
1736
- 0.081,
1737
- 0.428,
1738
- 0.469,
1739
- 0.496
1740
- ],
1741
- "angle": 0,
1742
- "content": "[13] Yunan Li, Qiguang Miao, Wanli Ouyang, Zhenxin Ma, Huijuan Fang, Chao Dong, and Yining Quan. Lap-net: Level-aware progressive network for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 3276-3285, 2019. 2"
1743
- },
1744
- {
1745
- "type": "ref_text",
1746
- "bbox": [
1747
- 0.081,
1748
- 0.498,
1749
- 0.469,
1750
- 0.552
1751
- ],
1752
- "angle": 0,
1753
- "content": "[14] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Grid-dehazenet: Attention-based multi-scale network for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 2"
1754
- },
1755
- {
1756
- "type": "ref_text",
1757
- "bbox": [
1758
- 0.081,
1759
- 0.554,
1760
- 0.469,
1761
- 0.607
1762
- ],
1763
- "angle": 0,
1764
- "content": "[15] Yang Liu, Jinshan Pan, Jimmy Ren, and Zhixun Su. Learning deep priors for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 2492–2500, 2019. 2"
1765
- },
1766
- {
1767
- "type": "ref_text",
1768
- "bbox": [
1769
- 0.081,
1770
- 0.609,
1771
- 0.469,
1772
- 0.76
1773
- ],
1774
- "angle": 0,
1775
- "content": "[16] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 5"
1776
- },
1777
- {
1778
- "type": "ref_text",
1779
- "bbox": [
1780
- 0.081,
1781
- 0.762,
1782
- 0.469,
1783
- 0.815
1784
- ],
1785
- "angle": 0,
1786
- "content": "[17] Xu Qin, Zhilin Wang, Yuanchao Bai, Xiaodong Xie, and Huizhu Jia. Ffa-net: Feature fusion attention network for single image dehazing. arXiv preprint arXiv:1911.07559, 2019. 2, 5, 6, 7"
1787
- },
1788
- {
1789
- "type": "ref_text",
1790
- "bbox": [
1791
- 0.081,
1792
- 0.818,
1793
- 0.469,
1794
- 0.872
1795
- ],
1796
- "angle": 0,
1797
- "content": "[18] Yanyun Qu, Yizi Chen, Jingying Huang, and Yuan Xie. Enhanced pix2pix dehazing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8160-8168, 2019. 2"
1798
- },
1799
- {
1800
- "type": "ref_text",
1801
- "bbox": [
1802
- 0.081,
1803
- 0.874,
1804
- 0.469,
1805
- 0.901
1806
- ],
1807
- "angle": 0,
1808
- "content": "[19] Prasen Sharma, Priyankar Jain, and Arijit Sur. Scale-aware conditional generative adversarial network for image dehaz-"
1809
- },
1810
- {
1811
- "type": "list",
1812
- "bbox": [
1813
- 0.081,
1814
- 0.093,
1815
- 0.47,
1816
- 0.901
1817
- ],
1818
- "angle": 0,
1819
- "content": null
1820
- },
1821
- {
1822
- "type": "ref_text",
1823
- "bbox": [
1824
- 0.533,
1825
- 0.093,
1826
- 0.891,
1827
- 0.12
1828
- ],
1829
- "angle": 0,
1830
- "content": "ing. In The IEEE Winter Conference on Applications of Computer Vision, pages 2355-2365, 2020. 2"
1831
- },
1832
- {
1833
- "type": "ref_text",
1834
- "bbox": [
1835
- 0.504,
1836
- 0.122,
1837
- 0.892,
1838
- 0.163
1839
- ],
1840
- "angle": 0,
1841
- "content": "[20] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 4"
1842
- },
1843
- {
1844
- "type": "ref_text",
1845
- "bbox": [
1846
- 0.503,
1847
- 0.165,
1848
- 0.892,
1849
- 0.219
1850
- ],
1851
- "angle": 0,
1852
- "content": "[21] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6"
1853
- },
1854
- {
1855
- "type": "ref_text",
1856
- "bbox": [
1857
- 0.504,
1858
- 0.221,
1859
- 0.892,
1860
- 0.289
1861
- ],
1862
- "angle": 0,
1863
- "content": "[22] Hongguang Zhang, Yuchao Dai, Hongdong Li, and Piotr Koniusz. Deep stacked hierarchical multi-patch network for image deblurring. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5978-5986, 2019. 2"
1864
- },
1865
- {
1866
- "type": "ref_text",
1867
- "bbox": [
1868
- 0.504,
1869
- 0.291,
1870
- 0.892,
1871
- 0.345
1872
- ],
1873
- "angle": 0,
1874
- "content": "[23] He Zhang and Vishal M Patel. Densely connected pyramid dehazing network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3194-3203, 2018. 2"
1875
- },
1876
- {
1877
- "type": "ref_text",
1878
- "bbox": [
1879
- 0.504,
1880
- 0.348,
1881
- 0.892,
1882
- 0.417
1883
- ],
1884
- "angle": 0,
1885
- "content": "[24] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018. 6"
1886
- },
1887
- {
1888
- "type": "list",
1889
- "bbox": [
1890
- 0.503,
1891
- 0.093,
1892
- 0.892,
1893
- 0.417
1894
- ],
1895
- "angle": 0,
1896
- "content": null
1897
- }
1898
- ]
1899
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02be11f4761acfdbb93499c87614977d687168343caa1f0cfc4ae9d4a3fbcf89
3
+ size 68197
 
data/2020/2005_05xxx/2005.05999/full.md CHANGED
@@ -1,340 +1,3 @@
1
- # Fast Deep Multi-patch Hierarchical Network for Nonhomogeneous Image Dehazing
2
-
3
- Sourya Dipta Das*
4
- Jadavpur University
5
- Kolkata, India
6
-
7
- dipta.juetce@gmail.com
8
-
9
- Saikat Dutta* IIT Madras
10
-
11
- Chennai, India
12
-
13
- cs18s016@smail.iitm.ac.in
14
-
15
- # Abstract
16
-
17
- Recently, CNN-based end-to-end deep learning methods have achieved superiority in image dehazing, but they tend to fail drastically in non-homogeneous dehazing. Apart from that, existing popular multi-scale approaches are runtime intensive and memory inefficient. In this context, we propose a fast Deep Multi-patch Hierarchical Network to restore non-homogeneous hazed images by aggregating features from multiple image patches from different spatial sections of the hazed image, with fewer network parameters. Our proposed method is quite robust across environments with varying densities of haze or fog in the scene, and it is very lightweight, with a total model size of around 21.7 MB. It also provides faster runtime compared to current multi-scale methods, with an average runtime of 0.0145s to process a $1200 \times 1600$ HD quality image. Finally, we show the superiority of this network over other state-of-the-art models on Dense Haze Removal.
18
-
19
- # 1. Introduction
20
-
21
- Outdoor images are often deteriorated by extreme weather such as fog and haze, which reduces visibility in the scene through the degradation of color, contrast and texture of objects at different distances and the selective attenuation of the light spectrum. Restoring such hazed images has become an important problem in many computer vision applications like visual surveillance, remote sensing, and autonomous transportation. Most early methods proposed for image dehazing are based on the classic atmospheric scattering model, shown in Equation 1.
22
-
23
- $$
24
- I (x) = J (x) t (x) + A (1 - t (x)) \tag {1}
25
- $$
26
-
27
- where $x$ represents pixel locations, $I(x)$ is the observed hazy image, $J(x)$ is the dehazed image, $t(x)$ is called the medium transmission function, and $A$ is the global atmospheric light.
28
-
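As a concrete illustration of Equation 1, the short sketch below synthesizes a hazy image from a clean one; the uniform transmission and airlight values are arbitrary placeholders chosen only for this example, not values used anywhere in the paper.

```python
import numpy as np

def apply_haze(J: np.ndarray, t: np.ndarray, A: float) -> np.ndarray:
    """Atmospheric scattering model: I(x) = J(x) t(x) + A (1 - t(x))."""
    return J * t + A * (1.0 - t)

# Toy example: clean image J in [0, 1], uniform transmission 0.6, airlight 0.9.
J = np.random.rand(64, 64, 3)
t = np.full((64, 64, 1), 0.6)
I = apply_haze(J, t, A=0.9)
```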
29
- Recently, deep learning based methods have shown remarkable improvements, though these methods suffer from degradation of colour and texture in the image, halo artifacts, haze residuals and distortions. In our problem statement, non-homogeneous haze in the scene reflects the real-world situation where different spatial regions of the image can be affected by different levels of haze. The degradation level also varies a lot for objects at different scene depths due to the non-uniform haze distribution in the image. A few example images of such non-homogeneous haze are shown in Figure 4. A dehazing model should therefore handle non-uniform haze and different degradation across scene depths jointly. Multi-scale and scale-recurrent models can be a viable solution to this type of problem because of their coarse-to-fine learning scheme, which hierarchically integrates features from different spatial scales of the image. However, this type of method is inefficient because of high runtime and large model size due to many convolution and deconvolution layers. Apart from that, increasing the depth of layers at fine-scale levels may not always improve the perceptual quality of the output dehazed image. In contrast, the main goal of our model is to aggregate features from multiple image patches from different spatial sections of the image for better performance. The parameters of our encoder and decoder are few due to the residual links in our model, which helps in fast dehazing inference. The main intuition behind our idea is to make the lower-level network portion focus on local information by extracting local features from the finer grid, producing residual information for the upper-level part of the network, which obtains more global information from both the finer and coarser grids by concatenating convolutional features.
30
-
31
- # 2. Related Work
32
-
33
- Most early work of image dehazing methods is developed on atmosphere scattering model as it's physical model. In that respect, previous works on image dehazing can be
34
-
35
- segregated into two classes: traditional image prior-based methods and end-to-end deep learning based methods. Traditional image prior based methods rely on hand-crafted statistics from the images to leverage extra mathematical constraints that compensate for the information lost during reconstruction. On the contrary, deep learning based methods learn the direct relationship between the hazy and haze-free image by utilizing multistage designs, attention mechanisms, etc. Here, we discuss some recent deep learning based methods with state-of-the-art results.
36
-
37
- Zhang et al.[23] proposed a dehazing network with an edge-preserving densely connected encoder-decoder architecture that jointly learns the dehazed image, transmission map and atmospheric light all together based on the scattering model for dehazing. In their encoder-decoder architecture, they use a multilevel pyramid pooling module, and to improve their results further, a GAN-based joint discriminator is used to incorporate the correlation between the estimated transmission map and the dehazed image. Deng et al.[8] present a multi-model fusion network to combine multiple models at different levels of layers and enhance the overall performance of image dehazing. They generate a multi-model attention integrated feature from various CNN features at different levels and feed it to their fusion model to predict dehazed images for an atmospheric scattering model and four haze-layer separation models altogether. After that, they fuse the corresponding results together to generate the final dehazed image. Qin et al.[17] proposed a novel Feature Attention module which fuses Channel Attention with Pixel Attention while considering the different weighted information of different channel-wise features and the uneven haze distribution over the pixels of the hazed image. For outdoor hazy images their work proves superior, though it does not work well in the case of dense haze. Liu et al.[14] proposed a grid network with attention-based multi-scale estimation which overcomes the bottleneck problems found in the general multi-scale approach. Apart from that, their method also consists of pre-processing and post-processing modules. The pre-processing module used in this method is trainable to get more relevant features from diversified pre-processed image inputs, and it outperforms other hand-picked classical pre-processing techniques. The post-processing module is finally used on the intermediate dehazed image to get a finer dehazed image. Their study shows how their method works quite independently and does not take any advantage of the atmospheric scattering model for image dehazing.
38
-
39
- Unlike other multi-stage methods, Li et al.[13] used a level-aware progressive deep network to learn different levels of haze at different stages of the network under different supervision. Their network progressively learns gradually more intense haze from the image by focusing on a specific part of the image with a certain haze level. They have also devised an adaptive hierarchical integration tech
40
-
41
- nique that cooperates with its memory network component and domain information of dehazing to emphasize the well-reconstructed parts of the image at each stage of the network. Liu et al.[15] suggest a method to learn haze-relevant image priors by using an iterative algorithm with deep CNNs. They achieve this by using a gradient descent method to optimize a variational model with image fidelity terms and proper regularization. This method is indeed a good combination of properties from classical deep learning based methods and the physical hazed image formation model. Sharma et al.[19] explored the application of Laplacians of Gaussian (LoG) of the images to recover edge and intensity variation information. They optimize their end-to-end deep model by the per-pixel difference between Laplacians of Gaussians of the dehazed and ground truth images. They additionally perform adversarial training with a perceptual loss to enhance their results. Apart from physical scattering model based methods, GANs, and multi-scale or multi-stage deep networks, image dehazing can also be posed as an image-to-image translation problem. Qu et al. (2019)[18] proposed their solution as an enhanced Pix2Pix model, which is widely used in image style transfer, image-to-image translation and similar problems. Their method consists of a GAN with Enhancer modules to support the dehazing process and produce a more detailed, vivid image with fewer artifacts. Their work also proved superior to other methods in terms of the perceptual quality of the dehazed images.
42
-
43
- # 3. Proposed Method
44
-
45
- We use a Multi-patch and a Multi-scale network for Nonhomogeneous Image Dehazing. In this section, we describe these two architectures in detail.
46
-
47
- # 3.1. Multi-patch Architecture:
48
-
49
- We use Deep Multi-patch Hierarchical Network(DMPHN). DMPHN is originally used for Single Image Deblurring[22]. We use (1-2-4) variant of DMPHN in this paper. For the sake of completeness, we will discuss the architecture in the following.
50
-
51
- DMPHN is a multi-level architecture. There is an encoder-decoder pair in each level. Each level works on a different number of patches. In DMPHN(1-2-4), the number of patches used is 1, 2 and 4 from the top to the bottom level respectively. The top-most level (level-1) considers only one patch per image. In the next level (level-2), the image is divided into two patches vertically. In the bottom-most level (level-3), the patches from the previous level are further divided horizontally, resulting in a total of 4 patches.
52
-
53
- Let us consider an input hazy image $I^H$ . We denote $j$ -th patch in $i$ -th level as $I_{i,j}^H$ . In level-1, $I^H$ is not divided into any patches. In level-2, $I^H$ is divided vertically into $I_{2,1}^H$ and $I_{2,2}^H$ . In level-3, $I_{2,1}^H$ and $I_{2,2}^H$ are divided horizontally
54
-
55
- ![](images/260479b9b27b6d3d8b6234b30b23d67b18488524106be37a4bd3c54eb4414065.jpg)
56
- Figure 1: Architecture diagram of Deep Multi-Patch Hierarchical Network. $\{^{\prime}\}$ denotes spatial concatenation and $\bigoplus$ denotes residual addition.
57
-
58
- to create 4 patches, $I_{3,1}^{H}, I_{3,2}^{H}, I_{3,3}^{H}$ and $I_{3,4}^{H}$. The encoder and decoder at the $i$-th level are denoted as $Enc_{i}$ and $Dec_{i}$ respectively.
59
-
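A minimal sketch of the (1-2-4) patch layout described above, assuming NCHW tensors; whether the first split runs along the height or the width is not fully pinned down by the text, so the level-2 split is taken along the height here as an assumption, and the helper name is ours.

```python
import torch

def split_patches(x: torch.Tensor):
    """Split an NCHW image into the DMPHN(1-2-4) patch hierarchy.

    Level-2: two halves (assumed split along height).
    Level-3: each half split again along width, giving four patches.
    """
    h, w = x.shape[2], x.shape[3]
    level2 = [x[:, :, : h // 2, :], x[:, :, h // 2 :, :]]        # I_{2,1}, I_{2,2}
    level3 = []
    for p in level2:
        level3 += [p[:, :, :, : w // 2], p[:, :, :, w // 2 :]]   # I_{3,1} ... I_{3,4}
    return level2, level3

l2, l3 = split_patches(torch.rand(1, 3, 256, 320))
assert len(l2) == 2 and len(l3) == 4
```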
60
- The information flow in DMPHN is bottom-up. Patches in the lowest level are fed to encoder $Enc_3$ to generate corresponding feature maps.
61
-
62
- $$
63
- F_{3,j} = \operatorname{Enc}_{3}\left(I_{3,j}^{H}\right), \forall j \in [1, 4] \tag{2}
64
- $$
65
-
66
- We concatenate spatially adjacent feature maps to obtain a new feature representation.
67
-
68
- $$
69
- P _ {3, j} = \left[ F _ {3, 2 j - 1}, F _ {3, 2 j} \right], \forall j \in [ 1, 2 ] \tag {3}
70
- $$
71
-
72
- where $[\ldots]$ stands for concatenation.
73
-
74
- The new concatenated features are passed through decoder $Dec_{3}$ .
75
-
76
- $$
77
- Q_{3,j} = \operatorname{Dec}_{3}\left(P_{3,j}\right), \forall j \in [1, 2] \tag{4}
78
- $$
79
-
80
- The decoder output is added with patches in the next level and fed to encoder.
81
-
82
- $$
83
- F_{2,j} = \operatorname{Enc}_{2}\left(I_{2,j}^{H} + Q_{3,j}\right), \forall j \in [1, 2] \tag{5}
84
- $$
85
-
86
- The encoder outputs are added with respective decoder inputs from previous level. Then the resulting feature maps are spatially concatenated.
87
-
88
- $$
89
- F _ {2, j} ^ {*} = F _ {2, j} + P _ {3, j}, \forall j \in [ 1, 2 ] \tag {6}
90
- $$
91
-
92
- $$
93
- P _ {2} = \left[ F _ {2, 1} ^ {*}, F _ {2, 2} ^ {*} \right] \tag {7}
94
- $$
95
-
96
- $P_{2}$ is then fed to $Dec_{2}$ to generate residual feature maps for level-2.
97
-
98
- $$
99
- Q_{2} = \operatorname{Dec}_{2}(P_{2}) \tag{8}
100
- $$
101
-
102
- Decoder output at level-2 is added to input image and passed through $Enc_1$ . Encoder output $F_1$ is added with decoder output at level-2, $Q_2$ .
103
-
104
- $$
105
- F_{1} = \operatorname{Enc}_{1}\left(I^{H} + Q_{2}\right) \tag{9}
106
- $$
107
-
108
- $F_{1}$ is added with $P_{2}$ and fed to $Dec_{1}$ to produce the final dehazed output $\hat{I}$ .
109
-
110
- $$
111
- P _ {1} = F _ {1} + P _ {2} \tag {10}
112
- $$
113
-
114
- $$
115
- \hat{I} = \operatorname{Dec}_{1}\left(P_{1}\right) \tag{11}
116
- $$
117
-
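A schematic PyTorch sketch of the bottom-up flow in Equations 2-11. The `conv_block` stand-ins below are tiny size-preserving placeholders, not the 15-layer encoder/decoder of Section 3.3, and all names are ours; the sketch only traces the patch splitting, spatial concatenation and residual additions.

```python
import torch
import torch.nn as nn

def conv_block(in_c, out_c):
    # Tiny, size-preserving stand-in for the real encoder/decoder of Section 3.3.
    return nn.Sequential(nn.Conv2d(in_c, out_c, 3, padding=1), nn.ReLU(),
                         nn.Conv2d(out_c, out_c, 3, padding=1))

class DMPHN124(nn.Module):
    """Schematic bottom-up multi-patch flow of Eqs. (2)-(11)."""

    def __init__(self, ch=32):
        super().__init__()
        self.enc1, self.enc2, self.enc3 = (conv_block(3, ch) for _ in range(3))
        self.dec1, self.dec2, self.dec3 = (conv_block(ch, 3) for _ in range(3))

    def forward(self, x):
        h, w = x.shape[2], x.shape[3]
        i2 = [x[:, :, : h // 2, :], x[:, :, h // 2 :, :]]            # level-2 patches
        i3 = []
        for p in i2:                                                 # level-3 patches
            i3 += [p[:, :, :, : w // 2], p[:, :, :, w // 2 :]]
        f3 = [self.enc3(p) for p in i3]                              # Eq. (2)
        p3 = [torch.cat(f3[2 * j : 2 * j + 2], dim=3) for j in range(2)]  # Eq. (3)
        q3 = [self.dec3(p) for p in p3]                              # Eq. (4)
        f2 = [self.enc2(i2[j] + q3[j]) for j in range(2)]            # Eq. (5)
        f2s = [f2[j] + p3[j] for j in range(2)]                      # Eq. (6)
        p2 = torch.cat(f2s, dim=2)                                   # Eq. (7)
        q2 = self.dec2(p2)                                           # Eq. (8)
        f1 = self.enc1(x + q2)                                       # Eq. (9)
        return self.dec1(f1 + p2)                                    # Eqs. (10)-(11)

out = DMPHN124()(torch.rand(1, 3, 64, 80))   # output has the same shape as the input
```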
118
- # 3.2. Multi-scale Architecture:
119
-
120
- We also experiment with a multi-scale architecture. We name this architecture Deep Multi-scale Hierarchical Network(DMSHN). The details of the architecture are described as follows.
121
-
122
- Input hazy image $I^H$ is downsampled by factor of 2 and 4 to create an image pyramid. We call these downsampled images $I_{0.5}^H$ and $I_{0.25}^H$ respectively. The architecture consists of 3 levels where each level has a pair of encoder and decoder. Encoder and decoder at level $i$ is denoted as $Enc_i$ and $Dec_i$ respectively.
123
-
124
- At the lowest level, $I_{0.25}^{H}$ is fed to encoder $Enc_3$ to obtain feature map $F_3$, which is further passed through decoder $Dec_3$ to obtain feature representation $P_3$.
125
-
126
- $$
127
- F_{3} = \operatorname{Enc}_{3}\left(I_{0.25}^{H}\right) \tag{12}
128
- $$
129
-
130
- $$
131
- P_{3} = \operatorname{Dec}_{3}\left(F_{3}\right) \tag{13}
132
- $$
133
-
134
- $P_{3}$ is upscaled by a factor of 2, added to $I_{0.5}^{H}$, and passed through encoder $Enc_2$ to generate $F_{2}^{*}$. Encoder output from
135
-
136
- ![](images/0342445da3bc03bee3acb95c0b949ec7c2b25382f8143b73abcebee7abd69fc2.jpg)
137
- Figure 2: Architecture diagram of Deep Multi-Scale Hierarchical Network. $\updownarrow$ denotes Upsampling by factor of 2 and $\bigoplus$ denotes residual addition.
138
-
139
- previous level is upscaled and added to intermediate feature map $F_{2}^{*}$ and fed to the decoder $Dec_{2}$ .
140
-
141
- $$
142
- F_{2}^{*} = \operatorname{Enc}_{2}\left(I_{0.5}^{H} + up\left(P_{3}\right)\right) \tag{14}
143
- $$
144
-
145
- $$
146
- F_{2} = F_{2}^{*} + up\left(F_{3}\right) \tag{15}
147
- $$
148
-
149
- $$
150
- P_{2} = \operatorname{Dec}_{2}\left(F_{2}\right) \tag{16}
151
- $$
152
-
153
- where $up(.)$ denotes Upsampling operation by a factor of 2. Residual feature map $P_{2}$ from level-2 is added to the input hazy image and fed to encoder $Enc_{1}$ . Encoder output is added with upscaled $F_{2}$ and passed through decoder to synthesize the dehazed output $\hat{I}$ .
154
-
155
- $$
156
- F_{1}^{*} = \operatorname{Enc}_{1}\left(I^{H} + up\left(P_{2}\right)\right) \tag{17}
157
- $$
158
-
159
- $$
160
- F_{1} = F_{1}^{*} + up\left(F_{2}\right) \tag{18}
161
- $$
162
-
163
- $$
164
- \hat{I} = \operatorname{Dec}_{1}\left(F_{1}\right) \tag{19}
165
- $$
166
-
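The corresponding coarse-to-fine flow of Equations 12-19, again with placeholder encoder/decoder blocks (a sketch, not the authors' implementation). Note that Equation 17 prints $Enc_2$ in the original, but the surrounding text says the top level uses $Enc_1$, so the sketch follows the text.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def conv_block(in_c, out_c):
    # Size-preserving placeholder for the encoder/decoder of Section 3.3.
    return nn.Sequential(nn.Conv2d(in_c, out_c, 3, padding=1), nn.ReLU(),
                         nn.Conv2d(out_c, out_c, 3, padding=1))

class DMSHN(nn.Module):
    """Schematic multi-scale flow of Eqs. (12)-(19)."""

    def __init__(self, ch=32):
        super().__init__()
        self.enc1, self.enc2, self.enc3 = (conv_block(3, ch) for _ in range(3))
        self.dec1, self.dec2, self.dec3 = (conv_block(ch, 3) for _ in range(3))

    def forward(self, x):
        up = lambda t: F.interpolate(t, scale_factor=2, mode="bilinear", align_corners=False)
        x_05 = F.interpolate(x, scale_factor=0.5, mode="bilinear", align_corners=False)
        x_025 = F.interpolate(x, scale_factor=0.25, mode="bilinear", align_corners=False)
        f3 = self.enc3(x_025)                  # Eq. (12)
        p3 = self.dec3(f3)                     # Eq. (13)
        f2s = self.enc2(x_05 + up(p3))         # Eq. (14)
        f2 = f2s + up(f3)                      # Eq. (15)
        p2 = self.dec2(f2)                     # Eq. (16)
        f1s = self.enc1(x + up(p2))            # Eq. (17), reading Enc_2 as a typo for Enc_1
        f1 = f1s + up(f2)                      # Eq. (18)
        return self.dec1(f1)                   # Eq. (19)

out = DMSHN()(torch.rand(1, 3, 64, 80))
```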
167
- # 3.3. Encoder and Decoder Architecture:
168
-
169
- We use the same encoder and decoder architecture at all levels of DMPHN and DMSHN. The encoder consists of 15 convolutional layers, 6 residual connections and 6 ReLU units. The layers in the decoder and encoder are similar except that 2 convolutional layers are replaced by deconvolutional layers to generate dehazed images as output.
170
-
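The exact layer configuration is given in Figure 3. As a rough sketch of the building pattern only (convolutional layers wrapped in identity skip connections), one residual unit could look like the following; the channel width and kernel size here are illustrative guesses, not the values from Figure 3.

```python
import torch.nn as nn

class ResidualUnit(nn.Module):
    """One conv-ReLU-conv unit with an identity skip, of the kind repeated
    inside the encoder and decoder (width/kernel are illustrative only)."""

    def __init__(self, ch=64, k=3):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(ch, ch, k, padding=k // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch, ch, k, padding=k // 2),
        )

    def forward(self, x):
        return x + self.body(x)
```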
171
- # 4. Experiments
172
-
173
- # 4.1. Dataset Description:
174
-
175
- We used the NH-HAZE dataset[3] provided for the NTIRE 2020 Nonhomogeneous Image Dehazing challenge in our experiments. This dataset contains a total of 55 hazy and clear image pairs, divided into a training set of 45 image pairs, a validation set of 5 image pairs and a test set of 5 image pairs.
176
-
177
- Validation and test ground truth images are not publicly available at this moment. The resolution of images in this dataset is $1200 \times 1600$. This dataset contains hazy and haze-free images of various outdoor scenes. A few haze-free and hazy image pairs from this dataset are shown in Figure-4.
178
-
179
- # 4.2. Training data preparation:
180
-
181
- Due to the small amount of available data, we divide each image into 100 non-overlapping patches. Thus we obtain a training set of 4500 image-pairs of resolution $120 \times 160$ . No data augmentation techniques were used.
182
-
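A sketch of this patch preparation: cutting a 1200×1600 frame into a 10×10 grid of non-overlapping 120×160 crops yields exactly 100 patches per image, and hence 45 × 100 = 4500 training pairs. The helper below is illustrative; file handling and patch ordering are assumptions.

```python
import numpy as np

def to_patches(img: np.ndarray, ph: int = 120, pw: int = 160):
    """Cut an (H, W, C) image into non-overlapping ph x pw patches, row by row."""
    H, W = img.shape[:2]
    return [img[y : y + ph, x : x + pw]
            for y in range(0, H - ph + 1, ph)
            for x in range(0, W - pw + 1, pw)]

frame = np.zeros((1200, 1600, 3), dtype=np.uint8)
patches = to_patches(frame)
assert len(patches) == 100   # 10 x 10 grid
```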
183
- # 4.3. Loss functions:
184
-
185
- We use a linear combination of the following loss functions as our optimization objective.
186
-
187
- Reconstruction loss: Reconstruction loss helps the network to generate dehazed frames close to the ground truth. Our reconstruction loss is a weighted sum of MAE or $L_{1}$ loss and MSE or $L_{2}$ loss. The reconstruction loss is given by,
188
-
189
- $$
190
- L _ {r} = \lambda_ {1} L _ {1} + \lambda_ {2} L _ {2} \tag {20}
191
- $$
192
-
193
- where $L_{1} = \left\| \hat{I} - I\right\|_{1}$ and $L_{2} = \left\| \hat{I} - I\right\|_{2}$
194
-
195
- Perceptual loss: The $L_2$ distance between features extracted from the conv4_3 layer of VGGNet[20] for the predicted and ground truth images is used as the perceptual loss[11]. Perceptual loss is given by,
196
-
197
- $$
198
- L _ {p} = \left\| \phi (\hat {I}) - \phi (I) \right\| _ {2} \tag {21}
199
- $$
200
-
201
- TV loss: We use Total Variation (TV) loss[11] to make predictions smooth. TV loss is given by,
202
-
203
- $$
204
- L _ {t v} = \left\| \nabla_ {x} \hat {I} \right\| _ {2} + \left\| \nabla_ {y} \hat {I} \right\| _ {2} \tag {22}
205
- $$
206
-
207
- ![](images/b75b6ea7b7f05d654b8199a035bb11b5db37f9dab5e2bcf280a0a6a9b5e269ce.jpg)
208
- Figure 3: Encoder and Decoder architecture. Within brackets written values are Input Channel, Output Channel, Kernel and Stride respectively.
209
-
210
- ![](images/53783e9f932eacc09c4fbf0cb7681c251c52803869c27e2992aba00126d9d1c0.jpg)
211
- Figure 4: A snapshot of Training Dataset. Top row contains hazy images and bottom row contains corresponding ground truth images.
212
-
213
- Our final loss function is given by,
214
-
215
- $$
216
- L = \lambda_{r} L_{r} + \lambda_{p} L_{p} + \lambda_{tv} L_{tv} \tag{23}
217
- $$
218
-
219
- In our experiments we choose $\lambda_r = 1$, $\lambda_p = 6 \times 10^{-3}$ and $\lambda_{tv} = 2 \times 10^{-8}$. $\lambda_1$ and $\lambda_2$ are chosen to be 0.6 and 0.4, respectively.
220
-
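A minimal PyTorch sketch of the objective in Equations (20)-(23) with the weights above. The torchvision VGG16 slice used for the perceptual term (features up to conv4_3) and the mean-squared formulation of the perceptual and TV terms are assumptions for illustration; ImageNet normalization of the VGG inputs is omitted for brevity.

```python
import torch.nn.functional as F
from torchvision.models import vgg16

# Frozen VGG16 feature extractor up to conv4_3 (index 21 of .features)
vgg_conv4_3 = vgg16(pretrained=True).features[:22].eval()
for p in vgg_conv4_3.parameters():
    p.requires_grad_(False)


def total_loss(pred, gt,
               lam1=0.6, lam2=0.4,              # weights inside L_r, Eq. (20)
               lam_r=1.0, lam_p=6e-3, lam_tv=2e-8):
    l_r = lam1 * F.l1_loss(pred, gt) + lam2 * F.mse_loss(pred, gt)        # Eq. (20)
    l_p = F.mse_loss(vgg_conv4_3(pred), vgg_conv4_3(gt))                  # Eq. (21)
    l_tv = (pred[:, :, 1:, :] - pred[:, :, :-1, :]).pow(2).mean() + \
           (pred[:, :, :, 1:] - pred[:, :, :, :-1]).pow(2).mean()         # Eq. (22)
    return lam_r * l_r + lam_p * l_p + lam_tv * l_tv                      # Eq. (23)
```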
221
- # 4.4. Training details:
222
-
223
- We developed our models using PyTorch [16] on a system with an AMD Ryzen 1600X CPU and an NVIDIA GTX 1080 GPU. We use the Adam optimizer [12] to train our networks, with $\beta_{1} = 0.9$ and $\beta_{2} = 0.99$. We use a batch size of 8. The initial learning rate is set to 1e-4 and is gradually decreased to 5e-5. We train our models until convergence.
224
-
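The optimizer configuration described above could look like the following sketch; `model`, `loader` and the epoch count are placeholders, and the step schedule is only one way to realize the 1e-4 to 5e-5 decay, which the text does not specify exactly.

```python
import torch

# model: a DMPHN/DMSHN instance; loader: yields (hazy, gt) batches of size 8.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.99))
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100], gamma=0.5)

num_epochs = 200  # placeholder; the models are trained until convergence
for epoch in range(num_epochs):
    for hazy, gt in loader:
        pred = model(hazy)
        loss = total_loss(pred, gt)   # objective from the sketch in Sec. 4.3
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    scheduler.step()
```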
225
- # 4.5. Testing details:
226
-
227
- We test our models' performance on the given full-resolution images of the validation data. Note that our models are fully convolutional, hence the difference between training and test image sizes does not matter.
228
-
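Assuming an encoder with two stride-2 stages, as sketched earlier, a full $1200 \times 1600$ validation image can be pushed through the same network directly; a minimal sketch (the tensor name is a placeholder):

```python
import torch

model.eval()
with torch.no_grad():
    # hazy_full: 1 x 3 x 1200 x 1600 tensor; both dimensions are divisible by
    # the assumed downsampling factor, so no padding or cropping is needed.
    dehazed = model(hazy_full).clamp(0, 1)
```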
229
- # 4.6. Results:
230
-
231
- # 4.6.1 Quantitative and Qualitative Results:
232
-
233
- As the ground truth for the validation set is not publicly available, we submit our validation results to the CodaLab server. We compare the performance of our models with three state-of-the-art dehazing models, namely AtJ-DH [10], 123-CEDH [9] and FFA-Net [17]. The quantitative results on the validation set are given in Table 1. DMPHN performs better than the rest of the models. It can be observed that our multi-patch network performs better than our multi-scale network in terms of both PSNR and SSIM. At lower levels of DMPHN, the network operates at the patch level, so it learns local features in contrast to the global features learnt by DMSHN, which explains the performance gain of DMPHN.
234
-
235
- Apart from decent dehazing results, it should be noted that both DMPHN and DMSHN are lightweight and efficient models. Checkpoints of both networks take 21.7 MB on disk. The GPU processing times of DMPHN and DMSHN make them suitable for real-time applications.
236
-
237
- ![](images/322dacf900c71f0650b47c7ba42723702ad7bcb3a68df5d94e63217f1a833931.jpg)
238
-
239
- ![](images/39333b7d0708f08b06a14fa0512445b94d2d73c027eb8ae01443ef63137e219e.jpg)
240
-
241
- ![](images/277e66f5bcdb4b1e35284a505aac004d7c384ad1fc1e260886bdabd0ec185d92.jpg)
242
-
243
- ![](images/aaf822e5068e3e78eb973d79d4a5bcd09b9523ab3e2ae0537fef5f072bf312b3.jpg)
244
-
245
- ![](images/ff69bb4f9d7194a2314db839708d935bcac1716ff8cdad75fda7b2a35e29ff0e.jpg)
246
-
247
- ![](images/1bb380d079fc7923dce507e6a8946b04ccb45777a59c4233e36f1ea1e316716d.jpg)
248
-
249
- ![](images/16d30bb661b01669099c1ede79200b942585bef680dd454051c1007c9be9c9e4.jpg)
250
-
251
- ![](images/036bcca9b8745e703c2d6eac42130e0b525c864130a462aaecef840f4ce0f6e5.jpg)
252
-
253
- ![](images/bafb1f94a9ac9de7be51d8f4512f4f0c8b56eb3af01e231d03d806991f552286.jpg)
254
-
255
- ![](images/289763fc9035f88d730e8708b8d3f27a9745a44a66c6d51f6ff8ab2fc71bedea.jpg)
256
-
257
- ![](images/d90a9801e197ed345da27802b6006a8df7d898a2db6d33e1d6d76ab764b074fd.jpg)
258
-
259
- ![](images/7818add7126c6691210bea765f9cc152e3eba05730675ffedb012559fb48d6e3.jpg)
260
-
261
- ![](images/e2ae1cbb238ff65555c1591fed18aea688b6dc8e7691db70aa05cd5ed6dbd5f1.jpg)
262
- (a) Input Image
263
-
264
- ![](images/dace9d1bc36ebadd1dff706d809d3f44a6a6e725b1f09da206b960864a88ffdf.jpg)
265
- (b) AtJ-DH
266
-
267
- ![](images/b2c2c16f899acfe25e056b4f40bac094e6bc23ce53664757c4ac86b1297a488f.jpg)
268
- (c) 123-CEDH
269
-
270
- ![](images/970b251056da637eef385da25e0a04f22286444008de703dcdd3a1161dd390a0.jpg)
271
- (d) FFA-Net
272
-
273
- ![](images/99755871b7e562ce1eab3fea5b56908bac3c75b3130c6384cce7e568b559b196.jpg)
274
- (e) Ours (DMPHN)
275
- Figure 5: Qualitative results on NH-HAZE[3] Validation dataset.
276
-
277
- ![](images/b0412d4b6cff0e050c3862848499c707b64400ab900b41c6bcdf12cdd084e2cb.jpg)
278
- (f) Ours (DMSHN)
279
-
280
- <table><tr><td></td><td>PSNR</td><td>SSIM</td><td>Runtime(s)</td></tr><tr><td>AtJ-DH[10]</td><td>15.94</td><td>0.5662</td><td>0.0775</td></tr><tr><td>123-CEDH[9]</td><td>14.59</td><td>0.5488</td><td>0.0559</td></tr><tr><td>FFA-Net[17]</td><td>10.43</td><td>0.4168</td><td>1.7472</td></tr><tr><td>DMPHN</td><td>16.94</td><td>0.6177</td><td>0.0145</td></tr><tr><td>DMSHN</td><td>16.42</td><td>0.5991</td><td>0.0210</td></tr></table>
281
-
282
- Table 1: Quantitative results on NH-HAZE[3] Validation set.
283
-
284
- # 4.6.2 NTIRE 2020 challenge on NonHomogeneous Image Dehazing:
285
-
286
- We participated in the NTIRE 2020 challenge on NonHomogeneous Image Dehazing [5]. 27 teams submitted results in the test phase, of which 19 teams did not use extra training data such as Dense-Haze [2, 6] and O-Haze [4, 1]. The test results were evaluated on fidelity measures as well as perceptual measures. Fidelity measures included PSNR and SSIM [21], while LPIPS [24], Perceptual Index (PI) [7] and Mean Opinion Score (MOS) were used as perceptual metrics. For a fair comparison, we report the performance of submissions that used only the NH-HAZE dataset in Table 2. Our DMPHN network produced outputs of moderate quality in both fidelity and perceptual metrics. Our network is the fastest entry among all the submissions.
287
-
288
- # 4.6.3 Dense Haze Removal:
289
-
290
- DMPHN is effective for dense haze removal as well. We trained our network on the Dense-HAZE dataset [2], using 50 images for training and 5 images for testing. We compare the performance with AtJ-DH [10], 123-CEDH [9]
291
-
292
- and FFA-Net [17]. Quantitative results and GPU runtimes are shown in Table 3. We observe that DMPHN is significantly better than the other models, both in terms of fidelity measures and runtime. Figure 6 shows a qualitative comparison with these models.
293
-
294
- # 4.7. Conclusion
295
-
296
- In this paper, we use a multi-patch and a multi-scale architecture for nonhomogeneous haze removal from images. We show that DMPHN performs better than DMSHN because DMPHN aggregates local features from finer levels to coarser levels. Moreover, DMPHN is fast and can dehaze images from a video sequence in real time. We also show that DMPHN performs well for dense haze removal. In the future, the effectiveness of DMPHN with more levels can be explored for further performance improvement, although adding more levels to the architecture will come at the cost of runtime.
297
-
298
- <table><tr><td rowspan="2">Team</td><td colspan="2">Fidelity</td><td colspan="3">Perceptual quality</td><td rowspan="2">Runtime(s)↓</td><td rowspan="2">GPU/CPU</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>PI↓</td><td>MOS↓</td></tr><tr><td>method1</td><td>21.60</td><td>0.67</td><td>0.363</td><td>3.712</td><td>3</td><td>0.21</td><td>v100</td></tr><tr><td>method2</td><td>21.91</td><td>0.69</td><td>0.361</td><td>3.700</td><td>4</td><td>0.22</td><td>v100</td></tr><tr><td>method3</td><td>19.25</td><td>0.60</td><td>0.426</td><td>5.061</td><td>12</td><td>12.88</td><td>v100</td></tr><tr><td>method4</td><td>18.51</td><td>0.68</td><td>0.308</td><td>2.988</td><td>12</td><td>13.00</td><td>n/a</td></tr><tr><td>Ours (DMPHN)</td><td>18.24</td><td>0.65</td><td>0.329</td><td>3.051</td><td>14</td><td>0.01</td><td>1080</td></tr><tr><td>method5</td><td>18.70</td><td>0.64</td><td>0.328</td><td>3.114</td><td>14</td><td>10.43</td><td>1080ti</td></tr><tr><td>method6</td><td>18.67</td><td>0.64</td><td>0.303</td><td>3.211</td><td>16</td><td>1.64</td><td>TitanXP</td></tr><tr><td>method7</td><td>17.88</td><td>0.57</td><td>0.378</td><td>2.855</td><td>16</td><td>0.06</td><td>n/a</td></tr><tr><td>no processing</td><td>11.33</td><td>0.42</td><td>0.582</td><td>2.609</td><td>20</td><td></td><td></td></tr></table>
299
-
300
- ![](images/59f49e8016e6d9a461754e18cd5ba8e1c37340025f581297cbe64f9cfd3d7336.jpg)
301
- Figure 6: Qualitative results for Dense Haze Removal.
302
-
303
- Table 2: NTIRE 2020 Nonhomogeneous challenge[5] Leaderboard. Submissions are sorted in ascending order of MOS.
304
-
305
- <table><tr><td></td><td>PSNR</td><td>SSIM</td><td>Runtime(s)</td></tr><tr><td>AtJ-DH[10]</td><td>22.54</td><td>0.6436</td><td>0.0775</td></tr><tr><td>123-CEDH[9]</td><td>19.63</td><td>0.5758</td><td>0.0559</td></tr><tr><td>FFA-Net[17]</td><td>11.93</td><td>0.3790</td><td>1.7472</td></tr><tr><td>Ours(DMPHN)</td><td>23.41</td><td>0.6669</td><td>0.0145</td></tr></table>
306
-
307
- Table 3: Quantitative Comparison on Dense-HAZE[2].
308
-
309
- # References
310
-
311
- [1] C. Ancuti, C.O. Ancuti, R. Timofte, L. Van Gool, and L. Zhang et al. NTIRE 2018 challenge on image dehazing: Methods and results. IEEE CVPR, NTIRE Workshop, 2018. 6
312
- [2] Codruta O Ancuti, Cosmin Ancuti, Mateu Sbert, and Radu Timofte. Dense-haze: A benchmark for image dehazing with dense-haze and haze-free images. In 2019 IEEE International Conference on Image Processing (ICIP), pages 1014-1018. IEEE, 2019. 6, 7
313
- [3] Codruta O. Ancuti, Cosmin Ancuti, and Radu Timofte. NH-HAZE: An image dehazing benchmark with nonhomogeneous hazy and haze-free images. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020. 4, 6
316
- [4] Codruta O Ancuti, Cosmin Ancuti, Radu Timofte, and Christophe De Vleeschouwer. O-haze: a dehazing benchmark with real hazy and haze-free outdoor images. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 754-762, 2018. 6
317
- [5] Codruta O. Ancuti, Cosmin Ancuti, Florin-Alexandru Vasluianu, Radu Timofte, et al. Ntire 2020 challenge on nonhomogeneous dehazing. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020. 6, 7
318
- [6] C. O. Ancuti, C. Ancuti, R. Timofte, L. Van Gool, and L. Zhang et al. NTIRE 2019 challenge on image dehazing: Methods and results. IEEE CVPR, NTIRE Workshop, 2019. 6
319
- [7] Yochai Blau, Roey Mechrez, Radu Timofte, Tomer Michaeli, and Lihi Zelnik-Manor. The 2018 PIRM challenge on perceptual image super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 0-0, 2018. 6
322
- [8] Zijun Deng, Lei Zhu, Xiaowei Hu, Chi-Wing Fu, Xuemiao Xu, Qing Zhang, Jing Qin, and Pheng-Ann Heng. Deep multi-model fusion for single-image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 2453–2462, 2019. 2
323
- [9] Tiantong Guo, Venkateswararao Cherukuri, and Vishal Monga. Dense123'color enhancement dehazing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 5, 6, 7
324
- [10] Tiantong Guo, Xuelu Li, Venkateswararao Cherukuri, and Vishal Monga. Dense scene information estimation network for dehazing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 0–0, 2019. 5, 6, 7
325
- [11] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In European conference on computer vision, pages 694–711. Springer, 2016. 4
326
- [12] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 5
327
- [13] Yunan Li, Qiguang Miao, Wanli Ouyang, Zhenxin Ma, Huijuan Fang, Chao Dong, and Yining Quan. Lap-net: Level-aware progressive network for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 3276-3285, 2019. 2
328
- [14] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Grid-dehazenet: Attention-based multi-scale network for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 7314-7323, 2019. 2
329
- [15] Yang Liu, Jinshan Pan, Jimmy Ren, and Zhixun Su. Learning deep priors for image dehazing. In Proceedings of the IEEE International Conference on Computer Vision, pages 2492–2500, 2019. 2
330
- [16] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. 5
331
- [17] Xu Qin, Zhilin Wang, Yuanchao Bai, Xiaodong Xie, and Huizhu Jia. Ffa-net: Feature fusion attention network for single image dehazing. arXiv preprint arXiv:1911.07559, 2019. 2, 5, 6, 7
332
- [18] Yanyun Qu, Yizi Chen, Jingying Huang, and Yuan Xie. Enhanced pix2pix dehazing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8160-8168, 2019. 2
333
- [19] Prasen Sharma, Priyankar Jain, and Arijit Sur. Scale-aware conditional generative adversarial network for image dehazing. In The IEEE Winter Conference on Applications of Computer Vision, pages 2355-2365, 2020. 2
336
- [20] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 4
337
- [21] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6
338
- [22] Hongguang Zhang, Yuchao Dai, Hongdong Li, and Piotr Koniusz. Deep stacked hierarchical multi-patch network for image deblurring. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5978-5986, 2019. 2
339
- [23] He Zhang and Vishal M Patel. Densely connected pyramid dehazing network. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3194-3203, 2018. 2
340
- [24] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 586-595, 2018. 6
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc6634fe6a8a4800fd955edbe24018f956420e4ff7e178560d04335e71409ccd
3
+ size 30651
 
data/2020/2005_05xxx/2005.05999/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a7c4dd0d08ade17e18a435440efca0dfe6f3f2a6cb24e99a2fa67c1fe0cbe235
3
  size 675395
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72ac2b519b4facd28e066b75bace0848785c48b150e5bddc4a0aa0a4560b17b8
3
  size 675395
data/2020/2005_05xxx/2005.05999/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06001/54cec5b6-a91c-4aea-ae6a-d5399be25314_content_list.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06001/54cec5b6-a91c-4aea-ae6a-d5399be25314_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06001/full.md CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06001/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9e426128aaeedc05c254cf99ed973dcaa2a4d6903ff287016041ceb2c2732203
3
  size 922318
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9496edf43a9b62b87b4454a0009006ff99e609248218d5cd9b8ba73cf59b146f
3
  size 922318
data/2020/2005_06xxx/2005.06001/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06040/56b116d0-0e46-408a-9475-7cd9f859a9ca_content_list.json CHANGED
@@ -1,1315 +1,3 @@
1
- [
2
- {
3
- "type": "text",
4
- "text": "Occlusion-Adaptive Deep Network for Robust Facial Expression Recognition",
5
- "text_level": 1,
6
- "bbox": [
7
- 99,
8
- 128,
9
- 870,
10
- 152
11
- ],
12
- "page_idx": 0
13
- },
14
- {
15
- "type": "text",
16
- "text": "Hui Ding, Peng Zhou, and Rama Chellappa",
17
- "bbox": [
18
- 308,
19
- 193,
20
- 658,
21
- 212
22
- ],
23
- "page_idx": 0
24
- },
25
- {
26
- "type": "text",
27
- "text": "University of Maryland, College Park",
28
- "bbox": [
29
- 334,
30
- 223,
31
- 633,
32
- 242
33
- ],
34
- "page_idx": 0
35
- },
36
- {
37
- "type": "text",
38
- "text": "Abstract",
39
- "text_level": 1,
40
- "bbox": [
41
- 235,
42
- 286,
43
- 310,
44
- 301
45
- ],
46
- "page_idx": 0
47
- },
48
- {
49
- "type": "text",
50
- "text": "Recognizing the expressions of partially occluded faces is a challenging computer vision problem. Previous expression recognition methods, either overlooked this issue or resolved it using extreme assumptions. Motivated by the fact that the human visual system is adept at ignoring the occlusion and focus on non-occluded facial areas, we propose a landmark-guided attention branch to find and discard corrupted features from occluded regions so that they are not used for recognition. An attention map is first generated to indicate if a specific facial part is occluded and guide our model to attend to non-occluded regions. To further improve robustness, we propose a facial region branch to partition the feature maps into non-overlapping facial blocks and task each block to predict the expression independently. This results in more diverse and discriminative features, enabling the expression recognition system to recover even though the face is partially occluded. Depending on the synergistic effects of the two branches, our occlusion-adaptive deep network significantly outperforms state-of-the-art methods on two challenging in-the-wild benchmark datasets and three real-world occluded expression datasets.",
51
- "bbox": [
52
- 78,
53
- 319,
54
- 468,
55
- 633
56
- ],
57
- "page_idx": 0
58
- },
59
- {
60
- "type": "text",
61
- "text": "1. Introduction",
62
- "text_level": 1,
63
- "bbox": [
64
- 80,
65
- 664,
66
- 207,
67
- 679
68
- ],
69
- "page_idx": 0
70
- },
71
- {
72
- "type": "text",
73
- "text": "Facial expressions play an important role in social communication in our daily life. In recent years, automatically recognizing expression has received increasing attention due to its wide applications, including driver safety, health care, video conferencing, virtual reality, and cognitive science etc.",
74
- "bbox": [
75
- 78,
76
- 689,
77
- 467,
78
- 777
79
- ],
80
- "page_idx": 0
81
- },
82
- {
83
- "type": "text",
84
- "text": "Existing methods that address expression recognition can be divided into two categories. One category utilized synthesis techniques to facilitate discriminative feature learning [33, 36, 3, 5]; While the other tried to boost the performance by designing new loss functions or network architectures [16, 35, 6, 1]. In the first category, de-expression residue learning proposed in [33] leveraged the neutral face images to distill the expression information from the cor",
85
- "bbox": [
86
- 78,
87
- 780,
88
- 467,
89
- 898
90
- ],
91
- "page_idx": 0
92
- },
93
- {
94
- "type": "text",
95
- "text": "responding expressive images. Zhang et al. [36] explored an adversarial autoencoder to generate facial images with different expressions under arbitrary poses to enlarge the training set. However, those works mainly focus on lab-collected datasets captured in controlled environments, such as $\\mathrm{CK + }$ [23], MMI [30] and OULU-CASIA [38]. Although high accuracy results have been obtained on these datasets, they perform poorly when recognizing facial expressions in-the-wild. In the second category, Li et al. [16] proposed a locality preserving loss to enhance deep features by preserving the locality closeness measure while maximizing the inter-class scatters. To address the annotation inconsistencies among different facial expression datasets, Zeng et al. [35] introduced a probability transition layer to recover the latent truths from noisy labels. Although expression datasets under natural and uncontrollable conditions are explored, facial expression recognition under partial occlusions is still a challenging problem that has been relatively unexplored. In real-life images or videos, facial occlusions can often be observed, e.g., facial accessories including sunglasses, scarves, and masks or other random objects like hands, hairs and cups.",
96
- "bbox": [
97
- 501,
98
- 287,
99
- 890,
100
- 619
101
- ],
102
- "page_idx": 0
103
- },
104
- {
105
- "type": "text",
106
- "text": "Recently, some related works have been proposed to solve this challenge. Patch-gated Convolutional Neural Network [18] decomposed a face into different patches and explicitly predicted the occlusion likelihood of the corresponding patch using a patch-gated unit. Wang et al. [31] proposed a self-attention scheme to learn the importance weights for multiple facial regions. However, the unobstructed scores are learned without any ground truth on the occlusion information and may be biased. In this work, we present an Occlusion-Adaptive Deep Network (OADN) to overcome the occlusion problem for robust facial expression recognition in-the-wild. It consists of two branches: a landmark-guided attention branch and a facial region branch.",
107
- "bbox": [
108
- 503,
109
- 625,
110
- 890,
111
- 834
112
- ],
113
- "page_idx": 0
114
- },
115
- {
116
- "type": "text",
117
- "text": "The landmark-guided attention branch is proposed to discard feature elements that have been corrupted by occlusions. The interest points covering the most distinctive facial areas for facial expression recognition are computed",
118
- "bbox": [
119
- 503,
120
- 840,
121
- 890,
122
- 898
123
- ],
124
- "page_idx": 0
125
- },
126
- {
127
- "type": "aside_text",
128
- "text": "arXiv:2005.06040v1 [cs.CV] 12 May 2020",
129
- "bbox": [
130
- 22,
131
- 255,
132
- 57,
133
- 707
134
- ],
135
- "page_idx": 0
136
- },
137
- {
138
- "type": "text",
139
- "text": "based on the domain knowledge. Then the meta information of these points is utilized to generate the attention maps. The global features are modulated by the attention maps to guide the model to focus on the non-occluded facial regions and filter out the occluded regions.",
140
- "bbox": [
141
- 75,
142
- 90,
143
- 467,
144
- 165
145
- ],
146
- "page_idx": 1
147
- },
148
- {
149
- "type": "text",
150
- "text": "To further enhance robustness and learn complementary context information, we introduce a facial region branch to train multiple region-based expression classifiers. This is achieved by first partitioning the global feature maps into non-overlapping facial blocks. Then each block is trained by backpropogating the recognition loss independently. Thus even when the face is partially occluded, the classifiers from other non-occluded regions are still able to function properly. Furthermore, since the expression datasets are usually small, having multiple region-based classifiers adds more supervision and acts as a regularizer to alleviate the overfitting issue.",
151
- "bbox": [
152
- 75,
153
- 167,
154
- 467,
155
- 345
156
- ],
157
- "page_idx": 1
158
- },
159
- {
160
- "type": "text",
161
- "text": "The main contributions in this work are summarized as follows:",
162
- "bbox": [
163
- 76,
164
- 347,
165
- 467,
166
- 377
167
- ],
168
- "page_idx": 1
169
- },
170
- {
171
- "type": "list",
172
- "sub_type": "text",
173
- "list_items": [
174
- "- We propose OADN, an effective method to deal with the occlusion problem for facial expression recognition in-the-wild.",
175
- "- We introduce a landmark-guided attention branch to guide the network to attend to non-occluded regions for representation learning.",
176
- "- We design a facial region branch to learn region-based classifiers for complementary context features and further increasing the robustness.",
177
- "- Experimental results on five challenging benchmark datasets show that our proposed OADN obtains significantly better performance than existing methods."
178
- ],
179
- "bbox": [
180
- 93,
181
- 387,
182
- 467,
183
- 597
184
- ],
185
- "page_idx": 1
186
- },
187
- {
188
- "type": "text",
189
- "text": "2. Related Work",
190
- "text_level": 1,
191
- "bbox": [
192
- 76,
193
- 609,
194
- 217,
195
- 626
196
- ],
197
- "page_idx": 1
198
- },
199
- {
200
- "type": "text",
201
- "text": "2.1. Deep Facial Expression Recognition",
202
- "text_level": 1,
203
- "bbox": [
204
- 76,
205
- 635,
206
- 387,
207
- 651
208
- ],
209
- "page_idx": 1
210
- },
211
- {
212
- "type": "text",
213
- "text": "Deep learning methods [39, 19, 22, 21, 20, 33, 5, 36, 16, 6, 35, 24, 1] for facial expression recognition have achieved great success in the past few years. Based on the assumption that a facial expression is the combination of a neutral face image and the expressive component, Yang et al. [33] proposed a de-expression residue learning to learn the residual expressive component in a generative model. To reduce the inter-subject variations, Cai et al. [5] introduced an identity-free generative adversarial network [11] to generate an average identity face image while keep the expression unchanged. Considering the pose variation, Zhang et al. [36] leveraged an adversarial autoencoder to augment the training set with face images under different expression and poses. However, these methods mainly focused on datasets captured in controlled environments, where the facial images are near frontal. Thus the models generalize poorly",
214
- "bbox": [
215
- 75,
216
- 657,
217
- 467,
218
- 900
219
- ],
220
- "page_idx": 1
221
- },
222
- {
223
- "type": "text",
224
- "text": "when recognizing human expressions under natural and uncontrollable variations.",
225
- "bbox": [
226
- 498,
227
- 90,
228
- 888,
229
- 119
230
- ],
231
- "page_idx": 1
232
- },
233
- {
234
- "type": "text",
235
- "text": "Another line of work focused on designing advanced network architectures [1] or loss functions [16, 6, 35, 24]. Li et al. [16] proposed a deep locality-preserving Convolutional Neural Network, which preserved the local proximity by minimizing the distance to K-nearest neighbors within the same class. Building on this, Cai et al. [6] further introduced an island loss to simultaneously reduce intraclass variations and augment inter-class differences. Zeng et al. [35] studied the annotation error and bias problem among different facial expression datasets. Each image is predicted with multiple pseudo labels and a model is learned to fit the latent truth from these inconsistent labels. Acharya et al. [1] explored a covariance pooling layer to better capture the distortions in regional facial features and temporal evolution of per-frame features. Although the aforementioned approaches achieve good performance on data in the wild, facial expression recognition is still challenging due to the existence of partially occluded faces.",
236
- "bbox": [
237
- 496,
238
- 121,
239
- 890,
240
- 393
241
- ],
242
- "page_idx": 1
243
- },
244
- {
245
- "type": "text",
246
- "text": "2.2. Occlusive Facial Expression Recognition",
247
- "text_level": 1,
248
- "bbox": [
249
- 500,
250
- 401,
251
- 844,
252
- 417
253
- ],
254
- "page_idx": 1
255
- },
256
- {
257
- "type": "text",
258
- "text": "Recently, there are some works starting to investigate the occlusion issue. Li et al. [17] proposed a gate unit to enable the model to shift attention from the occluded patches to other visible facial regions. The gate unit estimates how informative a face patch is through an attention net, then the features are modulated by the learned weights. Similarly, region attention network [31] cropped multiple face regions and utilized a self-attention based model to learn an important weight for each region. However, the self-attention based methods lack additional supervision to ensure the functionality. Thus, the network may not be able to locate these non-occluded facial regions accurately under large occlusions and poses.",
259
- "bbox": [
260
- 496,
261
- 425,
262
- 890,
263
- 621
264
- ],
265
- "page_idx": 1
266
- },
267
- {
268
- "type": "text",
269
- "text": "3. Occlusion Adaptive Deep Network",
270
- "text_level": 1,
271
- "bbox": [
272
- 500,
273
- 633,
274
- 812,
275
- 650
276
- ],
277
- "page_idx": 1
278
- },
279
- {
280
- "type": "text",
281
- "text": "To this end, we propose OADN for robust facial expression recognition in-the-wild. To be specific, we use ResNet50 [13] without the average pooling layer and the fully connected layer as the backbone to extract global feature maps from given images. We set the stride of conv4_1 to be 1, so a larger feature map is obtained. For an input image with height $H$ and width $W$ , the resolution of the output feature $F$ will be $H / 16 \\times W / 16$ instead of $H / 32 \\times W / 32$ . This is beneficial to identify the occlusion information and focus on the visible facial regions.",
282
- "bbox": [
283
- 496,
284
- 657,
285
- 890,
286
- 809
287
- ],
288
- "page_idx": 1
289
- },
290
- {
291
- "type": "text",
292
- "text": "As illustrated in Figure 1, OADN mainly consists of two branches: one is the landmark-guided attention branch, which utilizes a landmark detector to locate the landmarks and to guide the network to attend to the non-occluded facial areas. The other one is the facial region branch which divides the global feature maps into blocks and utilizes",
293
- "bbox": [
294
- 496,
295
- 810,
296
- 890,
297
- 900
298
- ],
299
- "page_idx": 1
300
- },
301
- {
302
- "type": "image",
303
- "img_path": "images/6cf5afb4cd07944180064478fb35a61cbf2c6315e00d563af784260909b3a106.jpg",
304
- "image_caption": [
305
- "Figure 1. Pipeline of the Occlusion Adaptive Deep Network. It consists of two branches: a Landmark-guided Attention Branch and a Facial Region Branch. The ResNet50 backbone is shared between the two branches to extract the global features. For the Landmark-guided Attention Branch, the facial landmarks are first detected. Then the interest points are computed to cover the most informative facial areas. The confidence scores of these points are further utilized to generate the attention maps, guiding the model to attend to the visible facial components. For the Facial Region Branch, the feature maps are divided into non-overlapping facial blocks and each block is trained to be a discriminative expression classifier on its own."
306
- ],
307
- "image_footnote": [],
308
- "bbox": [
309
- 80,
310
- 87,
311
- 890,
312
- 371
313
- ],
314
- "page_idx": 2
315
- },
316
- {
317
- "type": "text",
318
- "text": "region-based classifiers to increase robustness. We describe each branch and the structural relationship among the two branches below.",
319
- "bbox": [
320
- 75,
321
- 483,
322
- 468,
323
- 527
324
- ],
325
- "page_idx": 2
326
- },
327
- {
328
- "type": "image",
329
- "img_path": "images/d543ef302c21c60a57d610862d9786cca14c19e675d2c703d88b0bd199d88ed0.jpg",
330
- "image_caption": [
331
- "(a) Original 68 detected landmarks"
332
- ],
333
- "image_footnote": [],
334
- "bbox": [
335
- 112,
336
- 537,
337
- 233,
338
- 628
339
- ],
340
- "page_idx": 2
341
- },
342
- {
343
- "type": "image",
344
- "img_path": "images/e15278b81833ae8d71cbdadd0fd89c9c673dc5cafd9fd5680bd33f1832a38b10.jpg",
345
- "image_caption": [],
346
- "image_footnote": [],
347
- "bbox": [
348
- 238,
349
- 573,
350
- 305,
351
- 599
352
- ],
353
- "page_idx": 2
354
- },
355
- {
356
- "type": "image",
357
- "img_path": "images/b2c94855bdf1766d4df9ba7902f61618b1eb63a9fe857ddaf3982acef549b5b2.jpg",
358
- "image_caption": [
359
- "(b) Recomputed 24 points",
360
- "Figure 2. We select 16 points from the original 68 landmarks (a) to cover the regions around eyes, eyebrows, nose and mouth. We further recompute 8 points (b) to cover facial cheeks and the areas between eyes and eyebrows."
361
- ],
362
- "image_footnote": [],
363
- "bbox": [
364
- 313,
365
- 539,
366
- 433,
367
- 628
368
- ],
369
- "page_idx": 2
370
- },
371
- {
372
- "type": "text",
373
- "text": "3.1. Landmark-guided Attention Branch",
374
- "text_level": 1,
375
- "bbox": [
376
- 76,
377
- 726,
378
- 393,
379
- 742
380
- ],
381
- "page_idx": 2
382
- },
383
- {
384
- "type": "text",
385
- "text": "OADN employs a facial landmark detector [9] to obtain landmarks from face images. The landmark detector is pretrained on the 300W dataset [28]. Given an input image, OADN utilizes the detector to extract $N = 68$ landmarks. For each landmark, the detector predicts its coordinates and confidence score. Then based on the detected 68 points, we select or recompute $M = 24$ interest points that cover the distinctive regions of face, including the eyes, nose, mouth and cheeks. Figure 2 illustrates the computation results. For those recomputed points (mainly around eyes and cheeks),",
386
- "bbox": [
387
- 75,
388
- 750,
389
- 470,
390
- 901
391
- ],
392
- "page_idx": 2
393
- },
394
- {
395
- "type": "text",
396
- "text": "we set their confidence scores to be the minimum confidence score of landmark points that used to compute them. To remove the occluded facial regions, we set a threshold $T$ to filter out the landmarks that have confidence scores smaller than $T$ . Specifically, the interest points are obtained by:",
397
- "bbox": [
398
- 496,
399
- 483,
400
- 890,
401
- 571
402
- ],
403
- "page_idx": 2
404
- },
405
- {
406
- "type": "equation",
407
- "text": "\n$$\np _ {i} = \\left\\{ \\begin{array}{l l} \\left(x _ {i}, y _ {i}\\right) & \\text {i f} s _ {i} ^ {\\text {c o n f}} \\geq T \\\\ 0 & \\text {e l s e} \\end{array} \\right. \\tag {1}\n$$\n",
408
- "text_format": "latex",
409
- "bbox": [
410
- 570,
411
- 574,
412
- 890,
413
- 608
414
- ],
415
- "page_idx": 2
416
- },
417
- {
418
- "type": "text",
419
- "text": "where $p_i$ denotes the $i$ th interest point, and $x_i, y_i$ denote the coordinates of the $i$ th point. $s_i$ is the confidence score ranged from 0 to 1 and $T$ is the threshold.",
420
- "bbox": [
421
- 496,
422
- 616,
423
- 890,
424
- 660
425
- ],
426
- "page_idx": 2
427
- },
428
- {
429
- "type": "text",
430
- "text": "We then generate the attention heatmaps consisting of a 2D Gaussian distribution, where the centers are the ground truth locations of the visible landmarks. For those occluded landmarks, the corresponding attention maps are set to zero. We further downsample the attention maps by linear interpolation to match the size of the output feature maps. As shown in Figure 1, the attention map $A_{i}$ modulates the global feature maps $F$ to obtain the re-weighted features $F_{i}^{A}$ . To achieve this, the feature map $F$ from the backbone is multiplied by each attention map $A_{i}$ , $i = 1,\\dots,M$ element-wisely, resulting $M$ landmark-guided feature maps $F_{i}^{A}$ :",
431
- "bbox": [
432
- 496,
433
- 662,
434
- 890,
435
- 843
436
- ],
437
- "page_idx": 2
438
- },
439
- {
440
- "type": "equation",
441
- "text": "\n$$\nF _ {i} ^ {A} = F \\odot A _ {i}, i = 1, \\dots , M \\tag {2}\n$$\n",
442
- "text_format": "latex",
443
- "bbox": [
444
- 599,
445
- 844,
446
- 890,
447
- 861
448
- ],
449
- "page_idx": 2
450
- },
451
- {
452
- "type": "text",
453
- "text": "where $A_{i}$ is the ith heatmap, and $\\odot$ is element-wise product. Since the attention map indicates the visibility of each",
454
- "bbox": [
455
- 496,
456
- 869,
457
- 890,
458
- 900
459
- ],
460
- "page_idx": 2
461
- },
462
- {
463
- "type": "text",
464
- "text": "facial component, the landmark-guided feature map $F_{i}^{A}$ can attend to the non-occluded facial parts and remove the information from the occluded regions. Thus, the feature from the visible region is emphasized and occluded part is canceled.",
465
- "bbox": [
466
- 75,
467
- 90,
468
- 467,
469
- 165
470
- ],
471
- "page_idx": 3
472
- },
473
- {
474
- "type": "text",
475
- "text": "Then global average pooling is applied to each landmark-guided feature map $F_{i}^{A}$ to obtain a 2048- $D$ feature $f_{i}^{A}, i = 1, \\dots, M$ , corresponding to the facial component containing the specific interest point. Finally, the component-wise feature $f_{i}^{A}$ is max-pooled to fuse features from the non-occluded facial areas. A fully-connected layer is further used to reduce the dimension from 2048 to 256, and the output is fed into a softmax layer to predict the expression category of each input face image. We utilize the cross-entropy loss to train the landmark-guided attention branch, which is expressed as follows:",
476
- "bbox": [
477
- 75,
478
- 167,
479
- 467,
480
- 330
481
- ],
482
- "page_idx": 3
483
- },
484
- {
485
- "type": "equation",
486
- "text": "\n$$\nL _ {L A B} = - \\sum_ {i = 1} ^ {C} y _ {i} \\log \\hat {y} _ {i} \\tag {3}\n$$\n",
487
- "text_format": "latex",
488
- "bbox": [
489
- 191,
490
- 343,
491
- 467,
492
- 383
493
- ],
494
- "page_idx": 3
495
- },
496
- {
497
- "type": "text",
498
- "text": "where $\\hat{y}_i$ is the prediction, $y_i$ is the ground truth and $C$ is the number of expression classes.",
499
- "bbox": [
500
- 76,
501
- 393,
502
- 467,
503
- 425
504
- ],
505
- "page_idx": 3
506
- },
507
- {
508
- "type": "text",
509
- "text": "3.2. Facial Region Branch",
510
- "text_level": 1,
511
- "bbox": [
512
- 76,
513
- 434,
514
- 277,
515
- 450
516
- ],
517
- "page_idx": 3
518
- },
519
- {
520
- "type": "text",
521
- "text": "When the face is severely occluded, the landmark detection results may not be accurate. Since relying on the landmark-guided attention branch solely is not enough, OADN utilizes a Facial Region Branch (FRB) to learn useful context information and further increase the robustness.",
522
- "bbox": [
523
- 75,
524
- 457,
525
- 467,
526
- 531
527
- ],
528
- "page_idx": 3
529
- },
530
- {
531
- "type": "text",
532
- "text": "Given the global feature maps $F \\in h \\times w \\times c$ , where $h, w, c$ are the height, width and channel dimensions, we first divide them into small $m \\times n$ non-overlapping blocks. Each facial region feature $F_{i}^{R} \\in m \\times n \\times c, i = 1, \\dots, K$ where $K = \\lceil \\frac{h}{m} \\rceil \\cdot \\lceil \\frac{w}{n} \\rceil$ is then fed into a global average pooling layer to obtain a region-level feature $f_{i}^{R}$ . Afterwards, a fully-connected layer is employed to reduce the dimension of $f_{i}^{R}$ from 2048 to 256. Finally, a softmax layer is applied to each region to obtain a set of predictions $y_{i}^{R}$ , where $i = 1, \\dots, K$ .",
533
- "bbox": [
534
- 75,
535
- 532,
536
- 467,
537
- 681
538
- ],
539
- "page_idx": 3
540
- },
541
- {
542
- "type": "text",
543
- "text": "To train the facial region branch, we minimize the cross-entropy loss over the $K$ regions independently. Formally, the loss is expressed as:",
544
- "bbox": [
545
- 75,
546
- 684,
547
- 467,
548
- 728
549
- ],
550
- "page_idx": 3
551
- },
552
- {
553
- "type": "equation",
554
- "text": "\n$$\nL _ {F R B} = - \\sum_ {i = 1} ^ {C} \\sum_ {j = 1} ^ {K} y _ {i} \\log \\hat {y} _ {i, j} ^ {R} \\tag {4}\n$$\n",
555
- "text_format": "latex",
556
- "bbox": [
557
- 171,
558
- 739,
559
- 467,
560
- 781
561
- ],
562
- "page_idx": 3
563
- },
564
- {
565
- "type": "text",
566
- "text": "where $K$ is the number of facial regions, $\\hat{y}_{i,j}^{R}$ is the prediction of the $j$ th region, and $y_{i}$ is the ground truth expression category.",
567
- "bbox": [
568
- 75,
569
- 794,
570
- 467,
571
- 840
572
- ],
573
- "page_idx": 3
574
- },
575
- {
576
- "type": "text",
577
- "text": "To make an accurate prediction based on facial region only, OADN is required to learn more discriminative and diverse features at a finer-level. As a result, the partial occlusion will have a less effect on the network compared to a",
578
- "bbox": [
579
- 75,
580
- 840,
581
- 467,
582
- 898
583
- ],
584
- "page_idx": 3
585
- },
586
- {
587
- "type": "text",
588
- "text": "standard model. Moreover, the size of the expression recognition dataset is usually not very large. Training multiple region-based classifiers adds more supervision and reduces overfitting.",
589
- "bbox": [
590
- 496,
591
- 90,
592
- 890,
593
- 151
594
- ],
595
- "page_idx": 3
596
- },
597
- {
598
- "type": "text",
599
- "text": "3.3. Relationship between the Two Branches",
600
- "text_level": 1,
601
- "bbox": [
602
- 498,
603
- 162,
604
- 841,
605
- 178
606
- ],
607
- "page_idx": 3
608
- },
609
- {
610
- "type": "text",
611
- "text": "OADN is specifically designed to handle the occlusion problem for in-the-wild facial expression recognition. The landmark-guided attention branch explicitly guides the model to focus on non-occluded facial areas, learning a clean global feature. While the facial region branch promotes part-level features and enables the model to work robustly when the face is largely occluded. Combining the benefits from each branch, we train OADN using the following loss:",
612
- "bbox": [
613
- 496,
614
- 186,
615
- 890,
616
- 321
617
- ],
618
- "page_idx": 3
619
- },
620
- {
621
- "type": "equation",
622
- "text": "\n$$\nL = \\lambda L _ {L A B} + (1 - \\lambda) L _ {F R B} \\tag {5}\n$$\n",
623
- "text_format": "latex",
624
- "bbox": [
625
- 594,
626
- 335,
627
- 890,
628
- 351
629
- ],
630
- "page_idx": 3
631
- },
632
- {
633
- "type": "text",
634
- "text": "where $\\lambda$ is the loss combination weight. $L_{LAB}$ and $L_{FRB}$ are defined in Equation (3) and (4).",
635
- "bbox": [
636
- 496,
637
- 364,
638
- 888,
639
- 393
640
- ],
641
- "page_idx": 3
642
- },
643
- {
644
- "type": "text",
645
- "text": "4. Experiments",
646
- "text_level": 1,
647
- "bbox": [
648
- 498,
649
- 409,
650
- 630,
651
- 425
652
- ],
653
- "page_idx": 3
654
- },
655
- {
656
- "type": "text",
657
- "text": "4.1. Datasets",
658
- "text_level": 1,
659
- "bbox": [
660
- 500,
661
- 435,
662
- 601,
663
- 449
664
- ],
665
- "page_idx": 3
666
- },
667
- {
668
- "type": "text",
669
- "text": "We validate the effectiveness of our method on two largest in-the-wild expression datasets: RAF-DB [16] and AffectNet [25]. The in-the-wild datasets contain facial expression in real world with various poses, illuminations, intensities, and other uncontrolled conditions. We also evaluate our method on three recently proposed real-world occlusion datasets: Occlusion-AffectNet[31], Occlusion-FERPlus [31] and FED-RO [17]. The occlusions are diverse in color, shape, position and occlusion ratio.",
670
- "bbox": [
671
- 496,
672
- 458,
673
- 890,
674
- 593
675
- ],
676
- "page_idx": 3
677
- },
678
- {
679
- "type": "image",
680
- "img_path": "images/9b4901edcfa7a1e2ca4e83b6efe49a12d0ba0dd3d9720c50393a2c24d3a418c1.jpg",
681
- "image_caption": [
682
- "Figure 3. The interest points with confidence scores greater than the threshold $T$ are shown in red points. We can see that occluded facial areas are removed."
683
- ],
684
- "image_footnote": [],
685
- "bbox": [
686
- 501,
687
- 607,
688
- 890,
689
- 672
690
- ],
691
- "page_idx": 3
692
- },
693
- {
694
- "type": "text",
695
- "text": "4.2. Implementation Details",
696
- "text_level": 1,
697
- "bbox": [
698
- 500,
699
- 739,
700
- 715,
701
- 756
702
- ],
703
- "page_idx": 3
704
- },
705
- {
706
- "type": "text",
707
- "text": "Preprocessing. The standard MTCNN [37] is used to detect five face landmarks for all the images. After performing a similarity transformation, we obtain the aligned face images and resize them to be $224 \\times 224$ pixels. To detect landmarks from occluded images, we use SAN [9] pretrained on the 300W dataset [28] to get 68 face landmarks. We also try another landmark detector [4] and similar results are obtained. Then we select 18 points covering eyebrows, eyes, nose and mouth, and recompute eight points",
708
- "bbox": [
709
- 496,
710
- 763,
711
- 890,
712
- 900
713
- ],
714
- "page_idx": 3
715
- },
716
- {
717
- "type": "table",
718
- "img_path": "images/d9bcf45c2dc4750d88d64c576c97a28c1b10866192ced980401d6938ba2746ac.jpg",
719
- "table_caption": [
720
- "Table 1. Test set accuracy on RAF dataset"
721
- ],
722
- "table_footnote": [],
723
- "table_body": "<table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>86.90%</td></tr><tr><td>OADN(ours)</td><td>89.83%</td></tr><tr><td>ResiDen [14]</td><td>76.54%</td></tr><tr><td>ResNet-PL [26]</td><td>81.97%</td></tr><tr><td>PG-CNN [18]</td><td>83.27%</td></tr><tr><td>Center Loss [32]</td><td>83.68%</td></tr><tr><td>DLP-CNN [15]</td><td>84.13%</td></tr><tr><td>ALT [10]</td><td>84.50%</td></tr><tr><td>gACNN [17]</td><td>85.07%</td></tr><tr><td>OADN(ours)</td><td>87.16%</td></tr></table>",
724
- "bbox": [
725
- 138,
726
- 99,
727
- 410,
728
- 273
729
- ],
730
- "page_idx": 4
731
- },
732
- {
733
- "type": "text",
734
- "text": "from facial cheeks. The confidence scores of these recomputed points are the minimum scores of the points used to compute them. In all experiments, we set the threshold $T$ of the confidence score to be 0.6, thus landmarks with confidence scores smaller than 0.6 are removed. Figure 3 shows the computed interest points after thresholding. From it we can see that the occluded facial regions are discarded. Finally, we generate attention maps consisting of a Gaussian with the centers as the coordinates of the visible points. For those occluded points, the attention maps are all zeros. We resize the attention maps to be $14 \\times 14$ to match the size of the global feature maps $F$ .",
735
- "bbox": [
736
- 75,
737
- 309,
738
- 470,
739
- 489
740
- ],
741
- "page_idx": 4
742
- },
743
- {
744
- "type": "text",
745
- "text": "Training and Testing. We employ the ResNet50 as our backbone, removing the average pooling layer and the fully connected layer. We modify the stride of conv4_1 from 2 to 1, so a larger feature map with size $14 \\times 14$ is obtained. We initialize the model with the weights pre-trained on ImageNet [8]. The mini-batch size is set to be 128, the momentum is 0.9, and the weight decay is 0.0005. The learning rate starts at 0.1, and decreased by 10 after 20 epochs. We train the model for a total of 60 epochs. Stochastic Gradient Descent (SGD) is adopted as the optimization algorithm. During training, only random flipping is used for data augmentation. For testing, a single image is used and the predication scores from the landmark-guided attention branch and the facial region branch are averaged to get the final prediction score. The settings are same for all the experiments. For evaluation, the total accuracy metric is adopted. Considering the imbalance of the expression classes, a confusion matrix is also employed to show the average class accuracy. The deep learning software Pytorch [27] is used to conduct the experiments. Upon publication, the codes and trained expression models will be made publicly available.",
746
- "bbox": [
747
- 75,
748
- 489,
749
- 468,
750
- 808
751
- ],
752
- "page_idx": 4
753
- },
754
- {
755
- "type": "text",
756
- "text": "4.3. Results Comparison",
757
- "text_level": 1,
758
- "bbox": [
759
- 76,
760
- 816,
761
- 269,
762
- 833
763
- ],
764
- "page_idx": 4
765
- },
766
- {
767
- "type": "text",
768
- "text": "RAF [16] contains 30,000 in-the-wild facial expression images, annotated with basic or compound expressions by forty independent human labelers. In this experiment, only images with seven basic expressions are used, including",
769
- "bbox": [
770
- 75,
771
- 839,
772
- 470,
773
- 902
774
- ],
775
- "page_idx": 4
776
- },
777
- {
778
- "type": "table",
779
- "img_path": "images/84e6d1a2e3fea9a00c592eecc51c3c9fa436c745fcea681a3478a394dd9ae102.jpg",
780
- "table_caption": [
781
- "Table 2. Validation set accuracy on AffectNet dataset"
782
- ],
783
- "table_footnote": [],
784
- "table_body": "<table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>59.50%</td></tr><tr><td>OADN(ours)</td><td>64.06%</td></tr><tr><td>VGG16 [29]</td><td>51.11%</td></tr><tr><td>GAN-Inpainting [34]</td><td>52.97%</td></tr><tr><td>DLP-CNN [16]</td><td>54.47%</td></tr><tr><td>PG-CNN [18]</td><td>55.33%</td></tr><tr><td>ResNet-PL [26]</td><td>56.42%</td></tr><tr><td>gACNN [17]</td><td>58.78%</td></tr><tr><td>OADN(ours)</td><td>61.89%</td></tr></table>",
785
- "bbox": [
786
- 544,
787
- 99,
788
- 848,
789
- 258
790
- ],
791
- "page_idx": 4
792
- },
793
- {
794
- "type": "text",
795
- "text": "12,271 for training and 3,068 for testing.",
796
- "bbox": [
797
- 500,
798
- 295,
799
- 769,
800
- 310
801
- ],
802
- "page_idx": 4
803
- },
804
- {
805
- "type": "text",
806
- "text": "Table 1 shows the results of our method and previous works. Our OADN achieves $87.16\\%$ in terms of total accuracy on the test set, outperforming all the previous methods. Compared with the strongest competing method in the same setting gACNN [17], OADN surpasses it by $2.1\\%$ . This is because OADN explicitly utilizes the meta information of landmarks to depress the noisy information from the occluded regions and enhances the robustness with multiple region-based classifiers. To have a fair comparison with [31], we also pre-trained our model on a large-scale face recognition dataset VGGFace2 [7]. OADN achieves a new state-of-the-art result with an accuracy of $89.83\\%$ to the best of our knowledge, outperforming RAN by $2.93\\%$ . This validates the superiority of the proposed method.",
807
- "bbox": [
808
- 496,
809
- 311,
810
- 890,
811
- 523
812
- ],
813
- "page_idx": 4
814
- },
815
- {
816
- "type": "image",
817
- "img_path": "images/c6f240893a6796b93dc549574ae64107e182416e57a519fcd99862dca836f875.jpg",
818
- "image_caption": [
819
- "Figure 4. Confusion matrix for RAF-DB dataset. The darker the color, the higher the accuracy."
820
- ],
821
- "image_footnote": [],
822
- "bbox": [
823
- 513,
824
- 536,
825
- 875,
826
- 806
827
- ],
828
- "page_idx": 4
829
- },
830
- {
831
- "type": "text",
832
- "text": "We show the confusion matrix in Figure 4. It is observed that Fear and Disgust are the two most confusing expression, where Fear is easily confused with Surprise because",
833
- "bbox": [
834
- 496,
835
- 854,
836
- 890,
837
- 900
838
- ],
839
- "page_idx": 4
840
- },
841
- {
842
- "type": "table",
843
- "img_path": "images/a114a110d863e822034c0b680d17f7574f91c55ed77843a56f9c323aefc6c3a2.jpg",
844
- "table_caption": [
845
- "Table 3. Validation set accuracy on Occlusion-AffectNet and Pose-AffectNet dataset"
846
- ],
847
- "table_footnote": [],
848
- "table_body": "<table><tr><td>Method</td><td>Occ. Acc.</td><td>Pose&gt;30 Acc.</td><td>Pose&gt;45 Acc.</td></tr><tr><td>RAN [31]</td><td>58.50%</td><td>53.90%</td><td>53.19%</td></tr><tr><td>OADN(ours)</td><td>64.02%</td><td>61.12%</td><td>61.08%</td></tr></table>",
849
- "bbox": [
850
- 78,
851
- 138,
852
- 504,
853
- 191
854
- ],
855
- "page_idx": 5
856
- },
857
- {
858
- "type": "text",
859
- "text": "of similar facial appearance while Disgust is mainly confused by Neutral due to the subtleness of the expression.",
860
- "bbox": [
861
- 76,
862
- 231,
863
- 468,
864
- 261
865
- ],
866
- "page_idx": 5
867
- },
868
- {
869
- "type": "image",
870
- "img_path": "images/9372ee4566d1fd1ed3d1cd150413fa463037b882f8c2e60391928dde4e6e69d1.jpg",
871
- "image_caption": [
872
- "Figure 5. Confusion matrix for Affectnet dataset. The darker the color, the higher the accuracy."
873
- ],
874
- "image_footnote": [],
875
- "bbox": [
876
- 94,
877
- 277,
878
- 452,
879
- 547
880
- ],
881
- "page_idx": 5
882
- },
883
- {
884
- "type": "text",
885
- "text": "AffectNet [25] is currently the largest expression dataset. There are about 400,000 images manually annotated with seven discrete facial expressions and the intensity of valence and arousal. Following the experiment setting in [17], we only used images with neutral and six basic emotions, containing 280,000 images for training and 3,500 images from the validation set for testing since the test set is not publicly available. Very recently, Wang et al. [31] released the Occlusion-AffectNet and Pose-AffectNet datasets where only images with challenging conditions are selected as the test sets. For the Occlusion-Affectnet, each image is occluded with at least one type of occlusion: wearing mask, wearing glasses, etc. There are a total of 682 images. For the Pose-AffectNet, images with pose degrees larger than 30 and 45 are collected. The number of images are 1,949 and 985, respectively.",
886
- "bbox": [
887
- 75,
888
- 597,
889
- 468,
890
- 838
891
- ],
892
- "page_idx": 5
893
- },
894
- {
895
- "type": "text",
896
- "text": "As shown in Table 3, OADN achieves the best performance with an accuracy of $61.89\\%$ on the validation set. Compared to the strongest competing method in the same setting gACNN [17], OADN surpasses it by $3.1\\%$ , which",
897
- "bbox": [
898
- 75,
899
- 839,
900
- 468,
901
- 901
902
- ],
903
- "page_idx": 5
904
- },
905
- {
906
- "type": "table",
907
- "img_path": "images/2d54a89f96bbba81ee72afe1614ae9b02106163733488b810583c167176f21ea.jpg",
908
- "table_caption": [
909
- "Table 4. Test set accuracy on FED-RO dataset"
910
- ],
911
- "table_footnote": [],
912
- "table_body": "<table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>67.98%</td></tr><tr><td>OADN(ours)</td><td>71.17%</td></tr><tr><td>VGG16 [29]</td><td>51.11%</td></tr><tr><td>ResNet18 [13]</td><td>64.25%</td></tr><tr><td>GAN-Inpainting [34]</td><td>58.33%</td></tr><tr><td>DLP-CNN [16]</td><td>60.31%</td></tr><tr><td>PG-CNN [18]</td><td>64.25%</td></tr><tr><td>gACNN [17]</td><td>66.50%</td></tr><tr><td>OADN(ours)</td><td>68.11%</td></tr></table>",
913
- "bbox": [
914
- 545,
915
- 99,
916
- 846,
917
- 258
918
- ],
919
- "page_idx": 5
920
- },
921
- {
922
- "type": "text",
923
- "text": "is a reasonable improvement. OADN also outperforms RAN [31] by $4.56\\%$ , when both are pre-trained on a large-scale face recognition dataset. On the Occlusion-AffectNet and Pose-AffectNet datasets, the performance gap between OADN and RAN is further increased. As a comparison, OADN exceeds RAN by $5.52\\%$ , $7.22\\%$ and $7.89\\%$ on the test sets with occlusion, pose degree greater than 30 and 45, respectively. This validates the effectiveness of the proposed method for the occluded facial expression recognition problem. The confusion matrix is shown in Figure 5. From it we can find both Disgust and Anger are the most difficult expressions to classify.",
924
- "bbox": [
925
- 496,
926
- 296,
927
- 890,
928
- 476
929
- ],
930
- "page_idx": 5
931
- },
932
- {
933
- "type": "text",
934
- "text": "FED-RO [17] is a recently released facial expression dataset with real world occlusions. Each image has natural occlusions including sunglasses, medical mask, hands and hair. It contains 400 images labeled with seven expressions for testing. We train our model on the joint training data of RAF and AffectNet, following the protocol suggested in [17].",
935
- "bbox": [
936
- 496,
937
- 477,
938
- 890,
939
- 582
940
- ],
941
- "page_idx": 5
942
- },
943
- {
944
- "type": "text",
945
- "text": "As shown in Table 4, OADN achieves the best performance with an accuracy of $68.11\\%$ , improving over gACNN by $1.61\\%$ . OADN also outperforms RAN by $3.19\\%$ when pre-trained on a face recognition dataset. From the confusion matrix shown in Figure 6, we can see both Surprise and Happy have high accuracy, while Fear and Disgust are easily confused with Surprise and Sad.",
946
- "bbox": [
947
- 496,
948
- 583,
949
- 890,
950
- 688
951
- ],
952
- "page_idx": 5
953
- },
954
- {
955
- "type": "text",
956
- "text": "FERPlus [2] is a real-world facial expression dataset initially introduced in ICML 2013 Challenge [12]. It consists of 28,709 training images, 3,589 validation images and 3,589 test images. Each image is labeled with one of the eight expressions by 10 independent taggers. Recently, Wang et al. [31] released the Occlusion-FERPlus and Pose-FERPlus datasets, where images under occlusion and large pose ( $>30$ and $>45$ ) are collected from the FERPlus test sets. The Occlusion-FERPlus has a total number of 605 images, while Pose-FERPlus has 1,171 and 634 images with pose larger than 30 and 45 degrees, respectively. Following [31], we trained our model on the training data of FERPlus and test on these challenging datasets.",
957
- "bbox": [
958
- 496,
959
- 688,
960
- 890,
961
- 883
962
- ],
963
- "page_idx": 5
964
- },
965
- {
966
- "type": "text",
967
- "text": "Table 5 reports the test accuracy. The OADN sig",
968
- "bbox": [
969
- 517,
970
- 885,
971
- 890,
972
- 901
973
- ],
974
- "page_idx": 5
975
- },
976
- {
977
- "type": "image",
978
- "img_path": "images/d8f3bcd34e5663c23177191284fb51262210fbf408bb65a694e0aea4bfaf2a04.jpg",
979
- "image_caption": [
980
- "Figure 6. Confusion matrix for FED-RO dataset. The darker the color, the higher the accuracy."
981
- ],
982
- "image_footnote": [],
983
- "bbox": [
984
- 89,
985
- 89,
986
- 450,
987
- 361
988
- ],
989
- "page_idx": 6
990
- },
991
- {
992
- "type": "table",
993
- "img_path": "images/f66cb8867c5267fc95da1f55b6aa0340c0e3007f3fa8e5ad538f2bcc1f26f504.jpg",
994
- "table_caption": [
995
- "Table 5. Test set accuracy on Occlusion-FERPlus and Pose-FERPlus dataset"
996
- ],
997
- "table_footnote": [],
998
- "table_body": "<table><tr><td>Method</td><td>Occ. Acc.</td><td>Pose&gt;30 Acc.</td><td>Pose&gt;45 Acc.</td></tr><tr><td>RAN [31]</td><td>83.63%</td><td>82.23%</td><td>80.40%</td></tr><tr><td>OADN(ours)</td><td>84.57%</td><td>88.52%</td><td>87.50%</td></tr></table>",
999
- "bbox": [
1000
- 76,
1001
- 455,
1002
- 500,
1003
- 508
1004
- ],
1005
- "page_idx": 6
1006
- },
1007
- {
1008
- "type": "text",
1009
- "text": "nificantly surpasses RAN by a large margin with $6.29\\%$ and $7.10\\%$ improvements on the Pose-FERPlus datasets. OADN also achieves better performance on the Occlusion-FERPlus dataset. This validates the effectiveness of our method for recognizing facial expressions under challenging conditions.",
1010
- "bbox": [
1011
- 75,
1012
- 547,
1013
- 468,
1014
- 638
1015
- ],
1016
- "page_idx": 6
1017
- },
1018
- {
1019
- "type": "text",
1020
- "text": "4.4. Ablation Study",
1021
- "text_level": 1,
1022
- "bbox": [
1023
- 76,
1024
- 648,
1025
- 230,
1026
- 666
1027
- ],
1028
- "page_idx": 6
1029
- },
1030
- {
1031
- "type": "text",
1032
- "text": "In this section, we conduct ablation studies on the RAF dataset to analyze each component of OADN.",
1033
- "bbox": [
1034
- 75,
1035
- 672,
1036
- 468,
1037
- 702
1038
- ],
1039
- "page_idx": 6
1040
- },
1041
- {
1042
- "type": "text",
1043
- "text": "The impact of the landmark confidence threshold $T$ . The confidence scores of the interest points are utilized to select the interest points from non-occluded facial areas. From Equation (1), points with confidence scores higher than $T$ are kept. We can see from Figure 7 (a) that with $T = 0.6$ , OADN achieves the best performance. When $T$ is further increased, the performance drops quickly since some important facial areas which may not be occluded are also thrown away. On the other hand, when $T$ becomes less than 0.6, OADN starts to perform worse. This is because noisy information from the occluded areas are also included, which degrades the clean features.",
1044
- "bbox": [
1045
- 75,
1046
- 703,
1047
- 470,
1048
- 883
1049
- ],
1050
- "page_idx": 6
1051
- },
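The thresholding rule from Equation (1) that this ablation varies can be written in a few lines. The sketch below is illustrative only, assuming the landmark detector returns per-point coordinates plus a confidence score in [0, 1]; the function and variable names are ours, not taken from the released code.

```python
import numpy as np

def select_interest_points(points, conf, T=0.6):
    # Equation (1): keep an interest point only if its confidence is at least T;
    # points from (likely) occluded regions are zeroed out and contribute nothing.
    points = np.asarray(points, dtype=np.float32)   # shape (M, 2): (x, y) per point
    conf = np.asarray(conf, dtype=np.float32)       # shape (M,): confidence in [0, 1]
    visible = conf >= T
    return np.where(visible[:, None], points, 0.0), visible

# Toy usage: the second point falls on an occluded area and is dropped at T = 0.6.
pts, visible = select_interest_points([[56, 40], [70, 90], [30, 88]], [0.9, 0.3, 0.8])
```

Raising T discards more points (including some that are merely low-confidence but visible), while lowering it lets occluded points back in, which matches the behaviour reported in Figure 7 (a).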
1052
- {
1053
- "type": "text",
1054
- "text": "The impact of the number of regions $K$ . In the facial",
1055
- "bbox": [
1056
- 96,
1057
- 885,
1058
- 468,
1059
- 901
1060
- ],
1061
- "page_idx": 6
1062
- },
1063
- {
1064
- "type": "image",
1065
- "img_path": "images/b7475d30cf4d387aacd8ac721b475389e30f4be18dfeed10ef002a0b259741ec.jpg",
1066
- "image_caption": [
1067
- "Figure 7. The impacts of the confidence threshold $T$ , number of regions $K$ and the loss combination weight $\\lambda$ on the performance of OADN."
1068
- ],
1069
- "image_footnote": [],
1070
- "bbox": [
1071
- 504,
1072
- 88,
1073
- 632,
1074
- 172
1075
- ],
1076
- "page_idx": 6
1077
- },
1078
- {
1079
- "type": "image",
1080
- "img_path": "images/ac689a519a42f4017274d0ba7ceac4dcdc87f6fd078ee448ecaa1e2287776b26.jpg",
1081
- "image_caption": [],
1082
- "image_footnote": [],
1083
- "bbox": [
1084
- 640,
1085
- 88,
1086
- 771,
1087
- 172
1088
- ],
1089
- "page_idx": 6
1090
- },
1091
- {
1092
- "type": "image",
1093
- "img_path": "images/b7af8bddaec2afc2f3f8d609bd4633f5b368144be64f1e214d6d4a7d36a8a453.jpg",
1094
- "image_caption": [],
1095
- "image_footnote": [],
1096
- "bbox": [
1097
- 781,
1098
- 88,
1099
- 908,
1100
- 172
1101
- ],
1102
- "page_idx": 6
1103
- },
1104
- {
1105
- "type": "text",
1106
- "text": "region branch, we partition the global feature maps into $K$ blocks and train an expression classifier from each block independently. So $K$ decides the granularity of the part-level features. From Figure 7 (b), it is observed that the best accuracy is achieved at $K = 4$ . When $K = 1$ , the facial region branch equals to the standard ResNet50 classifier. The worse performance indicates the necessity to learn features at part-level. However, increasing $K$ to be a large number like 16 does not bring further increasement. This is because when the facial region is too small, it lacks enough information to make the prediction due to the occlusion. Thus the classifiers are confused and the training is stagnated.",
1107
- "bbox": [
1108
- 496,
1109
- 247,
1110
- 890,
1111
- 429
1112
- ],
1113
- "page_idx": 6
1114
- },
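For reference, a minimal PyTorch sketch of the facial region branch described above, assuming a 2048-channel 14x14 backbone feature map and a 2x2 grid (K = 4); the class and parameter names are illustrative, and the head layout (FC to 256, then a classifier) follows the description in Section 3.2.

```python
import torch
import torch.nn as nn

class FacialRegionBranch(nn.Module):
    # Split the global feature map into a grid of non-overlapping blocks and give
    # each block its own expression classifier, trained independently (Eq. 4).
    def __init__(self, channels=2048, grid=2, num_classes=7):
        super().__init__()
        self.grid = grid  # grid * grid = K regions (K = 4 here, the best value in Fig. 7 (b))
        self.heads = nn.ModuleList(
            nn.Sequential(nn.Linear(channels, 256), nn.Linear(256, num_classes))
            for _ in range(grid * grid)
        )

    def forward(self, feat):                      # feat: (B, C, H, W), e.g. (B, 2048, 14, 14)
        b, c, h, w = feat.shape
        bh, bw = h // self.grid, w // self.grid
        logits = []
        for i in range(self.grid):
            for j in range(self.grid):
                block = feat[:, :, i * bh:(i + 1) * bh, j * bw:(j + 1) * bw]
                pooled = block.mean(dim=(2, 3))   # global average pooling per block
                logits.append(self.heads[i * self.grid + j](pooled))
        return logits                             # K per-region prediction logits

frb = FacialRegionBranch()
region_logits = frb(torch.randn(2, 2048, 14, 14))   # list of 4 tensors, each (2, 7)
```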
1115
- {
1116
- "type": "text",
1117
- "text": "The impact of the loss combination weight $\\lambda$ . To train OADN, we jointly optimize the loss from the landmark-guided attention branch (LAB) and the facial region branch (FRB) as defined in Equation (5). The loss weight $\\lambda$ controls the relative importance of each loss. When $\\lambda$ equals 1, only LAB is utilized. While $\\lambda = 0$ means only FRB is used. From Figure 7 (c), we can find that LAB obtains better performance since the network is guided to attend to the most discriminative facial areas. While combining the two branches achieves better performance than using either one branch alone. This validates the effectiveness of the complementary features learned by the two branches.",
1118
- "bbox": [
1119
- 496,
1120
- 431,
1121
- 892,
1122
- 613
1123
- ],
1124
- "page_idx": 6
1125
- },
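The joint objective of Equation (5) is just a weighted sum of the two branch losses. A minimal sketch is given below, assuming per-branch logits from hypothetical LAB and FRB heads, with `lam` standing in for the weight lambda studied in this ablation:

```python
import torch
import torch.nn.functional as F

def oadn_loss(lab_logits, region_logits, target, lam=0.5):
    # Equation (5): L = lambda * L_LAB + (1 - lambda) * L_FRB.
    loss_lab = F.cross_entropy(lab_logits, target)                        # Eq. (3)
    loss_frb = sum(F.cross_entropy(l, target) for l in region_logits)     # Eq. (4), over K regions
    return lam * loss_lab + (1.0 - lam) * loss_frb

# Toy usage: 2 samples, 7 classes, K = 4 region classifiers.
target = torch.tensor([0, 3])
loss = oadn_loss(torch.randn(2, 7), [torch.randn(2, 7) for _ in range(4)], target)
```

Setting `lam` to 1 or 0 recovers the single-branch baselines compared in Figure 7 (c).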
1126
- {
1127
- "type": "text",
1128
- "text": "4.5. Visualization",
1129
- "text_level": 1,
1130
- "bbox": [
1131
- 500,
1132
- 628,
1133
- 637,
1134
- 643
1135
- ],
1136
- "page_idx": 6
1137
- },
1138
- {
1139
- "type": "text",
1140
- "text": "Figure 8 shows some expression recognition examples of the gACNN [17] and our OADN method on the FEDRO dataset. The classification results show that gACNN is vulnerable to large head poses and heavy facial occlusions. On the contrary, the OADN can work successfully under these conditions.",
1141
- "bbox": [
1142
- 496,
1143
- 655,
1144
- 890,
1145
- 744
1146
- ],
1147
- "page_idx": 6
1148
- },
1149
- {
1150
- "type": "image",
1151
- "img_path": "images/04e3dab619a8e5a2046a56eb56b1d1e6c0eaf03c0ba0d7ddd77b2fa9c8469ea8.jpg",
1152
- "image_caption": [
1153
- "Figure 8. Comparison of the gACNN method and our OADN method on the FED-RO dataset. Red and green texts indicate the error and correct predictions."
1154
- ],
1155
- "image_footnote": [],
1156
- "bbox": [
1157
- 511,
1158
- 762,
1159
- 880,
1160
- 835
1161
- ],
1162
- "page_idx": 6
1163
- },
1164
- {
1165
- "type": "text",
1166
- "text": "5. Conclusions",
1167
- "text_level": 1,
1168
- "bbox": [
1169
- 76,
1170
- 89,
1171
- 204,
1172
- 106
1173
- ],
1174
- "page_idx": 7
1175
- },
1176
- {
1177
- "type": "text",
1178
- "text": "In this paper, we present an occlusion-adaptive deep network to tackle the occluded facial expression recognition problem. The network is composed of two branches: the landmark-guided attention branch guides the network to learn clean features from the non-occluded facial areas. The facial region branch increases the robustness by dividing the last convolutional layer into several part classifiers. We conduct extensive experiments on both challenging in-the-wild expression datasets and real-world occluded expression datasets. The results show that our method outperforms existing methods and achieves robustness against occlusion and various poses.",
1179
- "bbox": [
1180
- 75,
1181
- 114,
1182
- 472,
1183
- 297
1184
- ],
1185
- "page_idx": 7
1186
- },
1187
- {
1188
- "type": "text",
1189
- "text": "6. Acknowledgement",
1190
- "text_level": 1,
1191
- "bbox": [
1192
- 76,
1193
- 308,
1194
- 256,
1195
- 325
1196
- ],
1197
- "page_idx": 7
1198
- },
1199
- {
1200
- "type": "text",
1201
- "text": "This research is based upon work supported by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA R&D Contract No. 2019-022600002. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright annotation thereon.",
1202
- "bbox": [
1203
- 75,
1204
- 333,
1205
- 472,
1206
- 501
1207
- ],
1208
- "page_idx": 7
1209
- },
1210
- {
1211
- "type": "text",
1212
- "text": "References",
1213
- "text_level": 1,
1214
- "bbox": [
1215
- 76,
1216
- 511,
1217
- 174,
1218
- 527
1219
- ],
1220
- "page_idx": 7
1221
- },
1222
- {
1223
- "type": "list",
1224
- "sub_type": "ref_text",
1225
- "list_items": [
1226
- "[1] D. Acharya, Z. Huang, D. Pani Paudel, and L. Van Gool. Covariance pooling for facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 367-374, 2018.",
1227
- "[2] E. Barsoum, C. Zhang, C. C. Ferrer, and Z. Zhang. Training deep networks for facial expression recognition with crowdsourced label distribution. In Proceedings of the 18th ACM International Conference on Multimodal Interaction, pages 279-283, 2016.",
1228
- "[3] B. Bozortabar, M. S. Rad, H. K. Ekenel, and J.-P. Thiran. Using photorealistic face synthesis and domain adaptation to improve facial expression analysis. In 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-8. IEEE, 2019.",
1229
- "[4] A. Bulat and G. Tzimiropoulos. How far are we from solving the 2D & 3D face alignment problem? (and a dataset of 230,000 3D facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 1021-1030, 2017.",
1230
- "[5] J. Cai, Z. Meng, A. S. Khan, Z. Li, J. O'Reilly, and Y. Tong. Identity-free facial expression recognition using conditional generative adversarial network. arXiv preprint arXiv:1903.08051, 2019.",
1231
- "[6] J. Cai, Z. Meng, A. S. Khan, Z. Li, J. OReilly, and Y. Tong. Island loss for learning discriminative features in facial ex"
1232
- ],
1233
- "bbox": [
1234
- 84,
1235
- 536,
1236
- 470,
1237
- 900
1238
- ],
1239
- "page_idx": 7
1240
- },
1241
- {
1242
- "type": "list",
1243
- "sub_type": "ref_text",
1244
- "list_items": [
1245
- "pression recognition. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 302-309. IEEE, 2018.",
1246
- "[7] Q. Cao, L. Shen, W. Xie, O. M. Parkhi, and A. Zisserman. Vggface2: A dataset for recognising faces across pose and age. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 67-74. IEEE, 2018.",
1247
- "[8] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255. IEEE, 2009.",
1248
- "[9] X. Dong, Y. Yan, W. Ouyang, and Y. Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 379-388, 2018.",
1249
- "[10] C. Florea, L. Florea, M. Badea, C. Vertan, and A. Racoviteanu. Annealed label transfer for face expression recognition. In British Machine Vision Conference (BMVC), 2019.",
1250
- "[11] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, pages 2672–2680, 2014.",
1251
- "[12] I. J. Goodfellow, D. Erhan, P. L. Carrier, A. Courville, M. Mirza, B. Hammer, W. Cukierski, Y. Tang, D. Thaler, D.-H. Lee, et al. Challenges in representation learning: A report on three machine learning contests. In International Conference on Neural Information Processing, pages 117-124. Springer, 2013.",
1252
- "[13] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016.",
1253
- "[14] S. Jyoti, G. Sharma, and A. Dhall. Expression empowered resident network for facial action unit detection. In 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-8. IEEE, 2019.",
1254
- "[15] S. Li and W. Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2018.",
1255
- "[16] S. Li, W. Deng, and J. Du. Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2852-2861, 2017.",
1256
- "[17] Y. Li, J. Zeng, S. Shan, and X. Chen. Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE Transactions on Image Processing, 28(5):2439-2450, 2018.",
1257
- "[18] Y. Li, J. Zeng, S. Shan, and X. Chen. Patch-gated CNN for occlusion-aware facial expression recognition. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 2209-2214. IEEE, 2018.",
1258
- "[19] M. Liu, S. Li, S. Shan, and X. Chen. AU-aware deep networks for facial expression recognition. In IEEE Interna"
1259
- ],
1260
- "bbox": [
1261
- 503,
1262
- 92,
1263
- 890,
1264
- 900
1265
- ],
1266
- "page_idx": 7
1267
- },
1268
- {
1269
- "type": "list",
1270
- "sub_type": "ref_text",
1271
- "list_items": [
1272
- "tional Conference on Automatic Face & Gesture Recognition Workshops, pages 1-6, 2013.",
1273
- "[20] M. Liu, S. Shan, R. Wang, and X. Chen. Learning expressionlets on spatio-temporal manifold for dynamic facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1749-1756, 2014.",
1274
- "[21] P. Liu, S. Han, Z. Meng, and Y. Tong. Facial expression recognition via a boosted deep belief network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1805-1812, 2014.",
1275
- "[22] P. Liu, J. T. Zhou, I. W.-H. Tsang, Z. Meng, S. Han, and Y. Tong. Feature disentangling machine-a novel approach of feature selection and disentangling in facial expression analysis. In European Conference on Computer Vision (ECCV), pages 151-166. 2014.",
1276
- "[23] P. Lucey, J. F. Cohn, T. Kanade, J. Saragih, Z. Ambadar, and I. Matthews. The extended cohn-kanade dataset (CK+): A complete dataset for action unit and emotion-specified expression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 94–101. IEEE, 2010.",
1277
- "[24] Z. Luo, J. Hu, and W. Deng. Local subclass constraint for facial expression recognition in the wild. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 3132-3137. IEEE, 2018.",
1278
- "[25] A. Mollahosseini, B. Hasani, and M. H. Mahoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Transactions on Affective Computing, 10(1):18-31, 2017.",
1279
- "[26] B. Pan, S. Wang, and B. Xia. Occluded facial expression recognition enhanced through privileged information. In Proceedings of the 27th ACM International Conference on Multimedia, pages 566-573, 2019.",
1280
- "[27] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L. Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, pages 8024-8035, 2019.",
1281
- "[28] C. Sagonas, G. Tzimiropoulos, S. Zafeiriou, and M. Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCVW), pages 397-403, 2013.",
1282
- "[29] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. International Conference on Learning Representations (ICLR), 2015.",
1283
- "[30] M. Valstar and M. Pantic. Induced disgust, happiness and surprise: an addition to the MMI facial expression database. In Proc. 3rd Intern. Workshop on EMOTION (satellite of LREC): Corpora for Research on Emotion and Affect, page 65. Paris, France, 2010.",
1284
- "[31] K. Wang, X. Peng, J. Yang, D. Meng, and Y. Qiao. Region attention networks for pose and occlusion robust facial expression recognition. IEEE Transactions on Image Processing, 2020."
1285
- ],
1286
- "bbox": [
1287
- 78,
1288
- 92,
1289
- 468,
1290
- 898
1291
- ],
1292
- "page_idx": 8
1293
- },
1294
- {
1295
- "type": "list",
1296
- "sub_type": "ref_text",
1297
- "list_items": [
1298
- "[32] Y. Wen, K. Zhang, Z. Li, and Y. Qiao. A discriminative feature learning approach for deep face recognition. In European Conference on Computer Vision (ECCV), pages 499-515. Springer, 2016.",
1299
- "[33] H. Yang, U. Ciftci, and L. Yin. Facial expression recognition by de-expression residue learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2168-2177, 2018.",
1300
- "[34] J. Yu, Z. Lin, J. Yang, X. Shen, X. Lu, and T. S. Huang. Generative image inpainting with contextual attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5505-5514, 2018.",
1301
- "[35] J. Zeng, S. Shan, and X. Chen. Facial expression recognition with inconsistently annotated datasets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 222-237, 2018.",
1302
- "[36] F. Zhang, T. Zhang, Q. Mao, and C. Xu. Joint pose and expression modeling for facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3359-3368, 2018.",
1303
- "[37] K. Zhang, Z. Zhang, Z. Li, and Y. Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Signal Processing Letters, 23(10):1499-1503, 2016.",
1304
- "[38] G. Zhao, X. Huang, M. Taini, S. Z. Li, and M. PietikäInen. Facial expression recognition from near-infrared videos. Image and Vision Computing, 29(9):607-619, 2011.",
1305
- "[39] L. Zhong, Q. Liu, P. Yang, B. Liu, J. Huang, and D. N. Metaxas. Learning active facial patches for expression analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2562-2569, 2012."
1306
- ],
1307
- "bbox": [
1308
- 501,
1309
- 92,
1310
- 890,
1311
- 542
1312
- ],
1313
- "page_idx": 8
1314
- }
1315
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89286d7510cd3ede32a8d134b5ae19eee0087198fc8ac3289a62513663e818af
3
+ size 63298

data/2020/2005_06xxx/2005.06040/56b116d0-0e46-408a-9475-7cd9f859a9ca_model.json CHANGED
@@ -1,1846 +1,3 @@
1
- [
2
- [
3
- {
4
- "type": "aside_text",
5
- "bbox": [
6
- 0.023,
7
- 0.256,
8
- 0.058,
9
- 0.708
10
- ],
11
- "angle": 270,
12
- "content": "arXiv:2005.06040v1 [cs.CV] 12 May 2020"
13
- },
14
- {
15
- "type": "title",
16
- "bbox": [
17
- 0.1,
18
- 0.13,
19
- 0.872,
20
- 0.153
21
- ],
22
- "angle": 0,
23
- "content": "Occlusion-Adaptive Deep Network for Robust Facial Expression Recognition"
24
- },
25
- {
26
- "type": "text",
27
- "bbox": [
28
- 0.31,
29
- 0.194,
30
- 0.66,
31
- 0.213
32
- ],
33
- "angle": 0,
34
- "content": "Hui Ding, Peng Zhou, and Rama Chellappa"
35
- },
36
- {
37
- "type": "text",
38
- "bbox": [
39
- 0.336,
40
- 0.224,
41
- 0.634,
42
- 0.243
43
- ],
44
- "angle": 0,
45
- "content": "University of Maryland, College Park"
46
- },
47
- {
48
- "type": "title",
49
- "bbox": [
50
- 0.236,
51
- 0.287,
52
- 0.312,
53
- 0.303
54
- ],
55
- "angle": 0,
56
- "content": "Abstract"
57
- },
58
- {
59
- "type": "text",
60
- "bbox": [
61
- 0.079,
62
- 0.32,
63
- 0.47,
64
- 0.635
65
- ],
66
- "angle": 0,
67
- "content": "Recognizing the expressions of partially occluded faces is a challenging computer vision problem. Previous expression recognition methods, either overlooked this issue or resolved it using extreme assumptions. Motivated by the fact that the human visual system is adept at ignoring the occlusion and focus on non-occluded facial areas, we propose a landmark-guided attention branch to find and discard corrupted features from occluded regions so that they are not used for recognition. An attention map is first generated to indicate if a specific facial part is occluded and guide our model to attend to non-occluded regions. To further improve robustness, we propose a facial region branch to partition the feature maps into non-overlapping facial blocks and task each block to predict the expression independently. This results in more diverse and discriminative features, enabling the expression recognition system to recover even though the face is partially occluded. Depending on the synergistic effects of the two branches, our occlusion-adaptive deep network significantly outperforms state-of-the-art methods on two challenging in-the-wild benchmark datasets and three real-world occluded expression datasets."
68
- },
69
- {
70
- "type": "title",
71
- "bbox": [
72
- 0.081,
73
- 0.665,
74
- 0.208,
75
- 0.68
76
- ],
77
- "angle": 0,
78
- "content": "1. Introduction"
79
- },
80
- {
81
- "type": "text",
82
- "bbox": [
83
- 0.08,
84
- 0.69,
85
- 0.468,
86
- 0.779
87
- ],
88
- "angle": 0,
89
- "content": "Facial expressions play an important role in social communication in our daily life. In recent years, automatically recognizing expression has received increasing attention due to its wide applications, including driver safety, health care, video conferencing, virtual reality, and cognitive science etc."
90
- },
91
- {
92
- "type": "text",
93
- "bbox": [
94
- 0.08,
95
- 0.781,
96
- 0.468,
97
- 0.9
98
- ],
99
- "angle": 0,
100
- "content": "Existing methods that address expression recognition can be divided into two categories. One category utilized synthesis techniques to facilitate discriminative feature learning [33, 36, 3, 5]; While the other tried to boost the performance by designing new loss functions or network architectures [16, 35, 6, 1]. In the first category, de-expression residue learning proposed in [33] leveraged the neutral face images to distill the expression information from the cor"
101
- },
102
- {
103
- "type": "text",
104
- "bbox": [
105
- 0.503,
106
- 0.289,
107
- 0.891,
108
- 0.621
109
- ],
110
- "angle": 0,
111
- "content": "responding expressive images. Zhang et al. [36] explored an adversarial autoencoder to generate facial images with different expressions under arbitrary poses to enlarge the training set. However, those works mainly focus on lab-collected datasets captured in controlled environments, such as \\(\\mathrm{CK + }\\) [23], MMI [30] and OULU-CASIA [38]. Although high accuracy results have been obtained on these datasets, they perform poorly when recognizing facial expressions in-the-wild. In the second category, Li et al. [16] proposed a locality preserving loss to enhance deep features by preserving the locality closeness measure while maximizing the inter-class scatters. To address the annotation inconsistencies among different facial expression datasets, Zeng et al. [35] introduced a probability transition layer to recover the latent truths from noisy labels. Although expression datasets under natural and uncontrollable conditions are explored, facial expression recognition under partial occlusions is still a challenging problem that has been relatively unexplored. In real-life images or videos, facial occlusions can often be observed, e.g., facial accessories including sunglasses, scarves, and masks or other random objects like hands, hairs and cups."
112
- },
113
- {
114
- "type": "text",
115
- "bbox": [
116
- 0.504,
117
- 0.625,
118
- 0.891,
119
- 0.835
120
- ],
121
- "angle": 0,
122
- "content": "Recently, some related works have been proposed to solve this challenge. Patch-gated Convolutional Neural Network [18] decomposed a face into different patches and explicitly predicted the occlusion likelihood of the corresponding patch using a patch-gated unit. Wang et al. [31] proposed a self-attention scheme to learn the importance weights for multiple facial regions. However, the unobstructed scores are learned without any ground truth on the occlusion information and may be biased. In this work, we present an Occlusion-Adaptive Deep Network (OADN) to overcome the occlusion problem for robust facial expression recognition in-the-wild. It consists of two branches: a landmark-guided attention branch and a facial region branch."
123
- },
124
- {
125
- "type": "text",
126
- "bbox": [
127
- 0.504,
128
- 0.841,
129
- 0.891,
130
- 0.9
131
- ],
132
- "angle": 0,
133
- "content": "The landmark-guided attention branch is proposed to discard feature elements that have been corrupted by occlusions. The interest points covering the most distinctive facial areas for facial expression recognition are computed"
134
- }
135
- ],
136
- [
137
- {
138
- "type": "text",
139
- "bbox": [
140
- 0.076,
141
- 0.092,
142
- 0.468,
143
- 0.166
144
- ],
145
- "angle": 0,
146
- "content": "based on the domain knowledge. Then the meta information of these points is utilized to generate the attention maps. The global features are modulated by the attention maps to guide the model to focus on the non-occluded facial regions and filter out the occluded regions."
147
- },
148
- {
149
- "type": "text",
150
- "bbox": [
151
- 0.076,
152
- 0.168,
153
- 0.468,
154
- 0.346
155
- ],
156
- "angle": 0,
157
- "content": "To further enhance robustness and learn complementary context information, we introduce a facial region branch to train multiple region-based expression classifiers. This is achieved by first partitioning the global feature maps into non-overlapping facial blocks. Then each block is trained by backpropogating the recognition loss independently. Thus even when the face is partially occluded, the classifiers from other non-occluded regions are still able to function properly. Furthermore, since the expression datasets are usually small, having multiple region-based classifiers adds more supervision and acts as a regularizer to alleviate the overfitting issue."
158
- },
159
- {
160
- "type": "text",
161
- "bbox": [
162
- 0.077,
163
- 0.348,
164
- 0.468,
165
- 0.378
166
- ],
167
- "angle": 0,
168
- "content": "The main contributions in this work are summarized as follows:"
169
- },
170
- {
171
- "type": "text",
172
- "bbox": [
173
- 0.094,
174
- 0.388,
175
- 0.468,
176
- 0.432
177
- ],
178
- "angle": 0,
179
- "content": "- We propose OADN, an effective method to deal with the occlusion problem for facial expression recognition in-the-wild."
180
- },
181
- {
182
- "type": "text",
183
- "bbox": [
184
- 0.094,
185
- 0.443,
186
- 0.468,
187
- 0.487
188
- ],
189
- "angle": 0,
190
- "content": "- We introduce a landmark-guided attention branch to guide the network to attend to non-occluded regions for representation learning."
191
- },
192
- {
193
- "type": "text",
194
- "bbox": [
195
- 0.094,
196
- 0.498,
197
- 0.468,
198
- 0.543
199
- ],
200
- "angle": 0,
201
- "content": "- We design a facial region branch to learn region-based classifiers for complementary context features and further increasing the robustness."
202
- },
203
- {
204
- "type": "text",
205
- "bbox": [
206
- 0.094,
207
- 0.553,
208
- 0.468,
209
- 0.598
210
- ],
211
- "angle": 0,
212
- "content": "- Experimental results on five challenging benchmark datasets show that our proposed OADN obtains significantly better performance than existing methods."
213
- },
214
- {
215
- "type": "list",
216
- "bbox": [
217
- 0.094,
218
- 0.388,
219
- 0.468,
220
- 0.598
221
- ],
222
- "angle": 0,
223
- "content": null
224
- },
225
- {
226
- "type": "title",
227
- "bbox": [
228
- 0.077,
229
- 0.611,
230
- 0.218,
231
- 0.627
232
- ],
233
- "angle": 0,
234
- "content": "2. Related Work"
235
- },
236
- {
237
- "type": "title",
238
- "bbox": [
239
- 0.077,
240
- 0.636,
241
- 0.388,
242
- 0.652
243
- ],
244
- "angle": 0,
245
- "content": "2.1. Deep Facial Expression Recognition"
246
- },
247
- {
248
- "type": "text",
249
- "bbox": [
250
- 0.076,
251
- 0.659,
252
- 0.468,
253
- 0.901
254
- ],
255
- "angle": 0,
256
- "content": "Deep learning methods [39, 19, 22, 21, 20, 33, 5, 36, 16, 6, 35, 24, 1] for facial expression recognition have achieved great success in the past few years. Based on the assumption that a facial expression is the combination of a neutral face image and the expressive component, Yang et al. [33] proposed a de-expression residue learning to learn the residual expressive component in a generative model. To reduce the inter-subject variations, Cai et al. [5] introduced an identity-free generative adversarial network [11] to generate an average identity face image while keep the expression unchanged. Considering the pose variation, Zhang et al. [36] leveraged an adversarial autoencoder to augment the training set with face images under different expression and poses. However, these methods mainly focused on datasets captured in controlled environments, where the facial images are near frontal. Thus the models generalize poorly"
257
- },
258
- {
259
- "type": "text",
260
- "bbox": [
261
- 0.499,
262
- 0.092,
263
- 0.89,
264
- 0.121
265
- ],
266
- "angle": 0,
267
- "content": "when recognizing human expressions under natural and uncontrollable variations."
268
- },
269
- {
270
- "type": "text",
271
- "bbox": [
272
- 0.498,
273
- 0.122,
274
- 0.892,
275
- 0.394
276
- ],
277
- "angle": 0,
278
- "content": "Another line of work focused on designing advanced network architectures [1] or loss functions [16, 6, 35, 24]. Li et al. [16] proposed a deep locality-preserving Convolutional Neural Network, which preserved the local proximity by minimizing the distance to K-nearest neighbors within the same class. Building on this, Cai et al. [6] further introduced an island loss to simultaneously reduce intraclass variations and augment inter-class differences. Zeng et al. [35] studied the annotation error and bias problem among different facial expression datasets. Each image is predicted with multiple pseudo labels and a model is learned to fit the latent truth from these inconsistent labels. Acharya et al. [1] explored a covariance pooling layer to better capture the distortions in regional facial features and temporal evolution of per-frame features. Although the aforementioned approaches achieve good performance on data in the wild, facial expression recognition is still challenging due to the existence of partially occluded faces."
279
- },
280
- {
281
- "type": "title",
282
- "bbox": [
283
- 0.5,
284
- 0.402,
285
- 0.845,
286
- 0.418
287
- ],
288
- "angle": 0,
289
- "content": "2.2. Occlusive Facial Expression Recognition"
290
- },
291
- {
292
- "type": "text",
293
- "bbox": [
294
- 0.498,
295
- 0.426,
296
- 0.892,
297
- 0.622
298
- ],
299
- "angle": 0,
300
- "content": "Recently, there are some works starting to investigate the occlusion issue. Li et al. [17] proposed a gate unit to enable the model to shift attention from the occluded patches to other visible facial regions. The gate unit estimates how informative a face patch is through an attention net, then the features are modulated by the learned weights. Similarly, region attention network [31] cropped multiple face regions and utilized a self-attention based model to learn an important weight for each region. However, the self-attention based methods lack additional supervision to ensure the functionality. Thus, the network may not be able to locate these non-occluded facial regions accurately under large occlusions and poses."
301
- },
302
- {
303
- "type": "title",
304
- "bbox": [
305
- 0.5,
306
- 0.634,
307
- 0.813,
308
- 0.651
309
- ],
310
- "angle": 0,
311
- "content": "3. Occlusion Adaptive Deep Network"
312
- },
313
- {
314
- "type": "text",
315
- "bbox": [
316
- 0.498,
317
- 0.659,
318
- 0.892,
319
- 0.81
320
- ],
321
- "angle": 0,
322
- "content": "To this end, we propose OADN for robust facial expression recognition in-the-wild. To be specific, we use ResNet50 [13] without the average pooling layer and the fully connected layer as the backbone to extract global feature maps from given images. We set the stride of conv4_1 to be 1, so a larger feature map is obtained. For an input image with height \\( H \\) and width \\( W \\), the resolution of the output feature \\( F \\) will be \\( H / 16 \\times W / 16 \\) instead of \\( H / 32 \\times W / 32 \\). This is beneficial to identify the occlusion information and focus on the visible facial regions."
323
- },
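As a concrete illustration of this backbone change, the snippet below modifies a torchvision ResNet50 so that a 224 x 224 input yields a 14 x 14 feature map. It assumes the usual correspondence conv4_x = layer3 in torchvision's naming, and is a sketch rather than the authors' code.

```python
import torch
import torchvision

backbone = torchvision.models.resnet50(weights=None)
# conv4_1 in the paper's naming is the first block of `layer3` in torchvision;
# removing its stride-2 downsampling gives H/16 x W/16 features instead of H/32 x W/32.
backbone.layer3[0].conv2.stride = (1, 1)
backbone.layer3[0].downsample[0].stride = (1, 1)
# Drop the average pooling and fully connected layers to keep the spatial feature map F.
features = torch.nn.Sequential(*list(backbone.children())[:-2])
print(features(torch.randn(1, 3, 224, 224)).shape)   # torch.Size([1, 2048, 14, 14])
```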
324
- {
325
- "type": "text",
326
- "bbox": [
327
- 0.498,
328
- 0.811,
329
- 0.892,
330
- 0.901
331
- ],
332
- "angle": 0,
333
- "content": "As illustrated in Figure 1, OADN mainly consists of two branches: one is the landmark-guided attention branch, which utilizes a landmark detector to locate the landmarks and to guide the network to attend to the non-occluded facial areas. The other one is the facial region branch which divides the global feature maps into blocks and utilizes"
334
- }
335
- ],
336
- [
337
- {
338
- "type": "image",
339
- "bbox": [
340
- 0.081,
341
- 0.088,
342
- 0.892,
343
- 0.372
344
- ],
345
- "angle": 0,
346
- "content": null
347
- },
348
- {
349
- "type": "image_caption",
350
- "bbox": [
351
- 0.076,
352
- 0.375,
353
- 0.893,
354
- 0.458
355
- ],
356
- "angle": 0,
357
- "content": "Figure 1. Pipeline of the Occlusion Adaptive Deep Network. It consists of two branches: a Landmark-guided Attention Branch and a Facial Region Branch. The ResNet50 backbone is shared between the two branches to extract the global features. For the Landmark-guided Attention Branch, the facial landmarks are first detected. Then the interest points are computed to cover the most informative facial areas. The confidence scores of these points are further utilized to generate the attention maps, guiding the model to attend to the visible facial components. For the Facial Region Branch, the feature maps are divided into non-overlapping facial blocks and each block is trained to be a discriminative expression classifier on its own."
358
- },
359
- {
360
- "type": "text",
361
- "bbox": [
362
- 0.076,
363
- 0.484,
364
- 0.47,
365
- 0.528
366
- ],
367
- "angle": 0,
368
- "content": "region-based classifiers to increase robustness. We describe each branch and the structural relationship among the two branches below."
369
- },
370
- {
371
- "type": "image",
372
- "bbox": [
373
- 0.114,
374
- 0.539,
375
- 0.235,
376
- 0.63
377
- ],
378
- "angle": 0,
379
- "content": null
380
- },
381
- {
382
- "type": "image_caption",
383
- "bbox": [
384
- 0.111,
385
- 0.632,
386
- 0.236,
387
- 0.641
388
- ],
389
- "angle": 0,
390
- "content": "(a) Original 68 detected landmarks"
391
- },
392
- {
393
- "type": "image",
394
- "bbox": [
395
- 0.24,
396
- 0.574,
397
- 0.307,
398
- 0.6
399
- ],
400
- "angle": 0,
401
- "content": null
402
- },
403
- {
404
- "type": "image",
405
- "bbox": [
406
- 0.315,
407
- 0.54,
408
- 0.434,
409
- 0.63
410
- ],
411
- "angle": 0,
412
- "content": null
413
- },
414
- {
415
- "type": "image_caption",
416
- "bbox": [
417
- 0.327,
418
- 0.632,
419
- 0.422,
420
- 0.641
421
- ],
422
- "angle": 0,
423
- "content": "(b) Recomputed 24 points"
424
- },
425
- {
426
- "type": "image_caption",
427
- "bbox": [
428
- 0.076,
429
- 0.644,
430
- 0.47,
431
- 0.7
432
- ],
433
- "angle": 0,
434
- "content": "Figure 2. We select 16 points from the original 68 landmarks (a) to cover the regions around eyes, eyebrows, nose and mouth. We further recompute 8 points (b) to cover facial cheeks and the areas between eyes and eyebrows."
435
- },
436
- {
437
- "type": "title",
438
- "bbox": [
439
- 0.077,
440
- 0.727,
441
- 0.394,
442
- 0.743
443
- ],
444
- "angle": 0,
445
- "content": "3.1. Landmark-guided Attention Branch"
446
- },
447
- {
448
- "type": "text",
449
- "bbox": [
450
- 0.076,
451
- 0.75,
452
- 0.471,
453
- 0.902
454
- ],
455
- "angle": 0,
456
- "content": "OADN employs a facial landmark detector [9] to obtain landmarks from face images. The landmark detector is pretrained on the 300W dataset [28]. Given an input image, OADN utilizes the detector to extract \\( N = 68 \\) landmarks. For each landmark, the detector predicts its coordinates and confidence score. Then based on the detected 68 points, we select or recompute \\( M = 24 \\) interest points that cover the distinctive regions of face, including the eyes, nose, mouth and cheeks. Figure 2 illustrates the computation results. For those recomputed points (mainly around eyes and cheeks),"
457
- },
458
- {
459
- "type": "text",
460
- "bbox": [
461
- 0.498,
462
- 0.484,
463
- 0.892,
464
- 0.573
465
- ],
466
- "angle": 0,
467
- "content": "we set their confidence scores to be the minimum confidence score of landmark points that used to compute them. To remove the occluded facial regions, we set a threshold \\( T \\) to filter out the landmarks that have confidence scores smaller than \\( T \\). Specifically, the interest points are obtained by:"
468
- },
469
- {
470
- "type": "equation",
471
- "bbox": [
472
- 0.571,
473
- 0.575,
474
- 0.891,
475
- 0.609
476
- ],
477
- "angle": 0,
478
- "content": "\\[\np _ {i} = \\left\\{ \\begin{array}{l l} \\left(x _ {i}, y _ {i}\\right) & \\text {i f} s _ {i} ^ {\\text {c o n f}} \\geq T \\\\ 0 & \\text {e l s e} \\end{array} \\right. \\tag {1}\n\\]"
479
- },
480
- {
481
- "type": "text",
482
- "bbox": [
483
- 0.498,
484
- 0.617,
485
- 0.891,
486
- 0.661
487
- ],
488
- "angle": 0,
489
- "content": "where \\( p_i \\) denotes the \\( i \\)th interest point, and \\( x_i, y_i \\) denote the coordinates of the \\( i \\)th point. \\( s_i \\) is the confidence score ranged from 0 to 1 and \\( T \\) is the threshold."
490
- },
491
- {
492
- "type": "text",
493
- "bbox": [
494
- 0.498,
495
- 0.663,
496
- 0.892,
497
- 0.844
498
- ],
499
- "angle": 0,
500
- "content": "We then generate the attention heatmaps consisting of a 2D Gaussian distribution, where the centers are the ground truth locations of the visible landmarks. For those occluded landmarks, the corresponding attention maps are set to zero. We further downsample the attention maps by linear interpolation to match the size of the output feature maps. As shown in Figure 1, the attention map \\( A_{i} \\) modulates the global feature maps \\( F \\) to obtain the re-weighted features \\( F_{i}^{A} \\). To achieve this, the feature map \\( F \\) from the backbone is multiplied by each attention map \\( A_{i} \\), \\( i = 1,\\dots,M \\) element-wisely, resulting \\( M \\) landmark-guided feature maps \\( F_{i}^{A} \\):"
501
- },
502
- {
503
- "type": "equation",
504
- "bbox": [
505
- 0.601,
506
- 0.845,
507
- 0.891,
508
- 0.862
509
- ],
510
- "angle": 0,
511
- "content": "\\[\nF _ {i} ^ {A} = F \\odot A _ {i}, i = 1, \\dots , M \\tag {2}\n\\]"
512
- },
513
- {
514
- "type": "text",
515
- "bbox": [
516
- 0.498,
517
- 0.871,
518
- 0.892,
519
- 0.901
520
- ],
521
- "angle": 0,
522
- "content": "where \\(A_{i}\\) is the ith heatmap, and \\(\\odot\\) is element-wise product. Since the attention map indicates the visibility of each"
523
- }
524
- ],
525
- [
526
- {
527
- "type": "text",
528
- "bbox": [
529
- 0.076,
530
- 0.091,
531
- 0.468,
532
- 0.166
533
- ],
534
- "angle": 0,
535
- "content": "facial component, the landmark-guided feature map \\( F_{i}^{A} \\) can attend to the non-occluded facial parts and remove the information from the occluded regions. Thus, the feature from the visible region is emphasized and occluded part is canceled."
536
- },
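A minimal sketch of the attention map construction and the modulation in Equation (2). For brevity it draws the Gaussian directly at feature-map resolution (the text instead downsamples full-resolution maps by linear interpolation), and it assumes interest point coordinates already rescaled to the 14 x 14 grid; the sigma value and all names are illustrative.

```python
import torch

def gaussian_heatmap(cx, cy, size=14, sigma=1.0):
    # One attention map A_i: a 2D Gaussian centred on a visible interest point.
    xs = torch.arange(size, dtype=torch.float32).view(1, -1)   # column index (x)
    ys = torch.arange(size, dtype=torch.float32).view(-1, 1)   # row index (y)
    return torch.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))

def modulate(feat, points, visible, sigma=1.0):
    # Equation (2): F_i^A = F (element-wise product) A_i for each of the M interest points.
    # Occluded points get an all-zero map, so their features are cancelled.
    b, c, h, w = feat.shape
    guided = []
    for (cx, cy), vis in zip(points, visible):
        a = gaussian_heatmap(cx, cy, size=h, sigma=sigma) if vis else torch.zeros(h, w)
        guided.append(feat * a)          # broadcasts over batch and channel dimensions
    return guided                         # list of M tensors, each (B, C, H, W)

maps = modulate(torch.randn(2, 2048, 14, 14), [(3.0, 4.0), (10.0, 6.0)], [True, False])
```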
537
- {
538
- "type": "text",
539
- "bbox": [
540
- 0.076,
541
- 0.168,
542
- 0.468,
543
- 0.332
544
- ],
545
- "angle": 0,
546
- "content": "Then global average pooling is applied to each landmark-guided feature map \\( F_{i}^{A} \\) to obtain a 2048- \\( D \\) feature \\( f_{i}^{A}, i = 1, \\dots, M \\), corresponding to the facial component containing the specific interest point. Finally, the component-wise feature \\( f_{i}^{A} \\) is max-pooled to fuse features from the non-occluded facial areas. A fully-connected layer is further used to reduce the dimension from 2048 to 256, and the output is fed into a softmax layer to predict the expression category of each input face image. We utilize the cross-entropy loss to train the landmark-guided attention branch, which is expressed as follows:"
547
- },
548
- {
549
- "type": "equation",
550
- "bbox": [
551
- 0.192,
552
- 0.344,
553
- 0.468,
554
- 0.385
555
- ],
556
- "angle": 0,
557
- "content": "\\[\nL _ {L A B} = - \\sum_ {i = 1} ^ {C} y _ {i} \\log \\hat {y} _ {i} \\tag {3}\n\\]"
558
- },
559
- {
560
- "type": "text",
561
- "bbox": [
562
- 0.077,
563
- 0.395,
564
- 0.468,
565
- 0.426
566
- ],
567
- "angle": 0,
568
- "content": "where \\(\\hat{y}_i\\) is the prediction, \\(y_i\\) is the ground truth and \\(C\\) is the number of expression classes."
569
- },
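Putting the pooling and fusion steps above together, a compact sketch of the landmark-guided attention branch head might look as follows (PyTorch, names illustrative); the cross-entropy of Equation (3) is then applied to the returned logits.

```python
import torch
import torch.nn as nn

class AttentionBranchHead(nn.Module):
    # Pool each landmark-guided map to a 2048-D component feature, max-pool across
    # the M components to fuse the visible regions, reduce to 256-D and classify.
    def __init__(self, channels=2048, num_classes=7):
        super().__init__()
        self.reduce = nn.Linear(channels, 256)
        self.classify = nn.Linear(256, num_classes)

    def forward(self, guided_maps):               # list of M tensors, each (B, C, H, W)
        comp = torch.stack([m.mean(dim=(2, 3)) for m in guided_maps], dim=1)  # (B, M, C)
        fused, _ = comp.max(dim=1)                # max-pooling over the M components
        return self.classify(self.reduce(fused))  # logits; softmax is folded into the loss

head = AttentionBranchHead()
logits = head([torch.randn(2, 2048, 14, 14) for _ in range(24)])   # shape (2, 7)
```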
570
- {
571
- "type": "title",
572
- "bbox": [
573
- 0.077,
574
- 0.435,
575
- 0.279,
576
- 0.451
577
- ],
578
- "angle": 0,
579
- "content": "3.2. Facial Region Branch"
580
- },
581
- {
582
- "type": "text",
583
- "bbox": [
584
- 0.076,
585
- 0.458,
586
- 0.468,
587
- 0.532
588
- ],
589
- "angle": 0,
590
- "content": "When the face is severely occluded, the landmark detection results may not be accurate. Since relying on the landmark-guided attention branch solely is not enough, OADN utilizes a Facial Region Branch (FRB) to learn useful context information and further increase the robustness."
591
- },
592
- {
593
- "type": "text",
594
- "bbox": [
595
- 0.076,
596
- 0.533,
597
- 0.468,
598
- 0.683
599
- ],
600
- "angle": 0,
601
- "content": "Given the global feature maps \\( F \\in h \\times w \\times c \\), where \\( h, w, c \\) are the height, width and channel dimensions, we first divide them into small \\( m \\times n \\) non-overlapping blocks. Each facial region feature \\( F_{i}^{R} \\in m \\times n \\times c, i = 1, \\dots, K \\) where \\( K = \\lceil \\frac{h}{m} \\rceil \\cdot \\lceil \\frac{w}{n} \\rceil \\) is then fed into a global average pooling layer to obtain a region-level feature \\( f_{i}^{R} \\). Afterwards, a fully-connected layer is employed to reduce the dimension of \\( f_{i}^{R} \\) from 2048 to 256. Finally, a softmax layer is applied to each region to obtain a set of predictions \\( y_{i}^{R} \\), where \\( i = 1, \\dots, K \\)."
602
- },
603
- {
604
- "type": "text",
605
- "bbox": [
606
- 0.076,
607
- 0.685,
608
- 0.468,
609
- 0.729
610
- ],
611
- "angle": 0,
612
- "content": "To train the facial region branch, we minimize the cross-entropy loss over the \\(K\\) regions independently. Formally, the loss is expressed as:"
613
- },
614
- {
615
- "type": "equation",
616
- "bbox": [
617
- 0.173,
618
- 0.74,
619
- 0.468,
620
- 0.782
621
- ],
622
- "angle": 0,
623
- "content": "\\[\nL _ {F R B} = - \\sum_ {i = 1} ^ {C} \\sum_ {j = 1} ^ {K} y _ {i} \\log \\hat {y} _ {i, j} ^ {R} \\tag {4}\n\\]"
624
- },
625
- {
626
- "type": "text",
627
- "bbox": [
628
- 0.076,
629
- 0.795,
630
- 0.468,
631
- 0.841
632
- ],
633
- "angle": 0,
634
- "content": "where \\(K\\) is the number of facial regions, \\(\\hat{y}_{i,j}^{R}\\) is the prediction of the \\(j\\)th region, and \\(y_{i}\\) is the ground truth expression category."
635
- },
636
- {
637
- "type": "text",
638
- "bbox": [
639
- 0.076,
640
- 0.841,
641
- 0.468,
642
- 0.9
643
- ],
644
- "angle": 0,
645
- "content": "To make an accurate prediction based on facial region only, OADN is required to learn more discriminative and diverse features at a finer-level. As a result, the partial occlusion will have a less effect on the network compared to a"
646
- },
647
- {
648
- "type": "text",
649
- "bbox": [
650
- 0.498,
651
- 0.092,
652
- 0.891,
653
- 0.152
654
- ],
655
- "angle": 0,
656
- "content": "standard model. Moreover, the size of the expression recognition dataset is usually not very large. Training multiple region-based classifiers adds more supervision and reduces overfitting."
657
- },
658
- {
659
- "type": "title",
660
- "bbox": [
661
- 0.499,
662
- 0.163,
663
- 0.842,
664
- 0.179
665
- ],
666
- "angle": 0,
667
- "content": "3.3. Relationship between the Two Branches"
668
- },
669
- {
670
- "type": "text",
671
- "bbox": [
672
- 0.498,
673
- 0.187,
674
- 0.891,
675
- 0.322
676
- ],
677
- "angle": 0,
678
- "content": "OADN is specifically designed to handle the occlusion problem for in-the-wild facial expression recognition. The landmark-guided attention branch explicitly guides the model to focus on non-occluded facial areas, learning a clean global feature. While the facial region branch promotes part-level features and enables the model to work robustly when the face is largely occluded. Combining the benefits from each branch, we train OADN using the following loss:"
679
- },
680
- {
681
- "type": "equation",
682
- "bbox": [
683
- 0.596,
684
- 0.336,
685
- 0.891,
686
- 0.352
687
- ],
688
- "angle": 0,
689
- "content": "\\[\nL = \\lambda L _ {L A B} + (1 - \\lambda) L _ {F R B} \\tag {5}\n\\]"
690
- },
691
- {
692
- "type": "text",
693
- "bbox": [
694
- 0.498,
695
- 0.365,
696
- 0.89,
697
- 0.395
698
- ],
699
- "angle": 0,
700
- "content": "where \\(\\lambda\\) is the loss combination weight. \\(L_{LAB}\\) and \\(L_{FRB}\\) are defined in Equation (3) and (4)."
701
- },
702
- {
703
- "type": "title",
704
- "bbox": [
705
- 0.499,
706
- 0.41,
707
- 0.632,
708
- 0.426
709
- ],
710
- "angle": 0,
711
- "content": "4. Experiments"
712
- },
713
- {
714
- "type": "title",
715
- "bbox": [
716
- 0.5,
717
- 0.436,
718
- 0.602,
719
- 0.45
720
- ],
721
- "angle": 0,
722
- "content": "4.1. Datasets"
723
- },
724
- {
725
- "type": "text",
726
- "bbox": [
727
- 0.498,
728
- 0.459,
729
- 0.891,
730
- 0.594
731
- ],
732
- "angle": 0,
733
- "content": "We validate the effectiveness of our method on two largest in-the-wild expression datasets: RAF-DB [16] and AffectNet [25]. The in-the-wild datasets contain facial expression in real world with various poses, illuminations, intensities, and other uncontrolled conditions. We also evaluate our method on three recently proposed real-world occlusion datasets: Occlusion-AffectNet[31], Occlusion-FERPlus [31] and FED-RO [17]. The occlusions are diverse in color, shape, position and occlusion ratio."
734
- },
735
- {
736
- "type": "image",
737
- "bbox": [
738
- 0.502,
739
- 0.608,
740
- 0.892,
741
- 0.673
742
- ],
743
- "angle": 0,
744
- "content": null
745
- },
746
- {
747
- "type": "image_caption",
748
- "bbox": [
749
- 0.498,
750
- 0.676,
751
- 0.891,
752
- 0.716
753
- ],
754
- "angle": 0,
755
- "content": "Figure 3. The interest points with confidence scores greater than the threshold \\( T \\) are shown in red points. We can see that occluded facial areas are removed."
756
- },
757
- {
758
- "type": "title",
759
- "bbox": [
760
- 0.5,
761
- 0.741,
762
- 0.716,
763
- 0.757
764
- ],
765
- "angle": 0,
766
- "content": "4.2. Implementation Details"
767
- },
768
- {
769
- "type": "text",
770
- "bbox": [
771
- 0.498,
772
- 0.765,
773
- 0.891,
774
- 0.901
775
- ],
776
- "angle": 0,
777
- "content": "Preprocessing. The standard MTCNN [37] is used to detect five face landmarks for all the images. After performing a similarity transformation, we obtain the aligned face images and resize them to be \\(224 \\times 224\\) pixels. To detect landmarks from occluded images, we use SAN [9] pretrained on the 300W dataset [28] to get 68 face landmarks. We also try another landmark detector [4] and similar results are obtained. Then we select 18 points covering eyebrows, eyes, nose and mouth, and recompute eight points"
778
- }
779
- ],
780
- [
781
- {
782
- "type": "table_caption",
783
- "bbox": [
784
- 0.149,
785
- 0.089,
786
- 0.398,
787
- 0.101
788
- ],
789
- "angle": 0,
790
- "content": "Table 1. Test set accuracy on RAF dataset"
791
- },
792
- {
793
- "type": "table",
794
- "bbox": [
795
- 0.139,
796
- 0.101,
797
- 0.411,
798
- 0.274
799
- ],
800
- "angle": 0,
801
- "content": "<table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>86.90%</td></tr><tr><td>OADN(ours)</td><td>89.83%</td></tr><tr><td>ResiDen [14]</td><td>76.54%</td></tr><tr><td>ResNet-PL [26]</td><td>81.97%</td></tr><tr><td>PG-CNN [18]</td><td>83.27%</td></tr><tr><td>Center Loss [32]</td><td>83.68%</td></tr><tr><td>DLP-CNN [15]</td><td>84.13%</td></tr><tr><td>ALT [10]</td><td>84.50%</td></tr><tr><td>gACNN [17]</td><td>85.07%</td></tr><tr><td>OADN(ours)</td><td>87.16%</td></tr></table>"
802
- },
803
- {
804
- "type": "text",
805
- "bbox": [
806
- 0.076,
807
- 0.31,
808
- 0.471,
809
- 0.491
810
- ],
811
- "angle": 0,
812
- "content": "from facial cheeks. The confidence scores of these recomputed points are the minimum scores of the points used to compute them. In all experiments, we set the threshold \\( T \\) of the confidence score to be 0.6, thus landmarks with confidence scores smaller than 0.6 are removed. Figure 3 shows the computed interest points after thresholding. From it we can see that the occluded facial regions are discarded. Finally, we generate attention maps consisting of a Gaussian with the centers as the coordinates of the visible points. For those occluded points, the attention maps are all zeros. We resize the attention maps to be \\( 14 \\times 14 \\) to match the size of the global feature maps \\( F \\)."
813
- },
814
- {
815
- "type": "text",
816
- "bbox": [
817
- 0.076,
818
- 0.491,
819
- 0.47,
820
- 0.809
821
- ],
822
- "angle": 0,
823
- "content": "Training and Testing. We employ the ResNet50 as our backbone, removing the average pooling layer and the fully connected layer. We modify the stride of conv4_1 from 2 to 1, so a larger feature map with size \\(14 \\times 14\\) is obtained. We initialize the model with the weights pre-trained on ImageNet [8]. The mini-batch size is set to be 128, the momentum is 0.9, and the weight decay is 0.0005. The learning rate starts at 0.1, and decreased by 10 after 20 epochs. We train the model for a total of 60 epochs. Stochastic Gradient Descent (SGD) is adopted as the optimization algorithm. During training, only random flipping is used for data augmentation. For testing, a single image is used and the predication scores from the landmark-guided attention branch and the facial region branch are averaged to get the final prediction score. The settings are same for all the experiments. For evaluation, the total accuracy metric is adopted. Considering the imbalance of the expression classes, a confusion matrix is also employed to show the average class accuracy. The deep learning software Pytorch [27] is used to conduct the experiments. Upon publication, the codes and trained expression models will be made publicly available."
824
- },
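As a rough sketch of this training recipe (SGD, batch size 128, learning rate 0.1 decreased by a factor of 10, 60 epochs), with a tiny stand-in model and random data so the snippet runs on its own; the real setup trains the two-branch network with the joint loss of Equation (5).

```python
import torch
import torch.nn as nn

model = nn.Linear(10, 7)   # stand-in for the two-branch network, just to keep this runnable
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)  # lr / 10 every 20 epochs

for epoch in range(60):
    logits = model(torch.randn(128, 10))                     # mini-batch size 128
    loss = nn.functional.cross_entropy(logits, torch.randint(0, 7, (128,)))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()
```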
825
- {
826
- "type": "title",
827
- "bbox": [
828
- 0.077,
829
- 0.818,
830
- 0.27,
831
- 0.834
832
- ],
833
- "angle": 0,
834
- "content": "4.3. Results Comparison"
835
- },
836
- {
837
- "type": "text",
838
- "bbox": [
839
- 0.076,
840
- 0.84,
841
- 0.471,
842
- 0.903
843
- ],
844
- "angle": 0,
845
- "content": "RAF [16] contains 30,000 in-the-wild facial expression images, annotated with basic or compound expressions by forty independent human labelers. In this experiment, only images with seven basic expressions are used, including"
846
- },
847
- {
848
- "type": "table_caption",
849
- "bbox": [
850
- 0.538,
851
- 0.088,
852
- 0.855,
853
- 0.101
854
- ],
855
- "angle": 0,
856
- "content": "Table 2. Validation set accuracy on AffectNet dataset"
857
- },
858
- {
859
- "type": "table",
860
- "bbox": [
861
- 0.545,
862
- 0.101,
863
- 0.849,
864
- 0.259
865
- ],
866
- "angle": 0,
867
- "content": "<table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>59.50%</td></tr><tr><td>OADN(ours)</td><td>64.06%</td></tr><tr><td>VGG16 [29]</td><td>51.11%</td></tr><tr><td>GAN-Inpainting [34]</td><td>52.97%</td></tr><tr><td>DLP-CNN [16]</td><td>54.47%</td></tr><tr><td>PG-CNN [18]</td><td>55.33%</td></tr><tr><td>ResNet-PL [26]</td><td>56.42%</td></tr><tr><td>gACNN [17]</td><td>58.78%</td></tr><tr><td>OADN(ours)</td><td>61.89%</td></tr></table>"
868
- },
869
- {
870
- "type": "text",
871
- "bbox": [
872
- 0.5,
873
- 0.296,
874
- 0.771,
875
- 0.311
876
- ],
877
- "angle": 0,
878
- "content": "12,271 for training and 3,068 for testing."
879
- },
880
- {
881
- "type": "text",
882
- "bbox": [
883
- 0.498,
884
- 0.312,
885
- 0.892,
886
- 0.524
887
- ],
888
- "angle": 0,
889
- "content": "Table 1 shows the results of our method and previous works. Our OADN achieves \\(87.16\\%\\) in terms of total accuracy on the test set, outperforming all the previous methods. Compared with the strongest competing method in the same setting gACNN [17], OADN surpasses it by \\(2.1\\%\\). This is because OADN explicitly utilizes the meta information of landmarks to depress the noisy information from the occluded regions and enhances the robustness with multiple region-based classifiers. To have a fair comparison with [31], we also pre-trained our model on a large-scale face recognition dataset VGGFace2 [7]. OADN achieves a new state-of-the-art result with an accuracy of \\(89.83\\%\\) to the best of our knowledge, outperforming RAN by \\(2.93\\%\\). This validates the superiority of the proposed method."
890
- },
891
- {
892
- "type": "image",
893
- "bbox": [
894
- 0.514,
895
- 0.537,
896
- 0.877,
897
- 0.808
898
- ],
899
- "angle": 0,
900
- "content": null
901
- },
902
- {
903
- "type": "image_caption",
904
- "bbox": [
905
- 0.498,
906
- 0.813,
907
- 0.892,
908
- 0.841
909
- ],
910
- "angle": 0,
911
- "content": "Figure 4. Confusion matrix for RAF-DB dataset. The darker the color, the higher the accuracy."
912
- },
913
- {
914
- "type": "text",
915
- "bbox": [
916
- 0.498,
917
- 0.856,
918
- 0.892,
919
- 0.901
920
- ],
921
- "angle": 0,
922
-  "content": "We show the confusion matrix in Figure 4. It is observed that Fear and Disgust are the two most confusing expressions, where Fear is easily confused with Surprise because"
923
- }
924
- ],
925
- [
926
- {
927
- "type": "table_caption",
928
- "bbox": [
929
- 0.078,
930
- 0.088,
931
- 0.47,
932
- 0.114
933
- ],
934
- "angle": 0,
935
- "content": "Table 3. Validation set accuracy on Occlusion-AffectNet and Pose-AffectNet dataset"
936
- },
937
- {
938
- "type": "table",
939
- "bbox": [
940
- 0.079,
941
- 0.139,
942
- 0.505,
943
- 0.193
944
- ],
945
- "angle": 0,
946
- "content": "<table><tr><td>Method</td><td>Occ. Acc.</td><td>Pose&gt;30 Acc.</td><td>Pose&gt;45 Acc.</td></tr><tr><td>RAN [31]</td><td>58.50%</td><td>53.90%</td><td>53.19%</td></tr><tr><td>OADN(ours)</td><td>64.02%</td><td>61.12%</td><td>61.08%</td></tr></table>"
947
- },
948
- {
949
- "type": "text",
950
- "bbox": [
951
- 0.077,
952
- 0.232,
953
- 0.47,
954
- 0.262
955
- ],
956
- "angle": 0,
957
-  "content": "of their similar facial appearance, while Disgust is mainly confused with Neutral due to the subtleness of the expression."
958
- },
959
- {
960
- "type": "image",
961
- "bbox": [
962
- 0.096,
963
- 0.279,
964
- 0.454,
965
- 0.549
966
- ],
967
- "angle": 0,
968
- "content": null
969
- },
970
- {
971
- "type": "image_caption",
972
- "bbox": [
973
- 0.076,
974
- 0.553,
975
- 0.47,
976
- 0.582
977
- ],
978
- "angle": 0,
979
-  "content": "Figure 5. Confusion matrix for AffectNet dataset. The darker the color, the higher the accuracy."
980
- },
981
- {
982
- "type": "text",
983
- "bbox": [
984
- 0.076,
985
- 0.598,
986
- 0.47,
987
- 0.839
988
- ],
989
- "angle": 0,
990
-  "content": "AffectNet [25] is currently the largest expression dataset. It contains about 400,000 images manually annotated with seven discrete facial expressions and the intensity of valence and arousal. Following the experiment setting in [17], we only use images with neutral and the six basic emotions, giving 280,000 images for training and 3,500 images from the validation set for testing, since the test set is not publicly available. Very recently, Wang et al. [31] released the Occlusion-AffectNet and Pose-AffectNet datasets, where only images with challenging conditions are selected as the test sets. For Occlusion-AffectNet, each image contains at least one type of occlusion, such as a mask or glasses; there are a total of 682 images. For Pose-AffectNet, images with pose angles larger than 30 and 45 degrees are collected; the numbers of images are 1,949 and 985, respectively."
991
- },
992
- {
993
- "type": "text",
994
- "bbox": [
995
- 0.076,
996
- 0.84,
997
- 0.47,
998
- 0.902
999
- ],
1000
- "angle": 0,
1001
-  "content": "As shown in Table 2, OADN achieves the best performance with an accuracy of \\(61.89\\%\\) on the validation set. Compared to gACNN [17], the strongest competing method in the same setting, OADN surpasses it by \\(3.1\\%\\), which"
1002
- },
1003
- {
1004
- "type": "table_caption",
1005
- "bbox": [
1006
- 0.559,
1007
- 0.088,
1008
- 0.835,
1009
- 0.1
1010
- ],
1011
- "angle": 0,
1012
- "content": "Table 4. Test set accuracy on FED-RO dataset"
1013
- },
1014
- {
1015
- "type": "table",
1016
- "bbox": [
1017
- 0.546,
1018
- 0.101,
1019
- 0.848,
1020
- 0.259
1021
- ],
1022
- "angle": 0,
1023
- "content": "<table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>67.98%</td></tr><tr><td>OADN(ours)</td><td>71.17%</td></tr><tr><td>VGG16 [29]</td><td>51.11%</td></tr><tr><td>ResNet18 [13]</td><td>64.25%</td></tr><tr><td>GAN-Inpainting [34]</td><td>58.33%</td></tr><tr><td>DLP-CNN [16]</td><td>60.31%</td></tr><tr><td>PG-CNN [18]</td><td>64.25%</td></tr><tr><td>gACNN [17]</td><td>66.50%</td></tr><tr><td>OADN(ours)</td><td>68.11%</td></tr></table>"
1024
- },
1025
- {
1026
- "type": "text",
1027
- "bbox": [
1028
- 0.498,
1029
- 0.297,
1030
- 0.892,
1031
- 0.477
1032
- ],
1033
- "angle": 0,
1034
-  "content": "is a reasonable improvement. OADN also outperforms RAN [31] by \\(4.56\\%\\) when both are pre-trained on a large-scale face recognition dataset. On the Occlusion-AffectNet and Pose-AffectNet datasets, the performance gap between OADN and RAN is further increased: OADN exceeds RAN by \\(5.52\\%\\), \\(7.22\\%\\) and \\(7.89\\%\\) on the test sets with occlusion, pose greater than 30 degrees and pose greater than 45 degrees, respectively. This validates the effectiveness of the proposed method for the occluded facial expression recognition problem. The confusion matrix is shown in Figure 5. From it we can see that Disgust and Anger are the most difficult expressions to classify."
1035
- },
1036
- {
1037
- "type": "text",
1038
- "bbox": [
1039
- 0.498,
1040
- 0.478,
1041
- 0.892,
1042
- 0.583
1043
- ],
1044
- "angle": 0,
1045
- "content": "FED-RO [17] is a recently released facial expression dataset with real world occlusions. Each image has natural occlusions including sunglasses, medical mask, hands and hair. It contains 400 images labeled with seven expressions for testing. We train our model on the joint training data of RAF and AffectNet, following the protocol suggested in [17]."
1046
- },
1047
- {
1048
- "type": "text",
1049
- "bbox": [
1050
- 0.498,
1051
- 0.584,
1052
- 0.892,
1053
- 0.689
1054
- ],
1055
- "angle": 0,
1056
- "content": "As shown in Table 4, OADN achieves the best performance with an accuracy of \\(68.11\\%\\), improving over gACNN by \\(1.61\\%\\). OADN also outperforms RAN by \\(3.19\\%\\) when pre-trained on a face recognition dataset. From the confusion matrix shown in Figure 6, we can see both Surprise and Happy have high accuracy, while Fear and Disgust are easily confused with Surprise and Sad."
1057
- },
1058
- {
1059
- "type": "text",
1060
- "bbox": [
1061
- 0.498,
1062
- 0.689,
1063
- 0.892,
1064
- 0.885
1065
- ],
1066
- "angle": 0,
1067
-  "content": "FERPlus [2] is a real-world facial expression dataset initially introduced in the ICML 2013 Challenge [12]. It consists of 28,709 training images, 3,589 validation images and 3,589 test images. Each image is labeled with one of eight expressions by 10 independent taggers. Recently, Wang et al. [31] released the Occlusion-FERPlus and Pose-FERPlus datasets, where images under occlusion and large pose (\\(>30\\) and \\(>45\\) degrees) are collected from the FERPlus test set. Occlusion-FERPlus has a total of 605 images, while Pose-FERPlus has 1,171 and 634 images with pose larger than 30 and 45 degrees, respectively. Following [31], we train our model on the FERPlus training data and test on these challenging sets."
1068
- },
1069
- {
1070
- "type": "text",
1071
- "bbox": [
1072
- 0.519,
1073
- 0.886,
1074
- 0.892,
1075
- 0.902
1076
- ],
1077
- "angle": 0,
1078
- "content": "Table 5 reports the test accuracy. The OADN sig"
1079
- }
1080
- ],
1081
- [
1082
- {
1083
- "type": "image",
1084
- "bbox": [
1085
- 0.091,
1086
- 0.09,
1087
- 0.452,
1088
- 0.362
1089
- ],
1090
- "angle": 0,
1091
- "content": null
1092
- },
1093
- {
1094
- "type": "image_caption",
1095
- "bbox": [
1096
- 0.076,
1097
- 0.365,
1098
- 0.47,
1099
- 0.395
1100
- ],
1101
- "angle": 0,
1102
- "content": "Figure 6. Confusion matrix for FED-RO dataset. The darker the color, the higher the accuracy."
1103
- },
1104
- {
1105
- "type": "table_caption",
1106
- "bbox": [
1107
- 0.076,
1108
- 0.405,
1109
- 0.47,
1110
- 0.434
1111
- ],
1112
- "angle": 0,
1113
- "content": "Table 5. Test set accuracy on Occlusion-FERPlus and Pose-FERPlus dataset"
1114
- },
1115
- {
1116
- "type": "table",
1117
- "bbox": [
1118
- 0.078,
1119
- 0.456,
1120
- 0.501,
1121
- 0.51
1122
- ],
1123
- "angle": 0,
1124
- "content": "<table><tr><td>Method</td><td>Occ. Acc.</td><td>Pose&gt;30 Acc.</td><td>Pose&gt;45 Acc.</td></tr><tr><td>RAN [31]</td><td>83.63%</td><td>82.23%</td><td>80.40%</td></tr><tr><td>OADN(ours)</td><td>84.57%</td><td>88.52%</td><td>87.50%</td></tr></table>"
1125
- },
1126
- {
1127
- "type": "text",
1128
- "bbox": [
1129
- 0.076,
1130
- 0.548,
1131
- 0.47,
1132
- 0.64
1133
- ],
1134
- "angle": 0,
1135
- "content": "nificantly surpasses RAN by a large margin with \\(6.29\\%\\) and \\(7.10\\%\\) improvements on the Pose-FERPlus datasets. OADN also achieves better performance on the Occlusion-FERPlus dataset. This validates the effectiveness of our method for recognizing facial expressions under challenging conditions."
1136
- },
1137
- {
1138
- "type": "title",
1139
- "bbox": [
1140
- 0.077,
1141
- 0.65,
1142
- 0.231,
1143
- 0.667
1144
- ],
1145
- "angle": 0,
1146
- "content": "4.4. Ablation Study"
1147
- },
1148
- {
1149
- "type": "text",
1150
- "bbox": [
1151
- 0.076,
1152
- 0.674,
1153
- 0.47,
1154
- 0.703
1155
- ],
1156
- "angle": 0,
1157
- "content": "In this section, we conduct ablation studies on the RAF dataset to analyze each component of OADN."
1158
- },
1159
- {
1160
- "type": "text",
1161
- "bbox": [
1162
- 0.076,
1163
- 0.704,
1164
- 0.471,
1165
- 0.884
1166
- ],
1167
- "angle": 0,
1168
-  "content": "The impact of the landmark confidence threshold \\( T \\). The confidence scores of the interest points are utilized to select the interest points from non-occluded facial areas. From Equation (1), points with confidence scores higher than \\( T \\) are kept. We can see from Figure 7 (a) that with \\( T = 0.6 \\), OADN achieves the best performance. When \\( T \\) is further increased, the performance drops quickly since some important facial areas that may not be occluded are also discarded. On the other hand, when \\( T \\) becomes less than 0.6, OADN starts to perform worse. This is because noisy information from the occluded areas is also included, which degrades the clean features."
1169
- },
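As a concrete illustration of the thresholding rule from Equation (1) discussed above, a small sketch (the array layout of the detector output is an assumption):

```python
import numpy as np

def select_interest_points(points, scores, T=0.6):
    """Keep interest points whose detector confidence is at least T;
    points below the threshold are zeroed out and treated as occluded."""
    points = np.asarray(points, dtype=np.float32)  # shape (M, 2): (x_i, y_i)
    scores = np.asarray(scores, dtype=np.float32)  # shape (M,): confidence in [0, 1]
    visible = scores >= T
    return np.where(visible[:, None], points, 0.0), visible
```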
1170
- {
1171
- "type": "text",
1172
- "bbox": [
1173
- 0.097,
1174
- 0.886,
1175
- 0.47,
1176
- 0.902
1177
- ],
1178
- "angle": 0,
1179
- "content": "The impact of the number of regions \\(K\\). In the facial"
1180
- },
1181
- {
1182
- "type": "image",
1183
- "bbox": [
1184
- 0.505,
1185
- 0.089,
1186
- 0.633,
1187
- 0.174
1188
- ],
1189
- "angle": 0,
1190
- "content": null
1191
- },
1192
- {
1193
- "type": "image",
1194
- "bbox": [
1195
- 0.642,
1196
- 0.089,
1197
- 0.772,
1198
- 0.174
1199
- ],
1200
- "angle": 0,
1201
- "content": null
1202
- },
1203
- {
1204
- "type": "image",
1205
- "bbox": [
1206
- 0.782,
1207
- 0.089,
1208
- 0.91,
1209
- 0.174
1210
- ],
1211
- "angle": 0,
1212
- "content": null
1213
- },
1214
- {
1215
- "type": "image_caption",
1216
- "bbox": [
1217
- 0.499,
1218
- 0.178,
1219
- 0.894,
1220
- 0.22
1221
- ],
1222
- "angle": 0,
1223
- "content": "Figure 7. The impacts of the confidence threshold \\( T \\), number of regions \\( K \\) and the loss combination weight \\( \\lambda \\) on the performance of OADN."
1224
- },
1225
- {
1226
- "type": "text",
1227
- "bbox": [
1228
- 0.498,
1229
- 0.248,
1230
- 0.892,
1231
- 0.43
1232
- ],
1233
- "angle": 0,
1234
-  "content": "region branch, we partition the global feature maps into \\( K \\) blocks and train an expression classifier on each block independently, so \\( K \\) decides the granularity of the part-level features. From Figure 7 (b), it is observed that the best accuracy is achieved at \\( K = 4 \\). When \\( K = 1 \\), the facial region branch is equivalent to the standard ResNet50 classifier, and its worse performance indicates the necessity of learning features at the part level. However, increasing \\( K \\) to a large number such as 16 does not bring further improvement. This is because when a facial region is too small, it lacks enough information to make the prediction under occlusion, so the classifiers are confused and training stagnates."
1235
- },
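To make the granularity concrete, a sketch of splitting the 14x14 global feature maps into K non-overlapping blocks; the square grid layout and the even split are assumptions, since the exact partition is not spelled out here:

```python
import torch

def split_into_blocks(feature, grid=2):
    """Partition a (C, H, W) feature map into a grid x grid layout,
    giving K = grid * grid non-overlapping blocks (grid=2 is the K=4 setting)."""
    c, h, w = feature.shape
    bh, bw = h // grid, w // grid
    return [feature[:, i * bh:(i + 1) * bh, j * bw:(j + 1) * bw]
            for i in range(grid) for j in range(grid)]

feat = torch.randn(2048, 14, 14)
print([tuple(b.shape) for b in split_into_blocks(feat, grid=2)])  # four (2048, 7, 7) blocks
```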
1236
- {
1237
- "type": "text",
1238
- "bbox": [
1239
- 0.498,
1240
- 0.432,
1241
- 0.893,
1242
- 0.614
1243
- ],
1244
- "angle": 0,
1245
-  "content": "The impact of the loss combination weight \\(\\lambda\\). To train OADN, we jointly optimize the losses from the landmark-guided attention branch (LAB) and the facial region branch (FRB) as defined in Equation (5). The loss weight \\(\\lambda\\) controls the relative importance of each loss: when \\(\\lambda\\) equals 1, only LAB is utilized, while \\(\\lambda = 0\\) means only FRB is used. From Figure 7 (c), we find that LAB alone obtains better performance than FRB alone, since the network is guided to attend to the most discriminative facial areas, and combining the two branches achieves better performance than using either branch alone. This validates the effectiveness of the complementary features learned by the two branches."
1246
- },
1247
- {
1248
- "type": "title",
1249
- "bbox": [
1250
- 0.5,
1251
- 0.63,
1252
- 0.638,
1253
- 0.645
1254
- ],
1255
- "angle": 0,
1256
- "content": "4.5. Visualization"
1257
- },
1258
- {
1259
- "type": "text",
1260
- "bbox": [
1261
- 0.498,
1262
- 0.656,
1263
- 0.892,
1264
- 0.746
1265
- ],
1266
- "angle": 0,
1267
-  "content": "Figure 8 shows some expression recognition examples of gACNN [17] and our OADN method on the FED-RO dataset. The classification results show that gACNN is vulnerable to large head poses and heavy facial occlusions. In contrast, OADN works successfully under these conditions."
1268
- },
1269
- {
1270
- "type": "image",
1271
- "bbox": [
1272
- 0.513,
1273
- 0.763,
1274
- 0.882,
1275
- 0.837
1276
- ],
1277
- "angle": 0,
1278
- "content": null
1279
- },
1280
- {
1281
- "type": "image_caption",
1282
- "bbox": [
1283
- 0.498,
1284
- 0.839,
1285
- 0.892,
1286
- 0.881
1287
- ],
1288
- "angle": 0,
1289
-  "content": "Figure 8. Comparison of the gACNN method and our OADN method on the FED-RO dataset. Red and green text indicates incorrect and correct predictions, respectively."
1290
- }
1291
- ],
1292
- [
1293
- {
1294
- "type": "title",
1295
- "bbox": [
1296
- 0.078,
1297
- 0.09,
1298
- 0.205,
1299
- 0.107
1300
- ],
1301
- "angle": 0,
1302
- "content": "5. Conclusions"
1303
- },
1304
- {
1305
- "type": "text",
1306
- "bbox": [
1307
- 0.076,
1308
- 0.115,
1309
- 0.473,
1310
- 0.298
1311
- ],
1312
- "angle": 0,
1313
-  "content": "In this paper, we present an occlusion-adaptive deep network to tackle the occluded facial expression recognition problem. The network is composed of two branches: the landmark-guided attention branch guides the network to learn clean features from the non-occluded facial areas, while the facial region branch increases robustness by dividing the feature maps of the last convolutional layer among several region-based classifiers. We conduct extensive experiments on both challenging in-the-wild expression datasets and real-world occluded expression datasets. The results show that our method outperforms existing methods and achieves robustness against occlusion and various poses."
1314
- },
1315
- {
1316
- "type": "title",
1317
- "bbox": [
1318
- 0.077,
1319
- 0.309,
1320
- 0.258,
1321
- 0.327
1322
- ],
1323
- "angle": 0,
1324
- "content": "6. Acknowledgement"
1325
- },
1326
- {
1327
- "type": "text",
1328
- "bbox": [
1329
- 0.076,
1330
- 0.334,
1331
- 0.473,
1332
- 0.502
1333
- ],
1334
- "angle": 0,
1335
- "content": "This research is based upon work supported by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA R&D Contract No. 2019-022600002. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright annotation thereon."
1336
- },
1337
- {
1338
- "type": "title",
1339
- "bbox": [
1340
- 0.078,
1341
- 0.512,
1342
- 0.175,
1343
- 0.529
1344
- ],
1345
- "angle": 0,
1346
- "content": "References"
1347
- },
1348
- {
1349
- "type": "ref_text",
1350
- "bbox": [
1351
- 0.085,
1352
- 0.537,
1353
- 0.47,
1354
- 0.606
1355
- ],
1356
- "angle": 0,
1357
- "content": "[1] D. Acharya, Z. Huang, D. Pani Paudel, and L. Van Gool. Covariance pooling for facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 367-374, 2018."
1358
- },
1359
- {
1360
- "type": "ref_text",
1361
- "bbox": [
1362
- 0.087,
1363
- 0.608,
1364
- 0.472,
1365
- 0.676
1366
- ],
1367
- "angle": 0,
1368
- "content": "[2] E. Barsoum, C. Zhang, C. C. Ferrer, and Z. Zhang. Training deep networks for facial expression recognition with crowdsourced label distribution. In Proceedings of the 18th ACM International Conference on Multimodal Interaction, pages 279-283, 2016."
1369
- },
1370
- {
1371
- "type": "ref_text",
1372
- "bbox": [
1373
- 0.087,
1374
- 0.678,
1375
- 0.47,
1376
- 0.747
1377
- ],
1378
- "angle": 0,
1379
-  "content": "[3] B. Bozorgtabar, M. S. Rad, H. K. Ekenel, and J.-P. Thiran. Using photorealistic face synthesis and domain adaptation to improve facial expression analysis. In 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-8. IEEE, 2019."
1380
- },
1381
- {
1382
- "type": "ref_text",
1383
- "bbox": [
1384
- 0.087,
1385
- 0.748,
1386
- 0.47,
1387
- 0.815
1388
- ],
1389
- "angle": 0,
1390
- "content": "[4] A. Bulat and G. Tzimiropoulos. How far are we from solving the 2D & 3D face alignment problem? (and a dataset of 230,000 3D facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 1021-1030, 2017."
1391
- },
1392
- {
1393
- "type": "ref_text",
1394
- "bbox": [
1395
- 0.087,
1396
- 0.817,
1397
- 0.47,
1398
- 0.873
1399
- ],
1400
- "angle": 0,
1401
- "content": "[5] J. Cai, Z. Meng, A. S. Khan, Z. Li, J. O'Reilly, and Y. Tong. Identity-free facial expression recognition using conditional generative adversarial network. arXiv preprint arXiv:1903.08051, 2019."
1402
- },
1403
- {
1404
- "type": "ref_text",
1405
- "bbox": [
1406
- 0.087,
1407
- 0.874,
1408
- 0.47,
1409
- 0.901
1410
- ],
1411
- "angle": 0,
1412
-  "content": "[6] J. Cai, Z. Meng, A. S. Khan, Z. Li, J. O'Reilly, and Y. Tong. Island loss for learning discriminative features in facial ex"
1413
- },
1414
- {
1415
- "type": "list",
1416
- "bbox": [
1417
- 0.085,
1418
- 0.537,
1419
- 0.472,
1420
- 0.901
1421
- ],
1422
- "angle": 0,
1423
- "content": null
1424
- },
1425
- {
1426
- "type": "ref_text",
1427
- "bbox": [
1428
- 0.533,
1429
- 0.093,
1430
- 0.892,
1431
- 0.135
1432
- ],
1433
- "angle": 0,
1434
- "content": "pression recognition. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 302-309. IEEE, 2018."
1435
- },
1436
- {
1437
- "type": "ref_text",
1438
- "bbox": [
1439
- 0.51,
1440
- 0.136,
1441
- 0.892,
1442
- 0.205
1443
- ],
1444
- "angle": 0,
1445
- "content": "[7] Q. Cao, L. Shen, W. Xie, O. M. Parkhi, and A. Zisserman. Vggface2: A dataset for recognising faces across pose and age. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 67-74. IEEE, 2018."
1446
- },
1447
- {
1448
- "type": "ref_text",
1449
- "bbox": [
1450
- 0.51,
1451
- 0.207,
1452
- 0.892,
1453
- 0.262
1454
- ],
1455
- "angle": 0,
1456
- "content": "[8] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255. IEEE, 2009."
1457
- },
1458
- {
1459
- "type": "ref_text",
1460
- "bbox": [
1461
- 0.51,
1462
- 0.264,
1463
- 0.892,
1464
- 0.319
1465
- ],
1466
- "angle": 0,
1467
- "content": "[9] X. Dong, Y. Yan, W. Ouyang, and Y. Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 379-388, 2018."
1468
- },
1469
- {
1470
- "type": "ref_text",
1471
- "bbox": [
1472
- 0.504,
1473
- 0.321,
1474
- 0.892,
1475
- 0.375
1476
- ],
1477
- "angle": 0,
1478
- "content": "[10] C. Florea, L. Florea, M. Badea, C. Vertan, and A. Racoviteanu. Annealed label transfer for face expression recognition. In British Machine Vision Conference (BMVC), 2019."
1479
- },
1480
- {
1481
- "type": "ref_text",
1482
- "bbox": [
1483
- 0.504,
1484
- 0.377,
1485
- 0.892,
1486
- 0.433
1487
- ],
1488
- "angle": 0,
1489
- "content": "[11] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, pages 2672–2680, 2014."
1490
- },
1491
- {
1492
- "type": "ref_text",
1493
- "bbox": [
1494
- 0.504,
1495
- 0.435,
1496
- 0.892,
1497
- 0.517
1498
- ],
1499
- "angle": 0,
1500
-  "content": "[12] I. J. Goodfellow, D. Erhan, P. L. Carrier, A. Courville, M. Mirza, B. Hamner, W. Cukierski, Y. Tang, D. Thaler, D.-H. Lee, et al. Challenges in representation learning: A report on three machine learning contests. In International Conference on Neural Information Processing, pages 117-124. Springer, 2013."
1501
- },
1502
- {
1503
- "type": "ref_text",
1504
- "bbox": [
1505
- 0.504,
1506
- 0.519,
1507
- 0.892,
1508
- 0.574
1509
- ],
1510
- "angle": 0,
1511
- "content": "[13] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016."
1512
- },
1513
- {
1514
- "type": "ref_text",
1515
- "bbox": [
1516
- 0.504,
1517
- 0.576,
1518
- 0.892,
1519
- 0.631
1520
- ],
1521
- "angle": 0,
1522
- "content": "[14] S. Jyoti, G. Sharma, and A. Dhall. Expression empowered resident network for facial action unit detection. In 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-8. IEEE, 2019."
1523
- },
1524
- {
1525
- "type": "ref_text",
1526
- "bbox": [
1527
- 0.504,
1528
- 0.633,
1529
- 0.892,
1530
- 0.687
1531
- ],
1532
- "angle": 0,
1533
- "content": "[15] S. Li and W. Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2018."
1534
- },
1535
- {
1536
- "type": "ref_text",
1537
- "bbox": [
1538
- 0.504,
1539
- 0.689,
1540
- 0.892,
1541
- 0.757
1542
- ],
1543
- "angle": 0,
1544
- "content": "[16] S. Li, W. Deng, and J. Du. Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2852-2861, 2017."
1545
- },
1546
- {
1547
- "type": "ref_text",
1548
- "bbox": [
1549
- 0.504,
1550
- 0.759,
1551
- 0.892,
1552
- 0.815
1553
- ],
1554
- "angle": 0,
1555
- "content": "[17] Y. Li, J. Zeng, S. Shan, and X. Chen. Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE Transactions on Image Processing, 28(5):2439-2450, 2018."
1556
- },
1557
- {
1558
- "type": "ref_text",
1559
- "bbox": [
1560
- 0.504,
1561
- 0.817,
1562
- 0.892,
1563
- 0.872
1564
- ],
1565
- "angle": 0,
1566
- "content": "[18] Y. Li, J. Zeng, S. Shan, and X. Chen. Patch-gated CNN for occlusion-aware facial expression recognition. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 2209-2214. IEEE, 2018."
1567
- },
1568
- {
1569
- "type": "ref_text",
1570
- "bbox": [
1571
- 0.504,
1572
- 0.874,
1573
- 0.892,
1574
- 0.901
1575
- ],
1576
- "angle": 0,
1577
- "content": "[19] M. Liu, S. Li, S. Shan, and X. Chen. AU-aware deep networks for facial expression recognition. In IEEE Interna"
1578
- },
1579
- {
1580
- "type": "list",
1581
- "bbox": [
1582
- 0.504,
1583
- 0.093,
1584
- 0.892,
1585
- 0.901
1586
- ],
1587
- "angle": 0,
1588
- "content": null
1589
- }
1590
- ],
1591
- [
1592
- {
1593
- "type": "ref_text",
1594
- "bbox": [
1595
- 0.111,
1596
- 0.093,
1597
- 0.468,
1598
- 0.12
1599
- ],
1600
- "angle": 0,
1601
- "content": "tional Conference on Automatic Face & Gesture Recognition Workshops, pages 1-6, 2013."
1602
- },
1603
- {
1604
- "type": "ref_text",
1605
- "bbox": [
1606
- 0.08,
1607
- 0.123,
1608
- 0.469,
1609
- 0.191
1610
- ],
1611
- "angle": 0,
1612
- "content": "[20] M. Liu, S. Shan, R. Wang, and X. Chen. Learning expressionlets on spatio-temporal manifold for dynamic facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1749-1756, 2014."
1613
- },
1614
- {
1615
- "type": "ref_text",
1616
- "bbox": [
1617
- 0.08,
1618
- 0.195,
1619
- 0.469,
1620
- 0.25
1621
- ],
1622
- "angle": 0,
1623
- "content": "[21] P. Liu, S. Han, Z. Meng, and Y. Tong. Facial expression recognition via a boosted deep belief network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1805-1812, 2014."
1624
- },
1625
- {
1626
- "type": "ref_text",
1627
- "bbox": [
1628
- 0.08,
1629
- 0.254,
1630
- 0.469,
1631
- 0.322
1632
- ],
1633
- "angle": 0,
1634
- "content": "[22] P. Liu, J. T. Zhou, I. W.-H. Tsang, Z. Meng, S. Han, and Y. Tong. Feature disentangling machine-a novel approach of feature selection and disentangling in facial expression analysis. In European Conference on Computer Vision (ECCV), pages 151-166. 2014."
1635
- },
1636
- {
1637
- "type": "ref_text",
1638
- "bbox": [
1639
- 0.08,
1640
- 0.325,
1641
- 0.469,
1642
- 0.407
1643
- ],
1644
- "angle": 0,
1645
- "content": "[23] P. Lucey, J. F. Cohn, T. Kanade, J. Saragih, Z. Ambadar, and I. Matthews. The extended cohn-kanade dataset (CK+): A complete dataset for action unit and emotion-specified expression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 94–101. IEEE, 2010."
1646
- },
1647
- {
1648
- "type": "ref_text",
1649
- "bbox": [
1650
- 0.08,
1651
- 0.411,
1652
- 0.469,
1653
- 0.465
1654
- ],
1655
- "angle": 0,
1656
- "content": "[24] Z. Luo, J. Hu, and W. Deng. Local subclass constraint for facial expression recognition in the wild. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 3132-3137. IEEE, 2018."
1657
- },
1658
- {
1659
- "type": "ref_text",
1660
- "bbox": [
1661
- 0.08,
1662
- 0.469,
1663
- 0.469,
1664
- 0.524
1665
- ],
1666
- "angle": 0,
1667
- "content": "[25] A. Mollahosseini, B. Hasani, and M. H. Mahoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Transactions on Affective Computing, 10(1):18-31, 2017."
1668
- },
1669
- {
1670
- "type": "ref_text",
1671
- "bbox": [
1672
- 0.08,
1673
- 0.527,
1674
- 0.469,
1675
- 0.582
1676
- ],
1677
- "angle": 0,
1678
- "content": "[26] B. Pan, S. Wang, and B. Xia. Occluded facial expression recognition enhanced through privileged information. In Proceedings of the 27th ACM International Conference on Multimedia, pages 566-573, 2019."
1679
- },
1680
- {
1681
- "type": "ref_text",
1682
- "bbox": [
1683
- 0.08,
1684
- 0.585,
1685
- 0.469,
1686
- 0.655
1687
- ],
1688
- "angle": 0,
1689
- "content": "[27] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L. Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, pages 8024-8035, 2019."
1690
- },
1691
- {
1692
- "type": "ref_text",
1693
- "bbox": [
1694
- 0.08,
1695
- 0.657,
1696
- 0.469,
1697
- 0.726
1698
- ],
1699
- "angle": 0,
1700
- "content": "[28] C. Sagonas, G. Tzimiropoulos, S. Zafeiriou, and M. Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCVW), pages 397-403, 2013."
1701
- },
1702
- {
1703
- "type": "ref_text",
1704
- "bbox": [
1705
- 0.08,
1706
- 0.729,
1707
- 0.469,
1708
- 0.771
1709
- ],
1710
- "angle": 0,
1711
- "content": "[29] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. International Conference on Learning Representations (ICLR), 2015."
1712
- },
1713
- {
1714
- "type": "ref_text",
1715
- "bbox": [
1716
- 0.08,
1717
- 0.774,
1718
- 0.469,
1719
- 0.843
1720
- ],
1721
- "angle": 0,
1722
- "content": "[30] M. Valstar and M. Pantic. Induced disgust, happiness and surprise: an addition to the MMI facial expression database. In Proc. 3rd Intern. Workshop on EMOTION (satellite of LREC): Corpora for Research on Emotion and Affect, page 65. Paris, France, 2010."
1723
- },
1724
- {
1725
- "type": "ref_text",
1726
- "bbox": [
1727
- 0.08,
1728
- 0.846,
1729
- 0.469,
1730
- 0.9
1731
- ],
1732
- "angle": 0,
1733
- "content": "[31] K. Wang, X. Peng, J. Yang, D. Meng, and Y. Qiao. Region attention networks for pose and occlusion robust facial expression recognition. IEEE Transactions on Image Processing, 2020."
1734
- },
1735
- {
1736
- "type": "list",
1737
- "bbox": [
1738
- 0.08,
1739
- 0.093,
1740
- 0.469,
1741
- 0.9
1742
- ],
1743
- "angle": 0,
1744
- "content": null
1745
- },
1746
- {
1747
- "type": "ref_text",
1748
- "bbox": [
1749
- 0.503,
1750
- 0.093,
1751
- 0.892,
1752
- 0.147
1753
- ],
1754
- "angle": 0,
1755
- "content": "[32] Y. Wen, K. Zhang, Z. Li, and Y. Qiao. A discriminative feature learning approach for deep face recognition. In European Conference on Computer Vision (ECCV), pages 499-515. Springer, 2016."
1756
- },
1757
- {
1758
- "type": "ref_text",
1759
- "bbox": [
1760
- 0.503,
1761
- 0.149,
1762
- 0.892,
1763
- 0.204
1764
- ],
1765
- "angle": 0,
1766
- "content": "[33] H. Yang, U. Ciftci, and L. Yin. Facial expression recognition by de-expression residue learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2168-2177, 2018."
1767
- },
1768
- {
1769
- "type": "ref_text",
1770
- "bbox": [
1771
- 0.503,
1772
- 0.206,
1773
- 0.892,
1774
- 0.261
1775
- ],
1776
- "angle": 0,
1777
- "content": "[34] J. Yu, Z. Lin, J. Yang, X. Shen, X. Lu, and T. S. Huang. Generative image inpainting with contextual attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5505-5514, 2018."
1778
- },
1779
- {
1780
- "type": "ref_text",
1781
- "bbox": [
1782
- 0.503,
1783
- 0.263,
1784
- 0.892,
1785
- 0.318
1786
- ],
1787
- "angle": 0,
1788
- "content": "[35] J. Zeng, S. Shan, and X. Chen. Facial expression recognition with inconsistently annotated datasets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 222-237, 2018."
1789
- },
1790
- {
1791
- "type": "ref_text",
1792
- "bbox": [
1793
- 0.503,
1794
- 0.32,
1795
- 0.892,
1796
- 0.374
1797
- ],
1798
- "angle": 0,
1799
- "content": "[36] F. Zhang, T. Zhang, Q. Mao, and C. Xu. Joint pose and expression modeling for facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3359-3368, 2018."
1800
- },
1801
- {
1802
- "type": "ref_text",
1803
- "bbox": [
1804
- 0.503,
1805
- 0.376,
1806
- 0.892,
1807
- 0.431
1808
- ],
1809
- "angle": 0,
1810
- "content": "[37] K. Zhang, Z. Zhang, Z. Li, and Y. Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Signal Processing Letters, 23(10):1499-1503, 2016."
1811
- },
1812
- {
1813
- "type": "ref_text",
1814
- "bbox": [
1815
- 0.503,
1816
- 0.433,
1817
- 0.892,
1818
- 0.473
1819
- ],
1820
- "angle": 0,
1821
-  "content": "[38] G. Zhao, X. Huang, M. Taini, S. Z. Li, and M. Pietikäinen. Facial expression recognition from near-infrared videos. Image and Vision Computing, 29(9):607-619, 2011."
1822
- },
1823
- {
1824
- "type": "ref_text",
1825
- "bbox": [
1826
- 0.503,
1827
- 0.475,
1828
- 0.892,
1829
- 0.543
1830
- ],
1831
- "angle": 0,
1832
- "content": "[39] L. Zhong, Q. Liu, P. Yang, B. Liu, J. Huang, and D. N. Metaxas. Learning active facial patches for expression analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2562-2569, 2012."
1833
- },
1834
- {
1835
- "type": "list",
1836
- "bbox": [
1837
- 0.503,
1838
- 0.093,
1839
- 0.892,
1840
- 0.543
1841
- ],
1842
- "angle": 0,
1843
- "content": null
1844
- }
1845
- ]
1846
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bb5bdf9307e51656347df2d0245050747c94f126b58655b2ed9cf52f4b433a2
3
+ size 78731
 
data/2020/2005_06xxx/2005.06040/full.md CHANGED
@@ -1,279 +1,3 @@
1
- # Occlusion-Adaptive Deep Network for Robust Facial Expression Recognition
2
-
3
- Hui Ding, Peng Zhou, and Rama Chellappa
4
-
5
- University of Maryland, College Park
6
-
7
- # Abstract
8
-
9
- Recognizing the expressions of partially occluded faces is a challenging computer vision problem. Previous expression recognition methods either overlooked this issue or resolved it using extreme assumptions. Motivated by the fact that the human visual system is adept at ignoring occlusions and focusing on non-occluded facial areas, we propose a landmark-guided attention branch to find and discard corrupted features from occluded regions so that they are not used for recognition. An attention map is first generated to indicate whether a specific facial part is occluded and to guide our model to attend to non-occluded regions. To further improve robustness, we propose a facial region branch that partitions the feature maps into non-overlapping facial blocks and tasks each block with predicting the expression independently. This results in more diverse and discriminative features, enabling the expression recognition system to recover even when the face is partially occluded. Benefiting from the synergistic effects of the two branches, our occlusion-adaptive deep network significantly outperforms state-of-the-art methods on two challenging in-the-wild benchmark datasets and three real-world occluded expression datasets.
10
-
11
- # 1. Introduction
12
-
13
- Facial expressions play an important role in social communication in our daily life. In recent years, automatically recognizing expressions has received increasing attention due to its wide range of applications, including driver safety, health care, video conferencing, virtual reality, and cognitive science.
14
-
15
- Existing methods that address expression recognition can be divided into two categories. One category utilized synthesis techniques to facilitate discriminative feature learning [33, 36, 3, 5]; While the other tried to boost the performance by designing new loss functions or network architectures [16, 35, 6, 1]. In the first category, de-expression residue learning proposed in [33] leveraged the neutral face images to distill the expression information from the cor
16
-
17
- responding expressive images. Zhang et al. [36] explored an adversarial autoencoder to generate facial images with different expressions under arbitrary poses to enlarge the training set. However, those works mainly focus on lab-collected datasets captured in controlled environments, such as $\mathrm{CK + }$ [23], MMI [30] and OULU-CASIA [38]. Although high accuracy results have been obtained on these datasets, they perform poorly when recognizing facial expressions in-the-wild. In the second category, Li et al. [16] proposed a locality preserving loss to enhance deep features by preserving the locality closeness measure while maximizing the inter-class scatters. To address the annotation inconsistencies among different facial expression datasets, Zeng et al. [35] introduced a probability transition layer to recover the latent truths from noisy labels. Although expression datasets under natural and uncontrollable conditions are explored, facial expression recognition under partial occlusions is still a challenging problem that has been relatively unexplored. In real-life images or videos, facial occlusions can often be observed, e.g., facial accessories including sunglasses, scarves, and masks or other random objects like hands, hairs and cups.
18
-
19
- Recently, some related works have been proposed to solve this challenge. Patch-gated Convolutional Neural Network [18] decomposed a face into different patches and explicitly predicted the occlusion likelihood of the corresponding patch using a patch-gated unit. Wang et al. [31] proposed a self-attention scheme to learn the importance weights for multiple facial regions. However, the unobstructed scores are learned without any ground truth on the occlusion information and may be biased. In this work, we present an Occlusion-Adaptive Deep Network (OADN) to overcome the occlusion problem for robust facial expression recognition in-the-wild. It consists of two branches: a landmark-guided attention branch and a facial region branch.
20
-
21
- The landmark-guided attention branch is proposed to discard feature elements that have been corrupted by occlusions. The interest points covering the most distinctive facial areas for facial expression recognition are computed
22
-
23
- based on the domain knowledge. Then the meta information of these points is utilized to generate the attention maps. The global features are modulated by the attention maps to guide the model to focus on the non-occluded facial regions and filter out the occluded regions.
24
-
25
- To further enhance robustness and learn complementary context information, we introduce a facial region branch to train multiple region-based expression classifiers. This is achieved by first partitioning the global feature maps into non-overlapping facial blocks. Then each block is trained by backpropagating the recognition loss independently. Thus, even when the face is partially occluded, the classifiers from other non-occluded regions are still able to function properly. Furthermore, since the expression datasets are usually small, having multiple region-based classifiers adds more supervision and acts as a regularizer to alleviate the overfitting issue.
26
-
27
- The main contributions in this work are summarized as follows:
28
-
29
- - We propose OADN, an effective method to deal with the occlusion problem for facial expression recognition in-the-wild.
30
- - We introduce a landmark-guided attention branch to guide the network to attend to non-occluded regions for representation learning.
31
- - We design a facial region branch to learn region-based classifiers for complementary context features and further increasing the robustness.
32
- - Experimental results on five challenging benchmark datasets show that our proposed OADN obtains significantly better performance than existing methods.
33
-
34
- # 2. Related Work
35
-
36
- # 2.1. Deep Facial Expression Recognition
37
-
38
- Deep learning methods [39, 19, 22, 21, 20, 33, 5, 36, 16, 6, 35, 24, 1] for facial expression recognition have achieved great success in the past few years. Based on the assumption that a facial expression is the combination of a neutral face image and the expressive component, Yang et al. [33] proposed de-expression residue learning to learn the residual expressive component in a generative model. To reduce the inter-subject variations, Cai et al. [5] introduced an identity-free generative adversarial network [11] to generate an average identity face image while keeping the expression unchanged. Considering the pose variation, Zhang et al. [36] leveraged an adversarial autoencoder to augment the training set with face images under different expressions and poses. However, these methods mainly focused on datasets captured in controlled environments, where the facial images are near frontal. Thus, the models generalize poorly
39
-
40
- when recognizing human expressions under natural and uncontrollable variations.
41
-
42
- Another line of work focused on designing advanced network architectures [1] or loss functions [16, 6, 35, 24]. Li et al. [16] proposed a deep locality-preserving Convolutional Neural Network, which preserved the local proximity by minimizing the distance to K-nearest neighbors within the same class. Building on this, Cai et al. [6] further introduced an island loss to simultaneously reduce intraclass variations and augment inter-class differences. Zeng et al. [35] studied the annotation error and bias problem among different facial expression datasets. Each image is predicted with multiple pseudo labels and a model is learned to fit the latent truth from these inconsistent labels. Acharya et al. [1] explored a covariance pooling layer to better capture the distortions in regional facial features and temporal evolution of per-frame features. Although the aforementioned approaches achieve good performance on data in the wild, facial expression recognition is still challenging due to the existence of partially occluded faces.
43
-
44
- # 2.2. Occlusive Facial Expression Recognition
45
-
46
- Recently, some works have started to investigate the occlusion issue. Li et al. [17] proposed a gate unit to enable the model to shift attention from the occluded patches to other visible facial regions. The gate unit estimates how informative a face patch is through an attention net, then the features are modulated by the learned weights. Similarly, region attention network [31] cropped multiple face regions and utilized a self-attention based model to learn an importance weight for each region. However, the self-attention based methods lack additional supervision to ensure this functionality. Thus, the network may not be able to locate the non-occluded facial regions accurately under large occlusions and poses.
47
-
48
- # 3. Occlusion Adaptive Deep Network
49
-
50
- To this end, we propose OADN for robust facial expression recognition in-the-wild. To be specific, we use ResNet50 [13] without the average pooling layer and the fully connected layer as the backbone to extract global feature maps from given images. We set the stride of conv4_1 to be 1, so a larger feature map is obtained. For an input image with height $H$ and width $W$ , the resolution of the output feature $F$ will be $H / 16 \times W / 16$ instead of $H / 32 \times W / 32$ . This is beneficial to identify the occlusion information and focus on the visible facial regions.
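In torchvision terms, the backbone modification can be sketched as follows; mapping conv4_1 to `layer3[0]` is our reading of the naming, and loading the ImageNet weights is omitted for brevity:

```python
import torch
import torch.nn as nn
import torchvision

backbone = torchvision.models.resnet50()  # ImageNet weights would be loaded here in practice
# conv4_1 is assumed to be the first bottleneck of torchvision's layer3; setting its
# stride to 1 removes one downsampling, so the trunk outputs H/16 x W/16 feature maps.
backbone.layer3[0].conv2.stride = (1, 1)
backbone.layer3[0].downsample[0].stride = (1, 1)
# keep only the convolutional trunk: no global average pooling, no fully connected layer
feature_extractor = nn.Sequential(*list(backbone.children())[:-2])

x = torch.randn(1, 3, 224, 224)
print(feature_extractor(x).shape)  # torch.Size([1, 2048, 14, 14])
```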
51
-
52
- As illustrated in Figure 1, OADN mainly consists of two branches: one is the landmark-guided attention branch, which utilizes a landmark detector to locate the landmarks and to guide the network to attend to the non-occluded facial areas. The other one is the facial region branch which divides the global feature maps into blocks and utilizes
53
-
54
- ![](images/6cf5afb4cd07944180064478fb35a61cbf2c6315e00d563af784260909b3a106.jpg)
55
- Figure 1. Pipeline of the Occlusion Adaptive Deep Network. It consists of two branches: a Landmark-guided Attention Branch and a Facial Region Branch. The ResNet50 backbone is shared between the two branches to extract the global features. For the Landmark-guided Attention Branch, the facial landmarks are first detected. Then the interest points are computed to cover the most informative facial areas. The confidence scores of these points are further utilized to generate the attention maps, guiding the model to attend to the visible facial components. For the Facial Region Branch, the feature maps are divided into non-overlapping facial blocks and each block is trained to be a discriminative expression classifier on its own.
56
-
57
- region-based classifiers to increase robustness. We describe each branch and the structural relationship among the two branches below.
58
-
59
- ![](images/d543ef302c21c60a57d610862d9786cca14c19e675d2c703d88b0bd199d88ed0.jpg)
60
- (a) Original 68 detected landmarks
61
-
62
- ![](images/e15278b81833ae8d71cbdadd0fd89c9c673dc5cafd9fd5680bd33f1832a38b10.jpg)
63
-
64
- ![](images/b2c94855bdf1766d4df9ba7902f61618b1eb63a9fe857ddaf3982acef549b5b2.jpg)
65
- (b) Recomputed 24 points
66
- Figure 2. We select 16 points from the original 68 landmarks (a) to cover the regions around eyes, eyebrows, nose and mouth. We further recompute 8 points (b) to cover facial cheeks and the areas between eyes and eyebrows.
67
-
68
- # 3.1. Landmark-guided Attention Branch
69
-
70
- OADN employs a facial landmark detector [9] to obtain landmarks from face images. The landmark detector is pretrained on the 300W dataset [28]. Given an input image, OADN utilizes the detector to extract $N = 68$ landmarks. For each landmark, the detector predicts its coordinates and confidence score. Then based on the detected 68 points, we select or recompute $M = 24$ interest points that cover the distinctive regions of face, including the eyes, nose, mouth and cheeks. Figure 2 illustrates the computation results. For those recomputed points (mainly around eyes and cheeks),
71
-
72
- we set their confidence scores to be the minimum confidence score of the landmark points used to compute them. To remove the occluded facial regions, we set a threshold $T$ to filter out the landmarks that have confidence scores smaller than $T$. Specifically, the interest points are obtained by:
73
-
74
- $$
75
- p_{i} = \begin{cases} (x_{i}, y_{i}) & \text{if } s_{i}^{\text{conf}} \geq T \\ 0 & \text{else} \end{cases} \tag{1}
76
- $$
77
-
78
- where $p_i$ denotes the $i$-th interest point, and $x_i, y_i$ denote the coordinates of the $i$-th point. $s_i^{\text{conf}}$ is the confidence score, ranging from 0 to 1, and $T$ is the threshold.
79
-
80
- We then generate the attention heatmaps, each consisting of a 2D Gaussian distribution centered at the location of a visible landmark. For the occluded landmarks, the corresponding attention maps are set to zero. We further downsample the attention maps by linear interpolation to match the size of the output feature maps. As shown in Figure 1, the attention map $A_{i}$ modulates the global feature maps $F$ to obtain the re-weighted features $F_{i}^{A}$. To achieve this, the feature map $F$ from the backbone is multiplied element-wise by each attention map $A_{i}$, $i = 1,\dots,M$, resulting in $M$ landmark-guided feature maps $F_{i}^{A}$:
81
-
82
- $$
83
- F _ {i} ^ {A} = F \odot A _ {i}, i = 1, \dots , M \tag {2}
84
- $$
85
-
86
- where $A_{i}$ is the $i$-th heatmap, and $\odot$ denotes the element-wise product. Since the attention map indicates the visibility of each
87
-
88
- facial component, the landmark-guided feature map $F_{i}^{A}$ can attend to the non-occluded facial parts and remove the information from the occluded regions. Thus, the feature from the visible region is emphasized and occluded part is canceled.
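A minimal sketch of the attention-map construction and the modulation in Equation (2); the Gaussian width and the tensor shapes are assumptions, not values taken from the paper:

```python
import torch

def gaussian_map(center, size=14, sigma=1.0):
    """2D Gaussian heatmap centered on a visible interest point (given in
    feature-map coordinates); occluded points get an all-zero map."""
    if center is None:  # point removed by the confidence threshold in Eq. (1)
        return torch.zeros(size, size)
    ys, xs = torch.meshgrid(torch.arange(size), torch.arange(size), indexing="ij")
    cx, cy = center
    return torch.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))

def modulate(features, attention_maps):
    """Eq. (2): element-wise product of the global features (C, H, W) with each
    attention map A_i (H, W), giving the M landmark-guided feature maps."""
    return [features * a.unsqueeze(0) for a in attention_maps]
```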
89
-
90
- Then global average pooling is applied to each landmark-guided feature map $F_{i}^{A}$ to obtain a 2048- $D$ feature $f_{i}^{A}, i = 1, \dots, M$ , corresponding to the facial component containing the specific interest point. Finally, the component-wise feature $f_{i}^{A}$ is max-pooled to fuse features from the non-occluded facial areas. A fully-connected layer is further used to reduce the dimension from 2048 to 256, and the output is fed into a softmax layer to predict the expression category of each input face image. We utilize the cross-entropy loss to train the landmark-guided attention branch, which is expressed as follows:
91
-
92
- $$
93
- L _ {L A B} = - \sum_ {i = 1} ^ {C} y _ {i} \log \hat {y} _ {i} \tag {3}
94
- $$
95
-
96
- where $\hat{y}_i$ is the prediction, $y_i$ is the ground truth and $C$ is the number of expression classes.
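The head of the landmark-guided attention branch (per-map global average pooling, max-pooling across the M components, 2048-to-256 projection and expression classifier) can be sketched as below; the module and dimension names are ours:

```python
import torch
import torch.nn as nn

class LandmarkAttentionHead(nn.Module):
    """Fuses the M landmark-guided feature maps and predicts the expression."""
    def __init__(self, in_dim=2048, hidden=256, num_classes=7):
        super().__init__()
        self.fc = nn.Linear(in_dim, hidden)
        self.classifier = nn.Linear(hidden, num_classes)

    def forward(self, guided_maps):
        # guided_maps: list of M tensors of shape (B, in_dim, H, W)
        pooled = [g.mean(dim=(2, 3)) for g in guided_maps]    # global average pooling
        fused = torch.stack(pooled, dim=0).max(dim=0).values  # max-pool over the M components
        return self.classifier(self.fc(fused))                # logits for the loss in Eq. (3)
```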
97
-
98
- # 3.2. Facial Region Branch
99
-
100
- When the face is severely occluded, the landmark detection results may not be accurate. Since relying solely on the landmark-guided attention branch is not enough, OADN utilizes a Facial Region Branch (FRB) to learn useful context information and further increase robustness.
101
-
102
- Given the global feature maps $F \in \mathbb{R}^{h \times w \times c}$, where $h, w, c$ are the height, width and channel dimensions, we first divide them into small $m \times n$ non-overlapping blocks. Each facial region feature $F_{i}^{R} \in \mathbb{R}^{m \times n \times c}$, $i = 1, \dots, K$, where $K = \lceil \frac{h}{m} \rceil \cdot \lceil \frac{w}{n} \rceil$, is then fed into a global average pooling layer to obtain a region-level feature $f_{i}^{R}$. Afterwards, a fully-connected layer is employed to reduce the dimension of $f_{i}^{R}$ from 2048 to 256. Finally, a softmax layer is applied to each region to obtain a set of predictions $y_{i}^{R}$, where $i = 1, \dots, K$.
103
-
104
- To train the facial region branch, we minimize the cross-entropy loss over the $K$ regions independently. Formally, the loss is expressed as:
105
-
106
- $$
107
- L _ {F R B} = - \sum_ {i = 1} ^ {C} \sum_ {j = 1} ^ {K} y _ {i} \log \hat {y} _ {i, j} ^ {R} \tag {4}
108
- $$
109
-
110
- where $K$ is the number of facial regions, $\hat{y}_{i,j}^{R}$ is the prediction of the $j$ th region, and $y_{i}$ is the ground truth expression category.
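A sketch of the facial region branch and the loss in Equation (4), with one small classifier per block; the even 2x2 split and the absence of weight sharing between region heads follow our reading of the text:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FacialRegionBranch(nn.Module):
    def __init__(self, in_dim=2048, hidden=256, num_classes=7, grid=2):
        super().__init__()
        self.grid = grid
        self.heads = nn.ModuleList(
            nn.Sequential(nn.Linear(in_dim, hidden), nn.Linear(hidden, num_classes))
            for _ in range(grid * grid))

    def forward(self, feat):  # feat: (B, in_dim, H, W) global feature maps
        b, c, h, w = feat.shape
        bh, bw = h // self.grid, w // self.grid
        logits = []
        for idx, head in enumerate(self.heads):
            i, j = divmod(idx, self.grid)
            block = feat[:, :, i * bh:(i + 1) * bh, j * bw:(j + 1) * bw]
            logits.append(head(block.mean(dim=(2, 3))))  # GAP, then the region classifier
        return logits

def frb_loss(region_logits, labels):
    # Eq. (4): sum of the cross-entropy losses over the K region classifiers
    return sum(F.cross_entropy(l, labels) for l in region_logits)
```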
111
-
112
- To make an accurate prediction based on a facial region only, OADN is required to learn more discriminative and diverse features at a finer level. As a result, a partial occlusion will have less effect on the network compared to a
113
-
114
- standard model. Moreover, the size of the expression recognition dataset is usually not very large. Training multiple region-based classifiers adds more supervision and reduces overfitting.
115
-
116
- # 3.3. Relationship between the Two Branches
117
-
118
- OADN is specifically designed to handle the occlusion problem for in-the-wild facial expression recognition. The landmark-guided attention branch explicitly guides the model to focus on non-occluded facial areas, learning a clean global feature, while the facial region branch promotes part-level features and enables the model to work robustly when the face is largely occluded. Combining the benefits of the two branches, we train OADN using the following loss:
119
-
120
- $$
121
- L = \lambda L _ {L A B} + (1 - \lambda) L _ {F R B} \tag {5}
122
- $$
123
-
124
- where $\lambda$ is the loss combination weight, and $L_{LAB}$ and $L_{FRB}$ are defined in Equations (3) and (4).
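Equation (5) itself is a one-liner; a sketch (the default weight here is arbitrary, not a value reported in the paper):

```python
def oadn_loss(loss_lab, loss_frb, lam=0.5):
    # Eq. (5): weighted combination of the two branch losses
    return lam * loss_lab + (1.0 - lam) * loss_frb
```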
125
-
126
- # 4. Experiments
127
-
128
- # 4.1. Datasets
129
-
130
- We validate the effectiveness of our method on the two largest in-the-wild expression datasets: RAF-DB [16] and AffectNet [25]. These in-the-wild datasets contain facial expressions captured in the real world under various poses, illuminations, intensities, and other uncontrolled conditions. We also evaluate our method on three recently proposed real-world occlusion datasets: Occlusion-AffectNet [31], Occlusion-FERPlus [31] and FED-RO [17]. The occlusions are diverse in color, shape, position and occlusion ratio.
131
-
132
- ![](images/9b4901edcfa7a1e2ca4e83b6efe49a12d0ba0dd3d9720c50393a2c24d3a418c1.jpg)
133
- Figure 3. The interest points with confidence scores greater than the threshold $T$ are shown as red points. We can see that occluded facial areas are removed.
134
-
135
- # 4.2. Implementation Details
136
-
137
- Preprocessing. The standard MTCNN [37] is used to detect five face landmarks for all the images. After performing a similarity transformation, we obtain the aligned face images and resize them to be $224 \times 224$ pixels. To detect landmarks from occluded images, we use SAN [9] pretrained on the 300W dataset [28] to get 68 face landmarks. We also try another landmark detector [4] and similar results are obtained. Then we select 18 points covering eyebrows, eyes, nose and mouth, and recompute eight points
138
-
139
- Table 1. Test set accuracy on RAF dataset
140
-
141
- <table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>86.90%</td></tr><tr><td>OADN(ours)</td><td>89.83%</td></tr><tr><td>ResiDen [14]</td><td>76.54%</td></tr><tr><td>ResNet-PL [26]</td><td>81.97%</td></tr><tr><td>PG-CNN [18]</td><td>83.27%</td></tr><tr><td>Center Loss [32]</td><td>83.68%</td></tr><tr><td>DLP-CNN [15]</td><td>84.13%</td></tr><tr><td>ALT [10]</td><td>84.50%</td></tr><tr><td>gACNN [17]</td><td>85.07%</td></tr><tr><td>OADN(ours)</td><td>87.16%</td></tr></table>
142
-
143
- from facial cheeks. The confidence scores of these recomputed points are the minimum scores of the points used to compute them. In all experiments, we set the threshold $T$ of the confidence score to be 0.6, thus landmarks with confidence scores smaller than 0.6 are removed. Figure 3 shows the computed interest points after thresholding. From it we can see that the occluded facial regions are discarded. Finally, we generate attention maps consisting of a Gaussian with the centers as the coordinates of the visible points. For those occluded points, the attention maps are all zeros. We resize the attention maps to be $14 \times 14$ to match the size of the global feature maps $F$ .
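The similarity-transform alignment mentioned at the start of this preprocessing paragraph can be sketched with OpenCV; the five-point template coordinates below are illustrative only, and the landmarks are assumed to come from MTCNN as stated:

```python
import cv2
import numpy as np

# illustrative 5-point template (eye centers, nose tip, mouth corners) in a 224x224 crop
TEMPLATE = np.float32([[77, 92], [147, 92], [112, 132], [84, 168], [140, 168]])

def align_face(image, five_landmarks, size=224):
    """Warp the face so that its five detected landmarks match the template."""
    src = np.float32(five_landmarks)
    matrix, _ = cv2.estimateAffinePartial2D(src, TEMPLATE)  # similarity transform
    return cv2.warpAffine(image, matrix, (size, size))
```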
144
-
145
- Training and Testing. We employ ResNet50 as our backbone, removing the average pooling layer and the fully connected layer. We modify the stride of conv4_1 from 2 to 1, so a larger feature map of size $14 \times 14$ is obtained. We initialize the model with weights pre-trained on ImageNet [8]. The mini-batch size is 128, the momentum is 0.9, and the weight decay is 0.0005. The learning rate starts at 0.1 and is decreased by a factor of 10 after 20 epochs. We train the model for a total of 60 epochs. Stochastic Gradient Descent (SGD) is adopted as the optimization algorithm. During training, only random flipping is used for data augmentation. For testing, a single image is used and the prediction scores from the landmark-guided attention branch and the facial region branch are averaged to obtain the final prediction score. The settings are the same for all experiments. For evaluation, the total accuracy metric is adopted. Considering the imbalance of the expression classes, a confusion matrix is also employed to show the average class accuracy. The deep learning framework PyTorch [27] is used to conduct the experiments. Upon publication, the code and trained expression models will be made publicly available.
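At test time the two branch scores are averaged into the final prediction; a sketch, assuming a model that returns one logit tensor per branch:

```python
import torch

@torch.no_grad()
def predict(model, image):
    """Average the softmax scores of the two branches for a single test image."""
    lab_logits, frb_logits = model(image.unsqueeze(0))  # assumed two-branch output
    probs = 0.5 * (torch.softmax(lab_logits, dim=1) + torch.softmax(frb_logits, dim=1))
    return probs.argmax(dim=1).item()
```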
146
-
147
- # 4.3. Results Comparison
148
-
149
- RAF [16] contains 30,000 in-the-wild facial expression images, annotated with basic or compound expressions by forty independent human labelers. In this experiment, only images with seven basic expressions are used, including
150
-
151
- Table 2. Validation set accuracy on AffectNet dataset
152
-
153
- <table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>59.50%</td></tr><tr><td>OADN(ours)</td><td>64.06%</td></tr><tr><td>VGG16 [29]</td><td>51.11%</td></tr><tr><td>GAN-Inpainting [34]</td><td>52.97%</td></tr><tr><td>DLP-CNN [16]</td><td>54.47%</td></tr><tr><td>PG-CNN [18]</td><td>55.33%</td></tr><tr><td>ResNet-PL [26]</td><td>56.42%</td></tr><tr><td>gACNN [17]</td><td>58.78%</td></tr><tr><td>OADN(ours)</td><td>61.89%</td></tr></table>
154
-
155
- 12,271 for training and 3,068 for testing.
156
-
157
- Table 1 shows the results of our method and previous works. Our OADN achieves $87.16\%$ total accuracy on the test set, outperforming all previous methods. Compared with the strongest competing method in the same setting, gACNN [17], OADN surpasses it by $2.1\%$. This is because OADN explicitly utilizes the meta information of landmarks to suppress the noisy information from occluded regions and enhances robustness with multiple region-based classifiers. To have a fair comparison with [31], we also pre-trained our model on the large-scale face recognition dataset VGGFace2 [7]. OADN achieves, to the best of our knowledge, a new state-of-the-art accuracy of $89.83\%$, outperforming RAN by $2.93\%$. This validates the superiority of the proposed method.
158
-
159
- ![](images/c6f240893a6796b93dc549574ae64107e182416e57a519fcd99862dca836f875.jpg)
160
- Figure 4. Confusion matrix for RAF-DB dataset. The darker the color, the higher the accuracy.
161
-
162
- We show the confusion matrix in Figure 4. It is observed that Fear and Disgust are the two most confusing expressions: Fear is easily confused with Surprise because
163
-
164
- Table 3. Validation set accuracy on Occlusion-AffectNet and Pose-AffectNet dataset
165
-
166
- <table><tr><td>Method</td><td>Occ. Acc.</td><td>Pose&gt;30 Acc.</td><td>Pose&gt;45 Acc.</td></tr><tr><td>RAN [31]</td><td>58.50%</td><td>53.90%</td><td>53.19%</td></tr><tr><td>OADN(ours)</td><td>64.02%</td><td>61.12%</td><td>61.08%</td></tr></table>
167
-
168
- of their similar facial appearance, while Disgust is mainly confused with Neutral due to the subtlety of the expression.
169
-
170
- ![](images/9372ee4566d1fd1ed3d1cd150413fa463037b882f8c2e60391928dde4e6e69d1.jpg)
171
- Figure 5. Confusion matrix for Affectnet dataset. The darker the color, the higher the accuracy.
172
-
173
- AffectNet [25] is currently the largest expression dataset. It contains about 400,000 images manually annotated with seven discrete facial expressions and the intensity of valence and arousal. Following the experiment setting in [17], we only use images with neutral and the six basic emotions, yielding 280,000 images for training and 3,500 images from the validation set for testing, since the test set is not publicly available. Very recently, Wang et al. [31] released the Occlusion-AffectNet and Pose-AffectNet datasets, where only images with challenging conditions are selected as test sets. For Occlusion-AffectNet, each image contains at least one type of occlusion, such as a mask or glasses; there are 682 images in total. For Pose-AffectNet, images with pose angles larger than 30 and 45 degrees are collected, comprising 1,949 and 985 images, respectively.
174
-
175
- As shown in Table 2, OADN achieves the best performance with an accuracy of $61.89\%$ on the validation set. Compared to the strongest competing method in the same setting, gACNN [17], OADN surpasses it by $3.1\%$, which
176
-
177
- Table 4. Test set accuracy on FED-RO dataset
178
-
179
- <table><tr><td>Method</td><td>Average Accuracy</td></tr><tr><td>RAN [31]</td><td>67.98%</td></tr><tr><td>OADN(ours)</td><td>71.17%</td></tr><tr><td>VGG16 [29]</td><td>51.11%</td></tr><tr><td>ResNet18 [13]</td><td>64.25%</td></tr><tr><td>GAN-Inpainting [34]</td><td>58.33%</td></tr><tr><td>DLP-CNN [16]</td><td>60.31%</td></tr><tr><td>PG-CNN [18]</td><td>64.25%</td></tr><tr><td>gACNN [17]</td><td>66.50%</td></tr><tr><td>OADN(ours)</td><td>68.11%</td></tr></table>
180
-
181
- is a reasonable improvement. OADN also outperforms RAN [31] by $4.56\%$ when both are pre-trained on a large-scale face recognition dataset. On the Occlusion-AffectNet and Pose-AffectNet datasets, the performance gap between OADN and RAN increases further: OADN exceeds RAN by $5.52\%$, $7.22\%$ and $7.89\%$ on the test sets with occlusion, pose greater than 30 degrees, and pose greater than 45 degrees, respectively. This validates the effectiveness of the proposed method for the occluded facial expression recognition problem. The confusion matrix is shown in Figure 5; from it we find that Disgust and Anger are the most difficult expressions to classify.
182
-
183
- FED-RO [17] is a recently released facial expression dataset with real-world occlusions. Each image has natural occlusions including sunglasses, medical masks, hands and hair. It contains 400 images labeled with seven expressions for testing. We train our model on the joint training data of RAF and AffectNet, following the protocol suggested in [17].
184
-
185
- As shown in Table 4, OADN achieves the best performance with an accuracy of $68.11\%$, improving over gACNN by $1.61\%$. OADN also outperforms RAN by $3.19\%$ when pre-trained on a face recognition dataset. From the confusion matrix shown in Figure 6, we can see that both Surprise and Happy have high accuracy, while Fear and Disgust are easily confused with Surprise and Sad.
186
-
187
- FERPlus [2] is a real-world facial expression dataset initially introduced in the ICML 2013 Challenge [12]. It consists of 28,709 training images, 3,589 validation images and 3,589 test images. Each image is labeled with one of eight expressions by 10 independent taggers. Recently, Wang et al. [31] released the Occlusion-FERPlus and Pose-FERPlus datasets, where images under occlusion and large pose ( $>30$ and $>45$ degrees) are collected from the FERPlus test set. Occlusion-FERPlus contains a total of 605 images, while Pose-FERPlus contains 1,171 and 634 images with pose larger than 30 and 45 degrees, respectively. Following [31], we train our model on the FERPlus training data and test on these challenging datasets.
188
-
189
- Table 5 reports the test accuracy. OADN
190
-
191
- ![](images/d8f3bcd34e5663c23177191284fb51262210fbf408bb65a694e0aea4bfaf2a04.jpg)
192
- Figure 6. Confusion matrix for FED-RO dataset. The darker the color, the higher the accuracy.
193
-
194
- Table 5. Test set accuracy on Occlusion-FERPlus and Pose-FERPlus dataset
195
-
196
- <table><tr><td>Method</td><td>Occ. Acc.</td><td>Pose&gt;30 Acc.</td><td>Pose&gt;45 Acc.</td></tr><tr><td>RAN [31]</td><td>83.63%</td><td>82.23%</td><td>80.40%</td></tr><tr><td>OADN(ours)</td><td>84.57%</td><td>88.52%</td><td>87.50%</td></tr></table>
197
-
198
- significantly surpasses RAN by a large margin, with $6.29\%$ and $7.10\%$ improvements on the Pose-FERPlus datasets. OADN also achieves better performance on the Occlusion-FERPlus dataset. This validates the effectiveness of our method for recognizing facial expressions under challenging conditions.
199
-
200
- # 4.4. Ablation Study
201
-
202
- In this section, we conduct ablation studies on the RAF dataset to analyze each component of OADN.
203
-
204
- The impact of the landmark confidence threshold $T$. The confidence scores of the interest points are utilized to select interest points from the non-occluded facial areas. From Equation (1), points with confidence scores higher than $T$ are kept. We can see from Figure 7 (a) that OADN achieves the best performance with $T = 0.6$. When $T$ is increased further, the performance drops quickly, since some important facial areas that may not be occluded are also discarded. On the other hand, when $T$ falls below 0.6, OADN starts to perform worse, because noisy information from the occluded areas is also included, which degrades the clean features.
205
-
206
- The impact of the number of regions $K$. In the facial
207
-
208
- ![](images/b7475d30cf4d387aacd8ac721b475389e30f4be18dfeed10ef002a0b259741ec.jpg)
209
- Figure 7. The impacts of the confidence threshold $T$, the number of regions $K$, and the loss combination weight $\lambda$ on the performance of OADN.
210
-
211
- ![](images/ac689a519a42f4017274d0ba7ceac4dcdc87f6fd078ee448ecaa1e2287776b26.jpg)
212
-
213
- ![](images/b7af8bddaec2afc2f3f8d609bd4633f5b368144be64f1e214d6d4a7d36a8a453.jpg)
214
-
215
- region branch, we partition the global feature maps into $K$ blocks and train an expression classifier on each block independently, so $K$ determines the granularity of the part-level features. From Figure 7 (b), we observe that the best accuracy is achieved at $K = 4$. When $K = 1$, the facial region branch is equivalent to the standard ResNet50 classifier; its worse performance indicates the necessity of learning features at the part level. However, increasing $K$ to a large number such as 16 does not bring further improvement, because when a facial region is too small it lacks enough information to make the prediction under occlusion. Thus the classifiers are confused and training stagnates.
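A minimal sketch of such a facial region branch is given below. It assumes the $K$ blocks are horizontal strips of the $14 \times 14$ feature map, each globally average pooled before its own linear classifier; the excerpt does not spell out the exact partitioning, so treat these choices as assumptions.

```python
import torch
import torch.nn as nn

class FacialRegionBranch(nn.Module):
    """Partition the global feature map into K blocks, each with its own
    expression classifier (a sketch of the idea described above)."""

    def __init__(self, in_channels=2048, num_classes=7, K=4):
        super().__init__()
        self.K = K
        self.classifiers = nn.ModuleList(
            [nn.Linear(in_channels, num_classes) for _ in range(K)]
        )

    def forward(self, feats):                        # feats: (B, C, 14, 14)
        blocks = torch.chunk(feats, self.K, dim=2)   # K horizontal strips
        logits = []
        for block, classifier in zip(blocks, self.classifiers):
            pooled = block.mean(dim=(2, 3))          # average pool each strip
            logits.append(classifier(pooled))        # (B, num_classes) per strip
        return logits
```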
216
-
217
- The impact of the loss combination weight $\lambda$. To train OADN, we jointly optimize the loss from the landmark-guided attention branch (LAB) and the facial region branch (FRB) as defined in Equation (5). The loss weight $\lambda$ controls the relative importance of each loss: when $\lambda$ equals 1, only LAB is utilized, while $\lambda = 0$ means only FRB is used. From Figure 7 (c), we find that LAB alone obtains better performance than FRB alone, since the network is guided to attend to the most discriminative facial areas, while combining the two branches achieves better performance than using either branch alone. This validates the effectiveness of the complementary features learned by the two branches.
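Equation (5) is not reproduced in this excerpt, but a combination consistent with the description ($\lambda = 1$ uses only LAB, $\lambda = 0$ only FRB) would be

$$
\mathcal{L} = \lambda \, \mathcal{L}_{\mathrm{LAB}} + (1 - \lambda) \, \mathcal{L}_{\mathrm{FRB}},
$$

where $\mathcal{L}_{\mathrm{LAB}}$ and $\mathcal{L}_{\mathrm{FRB}}$ denote the classification losses of the two branches, with the facial region branch loss aggregated over its $K$ region classifiers; the exact form of each term is an assumption, since it is not given here.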
218
-
219
- # 4.5. Visualization
220
-
221
- Figure 8 shows some expression recognition examples from gACNN [17] and our OADN method on the FED-RO dataset. The classification results show that gACNN is vulnerable to large head poses and heavy facial occlusions. In contrast, OADN works successfully under these conditions.
222
-
223
- ![](images/04e3dab619a8e5a2046a56eb56b1d1e6c0eaf03c0ba0d7ddd77b2fa9c8469ea8.jpg)
224
- Figure 8. Comparison of the gACNN method and our OADN method on the FED-RO dataset. Red and green text indicates incorrect and correct predictions, respectively.
225
-
226
- # 5. Conclusions
227
-
228
- In this paper, we present an occlusion-adaptive deep network to tackle the occluded facial expression recognition problem. The network is composed of two branches: the landmark-guided attention branch guides the network to learn clean features from the non-occluded facial areas, while the facial region branch increases robustness by dividing the last convolutional feature maps into regions, each with its own part-level classifier. We conduct extensive experiments on both challenging in-the-wild expression datasets and real-world occluded expression datasets. The results show that our method outperforms existing methods and achieves robustness against occlusion and various poses.
229
-
230
- # 6. Acknowledgement
231
-
232
- This research is based upon work supported by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA R&D Contract No. 2019-022600002. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright annotation thereon.
233
-
234
- # References
235
-
236
- [1] D. Acharya, Z. Huang, D. Pani Paudel, and L. Van Gool. Covariance pooling for facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 367-374, 2018.
237
- [2] E. Barsoum, C. Zhang, C. C. Ferrer, and Z. Zhang. Training deep networks for facial expression recognition with crowdsourced label distribution. In Proceedings of the 18th ACM International Conference on Multimodal Interaction, pages 279-283, 2016.
238
[3] B. Bozorgtabar, M. S. Rad, H. K. Ekenel, and J.-P. Thiran. Using photorealistic face synthesis and domain adaptation to improve facial expression analysis. In 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-8. IEEE, 2019.
239
- [4] A. Bulat and G. Tzimiropoulos. How far are we from solving the 2D & 3D face alignment problem? (and a dataset of 230,000 3D facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 1021-1030, 2017.
240
- [5] J. Cai, Z. Meng, A. S. Khan, Z. Li, J. O'Reilly, and Y. Tong. Identity-free facial expression recognition using conditional generative adversarial network. arXiv preprint arXiv:1903.08051, 2019.
241
[6] J. Cai, Z. Meng, A. S. Khan, Z. Li, J. O'Reilly, and Y. Tong. Island loss for learning discriminative features in facial
242
-
243
- expression recognition. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 302-309. IEEE, 2018.
244
- [7] Q. Cao, L. Shen, W. Xie, O. M. Parkhi, and A. Zisserman. Vggface2: A dataset for recognising faces across pose and age. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 67-74. IEEE, 2018.
245
- [8] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255. IEEE, 2009.
246
- [9] X. Dong, Y. Yan, W. Ouyang, and Y. Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 379-388, 2018.
247
- [10] C. Florea, L. Florea, M. Badea, C. Vertan, and A. Racoviteanu. Annealed label transfer for face expression recognition. In British Machine Vision Conference (BMVC), 2019.
248
- [11] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, pages 2672–2680, 2014.
249
- [12] I. J. Goodfellow, D. Erhan, P. L. Carrier, A. Courville, M. Mirza, B. Hammer, W. Cukierski, Y. Tang, D. Thaler, D.-H. Lee, et al. Challenges in representation learning: A report on three machine learning contests. In International Conference on Neural Information Processing, pages 117-124. Springer, 2013.
250
- [13] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016.
251
- [14] S. Jyoti, G. Sharma, and A. Dhall. Expression empowered resident network for facial action unit detection. In 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-8. IEEE, 2019.
252
- [15] S. Li and W. Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2018.
253
- [16] S. Li, W. Deng, and J. Du. Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2852-2861, 2017.
254
- [17] Y. Li, J. Zeng, S. Shan, and X. Chen. Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE Transactions on Image Processing, 28(5):2439-2450, 2018.
255
- [18] Y. Li, J. Zeng, S. Shan, and X. Chen. Patch-gated CNN for occlusion-aware facial expression recognition. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 2209-2214. IEEE, 2018.
256
[19] M. Liu, S. Li, S. Shan, and X. Chen. AU-aware deep networks for facial expression recognition. In IEEE
257
-
258
- International Conference on Automatic Face & Gesture Recognition Workshops, pages 1-6, 2013.
259
- [20] M. Liu, S. Shan, R. Wang, and X. Chen. Learning expressionlets on spatio-temporal manifold for dynamic facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1749-1756, 2014.
260
- [21] P. Liu, S. Han, Z. Meng, and Y. Tong. Facial expression recognition via a boosted deep belief network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1805-1812, 2014.
261
- [22] P. Liu, J. T. Zhou, I. W.-H. Tsang, Z. Meng, S. Han, and Y. Tong. Feature disentangling machine-a novel approach of feature selection and disentangling in facial expression analysis. In European Conference on Computer Vision (ECCV), pages 151-166. 2014.
262
- [23] P. Lucey, J. F. Cohn, T. Kanade, J. Saragih, Z. Ambadar, and I. Matthews. The extended cohn-kanade dataset (CK+): A complete dataset for action unit and emotion-specified expression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 94–101. IEEE, 2010.
263
- [24] Z. Luo, J. Hu, and W. Deng. Local subclass constraint for facial expression recognition in the wild. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 3132-3137. IEEE, 2018.
264
- [25] A. Mollahosseini, B. Hasani, and M. H. Mahoor. Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Transactions on Affective Computing, 10(1):18-31, 2017.
265
- [26] B. Pan, S. Wang, and B. Xia. Occluded facial expression recognition enhanced through privileged information. In Proceedings of the 27th ACM International Conference on Multimedia, pages 566-573, 2019.
266
- [27] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L. Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, pages 8024-8035, 2019.
267
- [28] C. Sagonas, G. Tzimiropoulos, S. Zafeiriou, and M. Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCVW), pages 397-403, 2013.
268
- [29] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. International Conference on Learning Representations (ICLR), 2015.
269
- [30] M. Valstar and M. Pantic. Induced disgust, happiness and surprise: an addition to the MMI facial expression database. In Proc. 3rd Intern. Workshop on EMOTION (satellite of LREC): Corpora for Research on Emotion and Affect, page 65. Paris, France, 2010.
270
- [31] K. Wang, X. Peng, J. Yang, D. Meng, and Y. Qiao. Region attention networks for pose and occlusion robust facial expression recognition. IEEE Transactions on Image Processing, 2020.
271
-
272
- [32] Y. Wen, K. Zhang, Z. Li, and Y. Qiao. A discriminative feature learning approach for deep face recognition. In European Conference on Computer Vision (ECCV), pages 499-515. Springer, 2016.
273
- [33] H. Yang, U. Ciftci, and L. Yin. Facial expression recognition by de-expression residue learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2168-2177, 2018.
274
- [34] J. Yu, Z. Lin, J. Yang, X. Shen, X. Lu, and T. S. Huang. Generative image inpainting with contextual attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5505-5514, 2018.
275
- [35] J. Zeng, S. Shan, and X. Chen. Facial expression recognition with inconsistently annotated datasets. In Proceedings of the European Conference on Computer Vision (ECCV), pages 222-237, 2018.
276
- [36] F. Zhang, T. Zhang, Q. Mao, and C. Xu. Joint pose and expression modeling for facial expression recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3359-3368, 2018.
277
- [37] K. Zhang, Z. Zhang, Z. Li, and Y. Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Signal Processing Letters, 23(10):1499-1503, 2016.
278
[38] G. Zhao, X. Huang, M. Taini, S. Z. Li, and M. Pietikäinen. Facial expression recognition from near-infrared videos. Image and Vision Computing, 29(9):607-619, 2011.
279
- [39] L. Zhong, Q. Liu, P. Yang, B. Liu, J. Huang, and D. N. Metaxas. Learning active facial patches for expression analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2562-2569, 2012.
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfe0585c9a9532b050a09faff6be377345708da6ef513937e54dc51c9c446d45
3
+ size 41591
 
 
 
data/2020/2005_06xxx/2005.06040/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e2d846d59eb8495acd160dedf516e5e767d7d80c586c7cd6e3b4080bb5a4a1e1
3
  size 379388
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b3385df750243e44b8635fd97c57ded30b9cdedfbed87ec6d12ce459f4623aa
3
  size 379388
data/2020/2005_06xxx/2005.06040/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06209/96df9e36-3e85-4a55-a3b1-bc6eb5c65f79_content_list.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06209/96df9e36-3e85-4a55-a3b1-bc6eb5c65f79_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06209/full.md CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06209/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5f1b97fea8e5c0caed2b3fd19e6473cf4ba4c8ed8e57890760657fc9ba4acc9d
3
  size 3769882
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c374e9ac260e4dbc474445187b9eff56d901b793018ac2816fb11d89a9b58f5
3
  size 3769882
data/2020/2005_06xxx/2005.06209/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06227/76517cb5-7ba7-44f7-855a-b106df86ca7b_content_list.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06227/76517cb5-7ba7-44f7-855a-b106df86ca7b_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06227/full.md CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06227/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d05675f2c0468e91a165c509e2c0ead86c7b1e0f86817a8423717dca8427a47c
3
  size 676718
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a073a082fde6d8aa374ccc582f079df1b88c2e1712df665c1f8a569bc399a73
3
  size 676718
data/2020/2005_06xxx/2005.06227/layout.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06247/1dbe5a8e-513b-4268-b9d4-023a41101438_content_list.json CHANGED
@@ -1,1722 +1,3 @@
1
- [
2
- {
3
- "type": "text",
4
- "text": "Explainable Reinforcement Learning: A Survey",
5
- "text_level": 1,
6
- "bbox": [
7
- 225,
8
- 141,
9
- 774,
10
- 162
11
- ],
12
- "page_idx": 0
13
- },
14
- {
15
- "type": "text",
16
- "text": "Erika Puiutta $^{1}$ [0000-0003-3796-8931] and Eric MSP Veith $^{1}$ [0000-0003-2487-7475]",
17
- "bbox": [
18
- 223,
19
- 186,
20
- 774,
21
- 204
22
- ],
23
- "page_idx": 0
24
- },
25
- {
26
- "type": "text",
27
- "text": "OFFIS - Institute for Information Technology, Escherweg 2, 26121 Oldenburg, Germany erika.puiutta@offis.de, eric.veith@offis.de",
28
- "bbox": [
29
- 238,
30
- 217,
31
- 761,
32
- 244
33
- ],
34
- "page_idx": 0
35
- },
36
- {
37
- "type": "text",
38
- "text": "Abstract. Explainable Artificial Intelligence (XAI), i.e., the development of more transparent and interpretable AI models, has gained increased traction over the last few years. This is due to the fact that, in conjunction with their growth into powerful and ubiquitous tools, AI models exhibit one detrimental characteristic: a performance-transparency trade-off. This describes the fact that the more complex a model's inner workings, the less clear it is how its predictions or decisions were achieved. But, especially considering Machine Learning (ML) methods like Reinforcement Learning (RL) where the system learns autonomously, the necessity to understand the underlying reasoning for their decisions becomes apparent. Since, to the best of our knowledge, there exists no single work offering an overview of Explainable Reinforcement Learning (XRL) methods, this survey attempts to address this gap. We give a short summary of the problem, a definition of important terms, and offer a classification and assessment of current XRL methods. We found that a) the majority of XRL methods function by mimicking and simplifying a complex model instead of designing an inherently simple one, and b) XRL (and XAI) methods often neglect to consider the human side of the equation, not taking into account research from related fields like psychology or philosophy. Thus, an interdisciplinary effort is needed to adapt the generated explanations to a (non-expert) human user in order to effectively progress in the field of XRL and XAI in general.",
39
- "bbox": [
40
- 261,
41
- 282,
42
- 738,
43
- 589
44
- ],
45
- "page_idx": 0
46
- },
47
- {
48
- "type": "text",
49
- "text": "Keywords: Machine Learning $\\cdot$ Explainable $\\cdot$ Reinforcement Learning $\\cdot$ Human-Computer Interaction $\\cdot$ Interpretable.",
50
- "bbox": [
51
- 261,
52
- 602,
53
- 738,
54
- 630
55
- ],
56
- "page_idx": 0
57
- },
58
- {
59
- "type": "text",
60
- "text": "1 Introduction",
61
- "text_level": 1,
62
- "bbox": [
63
- 215,
64
- 656,
65
- 375,
66
- 672
67
- ],
68
- "page_idx": 0
69
- },
70
- {
71
- "type": "text",
72
- "text": "Over the past decades, AI has become ubiquitous in many areas of our everyday lives. Especially Machine Learning (ML) as one branch of AI has numerous fields of application, be it transportation [59], advertisement and content recommendation [47], or medicine [39]. Unfortunately, the more powerful and flexible those models are, the more opaque they become, essentially making them black boxes (see figure 1). This trade-off is referred to by different terms in the literature, e.g. readability-performance trade-off [12], accuracy-comprehensibility trade-off [16], or accuracy-interpretability trade-off [50]. This work aims to, first, establish the need for explainable AI in general and explainable RL specifically. After that, the general concept of RL is briefly explained and the most important terms",
73
- "bbox": [
74
- 212,
75
- 688,
76
- 787,
77
- 840
78
- ],
79
- "page_idx": 0
80
- },
81
- {
82
- "type": "aside_text",
83
- "text": "arXiv:2005.06247v1 [cs.LG] 13 May 2020",
84
- "bbox": [
85
- 22,
86
- 229,
87
- 60,
88
- 681
89
- ],
90
- "page_idx": 0
91
- },
92
- {
93
- "type": "image",
94
- "img_path": "images/1abb04a364c8381831aa3120bcb6343685c3f2b57697196d76a841bfe6cc7b9d.jpg",
95
- "image_caption": [
96
- "Fig. 1. Schematic representation of the performance-readability trade-off. Simpler, linear models are easy to understand and interpret, but suffer from a lack of performance, while non-linear, more flexible models are too complex to be understood easily. Adopted from Martens et al. [42]."
97
- ],
98
- "image_footnote": [],
99
- "bbox": [
100
- 357,
101
- 142,
102
- 647,
103
- 313
104
- ],
105
- "page_idx": 1
106
- },
107
- {
108
- "type": "text",
109
- "text": "related to XAI are defined. Then, a classification of XAI models is presented and selected XRL models are sorted into these categories. Since there already is an abundance of sources on XAI but less so about XRL specifically, the focus of this work lies on providing information about and presenting sample methods of XRL models<sup>1</sup>. Thus, we present one method for each category in more detail and give a critical evaluation over the existing XRL methods.",
110
- "bbox": [
111
- 212,
112
- 412,
113
- 787,
114
- 505
115
- ],
116
- "page_idx": 1
117
- },
118
- {
119
- "type": "text",
120
- "text": "1.1 The importance of explainability",
121
- "text_level": 1,
122
- "bbox": [
123
- 215,
124
- 530,
125
- 532,
126
- 546
127
- ],
128
- "page_idx": 1
129
- },
130
- {
131
- "type": "text",
132
- "text": "Why is explainability so crucial? First, there is one obvious psychology-related reason: 'if the users do not trust a model or a prediction, they will not use it' [49, p. 1]. Trust is an essential prerequisite of using a model or system [26, 12], and transparency has been identified as one key component both in increasing users' trust [18], as well as users' acceptance of a system [24] (for a formal definition of transparency and related terms, see section 1.3). Transparency also justifies a system's decisions and enables them to be fair and ethical [2]. Thus, in order to confidently use a system, it needs to be trusted, and in order to be trusted, it needs to be transparent and its decisions need to be justifiable.",
133
- "bbox": [
134
- 212,
135
- 559,
136
- 787,
137
- 694
138
- ],
139
- "page_idx": 1
140
- },
141
- {
142
- "type": "text",
143
- "text": "Second, AI technologies have become an essential part in almost all domains of Cyber-Physical Systems (CPSs). Reasons include the thrive for increased efficiency, business model innovations, or the necessity to accommodate volatile parts of today's critical infrastructures, such as a high share of renewable energy sources. In time, AI technologies evolved from being an additional input to an otherwise soundly defined control system, to increasing the state awareness of",
144
- "bbox": [
145
- 212,
146
- 696,
147
- 787,
148
- 787
149
- ],
150
- "page_idx": 1
151
- },
152
- {
153
- "type": "page_number",
154
- "text": "2",
155
- "bbox": [
156
- 217,
157
- 114,
158
- 228,
159
- 126
160
- ],
161
- "page_idx": 1
162
- },
163
- {
164
- "type": "header",
165
- "text": "E. Puiutta & E. Veith",
166
- "bbox": [
167
- 271,
168
- 114,
169
- 421,
170
- 126
171
- ],
172
- "page_idx": 1
173
- },
174
- {
175
- "type": "page_footnote",
176
- "text": "<sup>1</sup> Please note that, while there is a distinction between Reinforcement Learning and Deep Reinforcement Learning (DRL), for the sake of simplicity, we will refer to both as just Reinforcement Learning going forward.",
177
- "bbox": [
178
- 217,
179
- 797,
180
- 787,
181
- 839
182
- ],
183
- "page_idx": 1
184
- },
185
- {
186
- "type": "text",
187
- "text": "a CPS—e.g., Neural State Estimation [9]—to fully decentralized, but still rule-governed systems—such as the Universal Smart Grid Agent [62]—, to a system where all behavior originates from machine learning. AlphaGo, AlphaGo Zero, and MuZero are probably the most widely-known representatives of the last category [55, 52], but for CPS analysis and operation, Adversarial Resilience Learning (ARL) has emerged as a novel methodology based on DRL [15, 61]. It is specifically designed to analyse and control critical infrastructures; obviously, explainability is paramount here.",
188
- "bbox": [
189
- 212,
190
- 145,
191
- 787,
192
- 267
193
- ],
194
- "page_idx": 2
195
- },
196
- {
197
- "type": "text",
198
- "text": "There is also a legal component to be considered; the EU General Data Protection Regulation (GDPR) [14], which came into effect in May 2018, aims to ensure a 'right to explanation' [19, p. 1] concerning automated decision-making and profiling. It states that '[...] such processing should subject to suitable safeguards, which should include [...] the right to obtain human intervention [...] [and] an explanation of the decision reached after such assessment' [14, recital 71]. Additionally, the European Commission set out an AI strategy with transparency and accountability as important principles to be respected [57], and in their Guidelines on trustworthy AI [58] they state seven key requirements, with transparency and accountability as two of them.",
199
- "bbox": [
200
- 212,
201
- 267,
202
- 787,
203
- 417
204
- ],
205
- "page_idx": 2
206
- },
207
- {
208
- "type": "text",
209
- "text": "Finally, there are important practical reasons to consider; despite the increasing efficiency and versatility of AI, its incomprehensibility reduces its usefulness, since 'incomprehensible decision-making can still be effective, but its effectiveness does not mean that it cannot be faulty' [33, p. 1]. For example, in [56], neural nets successfully learnt to classify pictures but could be led to misclassification by (to humans) nearly imperceptible perturbations, and in [46], deep neural nets classified unrecognizable images with $>99\\%$ certainty. This shows that a high level of effectiveness (under standard conditions) or even confidence does not imply that the decisions are correct or based on appropriately-learnt data.",
210
- "bbox": [
211
- 212,
212
- 417,
213
- 787,
214
- 568
215
- ],
216
- "page_idx": 2
217
- },
218
- {
219
- "type": "text",
220
- "text": "Bearing this in mind, and considering the fact that, nowadays, AI can act increasingly autonomous, explaining and justifying the decisions is now more crucial than ever, especially in the domain of RL where an agent learns by itself, without human interaction.",
221
- "bbox": [
222
- 212,
223
- 569,
224
- 787,
225
- 630
226
- ],
227
- "page_idx": 2
228
- },
229
- {
230
- "type": "text",
231
- "text": "1.2 Reinforcement Learning",
232
- "text_level": 1,
233
- "bbox": [
234
- 215,
235
- 654,
236
- 460,
237
- 669
238
- ],
239
- "page_idx": 2
240
- },
241
- {
242
- "type": "text",
243
- "text": "Reinforcement Learning is a trial-and-error learning algorithm in which an autonomous agent tries to find the optimal solution to a problem through automated learning [53]. Possible applications for the use of RL are teaching neural networks to play games like Go [55], teaching robots to perform certain tasks [32], or intelligent transport systems [4]. RL is usually introduced as a Markov Decision Process (MDP) if it satisfies the Markov property: the next state depends only on the current state and the agent's action(s), not on past states $[28]^2$ .",
244
- "bbox": [
245
- 212,
246
- 680,
247
- 787,
248
- 801
249
- ],
250
- "page_idx": 2
251
- },
252
- {
253
- "type": "header",
254
- "text": "Explainable Reinforcement Learning: A Survey",
255
- "bbox": [
256
- 416,
257
- 114,
258
- 732,
259
- 128
260
- ],
261
- "page_idx": 2
262
- },
263
- {
264
- "type": "page_number",
265
- "text": "3",
266
- "bbox": [
267
- 774,
268
- 116,
269
- 785,
270
- 126
271
- ],
272
- "page_idx": 2
273
- },
274
- {
275
- "type": "page_footnote",
276
- "text": "2 To be exact, MDPs assume that the complete world state is visible to the agent which is, naturally, not always true. In these cases, a partially observable Markov decision",
277
- "bbox": [
278
- 217,
279
- 810,
280
- 787,
281
- 839
282
- ],
283
- "page_idx": 2
284
- },
285
- {
286
- "type": "image",
287
- "img_path": "images/b71c8c1ff9a42b972a90f76059fe74dd9c01ae02c7d5e4b243473ec9ea3ca053.jpg",
288
- "image_caption": [
289
- "Fig. 2. Interaction between agent and environment in RL. The agent performs a certain action which is rewarded by the 'critic' in the environment, and it receives an update on the environment's states. Adapted from Barto et al. [5]"
290
- ],
291
- "image_footnote": [],
292
- "bbox": [
293
- 339,
294
- 143,
295
- 668,
296
- 316
297
- ],
298
- "page_idx": 3
299
- },
300
- {
301
- "type": "text",
302
- "text": "The learning process is initiated by an agent randomly performing an action which leads to a certain environmental state. This state has a reward assigned to it depending on how desirable this outcome is, set by the designer of the task (see also figure 2). The algorithm will then learn a policy, i.e., an action-state-relation, in order to maximize the cumulative reward and be able to select the most optimal action in each situation. For more information on RL, see also [53, 34].",
303
- "bbox": [
304
- 212,
305
- 402,
306
- 787,
307
- 508
308
- ],
309
- "page_idx": 3
310
- },
311
- {
312
- "type": "text",
313
- "text": "1.3 Definition of important terms",
314
- "text_level": 1,
315
- "bbox": [
316
- 215,
317
- 532,
318
- 508,
319
- 549
320
- ],
321
- "page_idx": 3
322
- },
323
- {
324
- "type": "text",
325
- "text": "As already mentioned in section 1, the more complex a systems becomes, the less obvious its inner workings become. Additionally, there is no uniform term for this trade-off in the literature; XAI methods use an abundance of related, but distinct terms like transparency, reachability, etc... This inconsistency can be due to one or both of the following reasons: a) different terms are used in the same sense due to a lack of official definition of these terms, or b) different terms are used because the authors (subjectively) draw a distinction between them, without an official accounting of these differences. In any case, a uniform understanding and definition of what it means if a method is described as 'interpretable' or 'transparent' is important in order to clarify the potential, capacity and intention of a model. This is not an easy task, since there is no unique definition for the different terms to be found in the literature; even for 'interpretability', the concept which is most commonly used, 'the term [...] holds no agreed upon meaning, and yet machine learning conferences frequently publish papers which",
326
- "bbox": [
327
- 212,
328
- 561,
329
- 787,
330
- 773
331
- ],
332
- "page_idx": 3
333
- },
334
- {
335
- "type": "page_number",
336
- "text": "4",
337
- "bbox": [
338
- 217,
339
- 114,
340
- 228,
341
- 126
342
- ],
343
- "page_idx": 3
344
- },
345
- {
346
- "type": "header",
347
- "text": "E. Puiutta & E. Veith",
348
- "bbox": [
349
- 271,
350
- 114,
351
- 421,
352
- 126
353
- ],
354
- "page_idx": 3
355
- },
356
- {
357
- "type": "page_footnote",
358
- "text": "process (POMDP) can be used where, instead of observing the current state directly, we have a probability distribution over the possible states instead [37]. For the sake of simplicity, we do not go into further detail and refer the reader to Kaelbling et al. or Kimura et al. [29, 31] for more information.",
359
- "bbox": [
360
- 228,
361
- 784,
362
- 787,
363
- 839
364
- ],
365
- "page_idx": 3
366
- },
367
- {
368
- "type": "text",
369
- "text": "wield the term in a quasi-mathematical way' [35]. In Doshi-Velez and Kim [11, p. 2], interpretability is 'the ability to explain or to present in understandable terms to a human', however, according to Kim et al. [30, p. 7] 'a method is interpretable if a user can correctly and efficiently predict the methods result'. Some authors use transparency as a synonym for interpretability [35], some use comprehensibility as a synonym [16], then again others draw a distinction between the two [10] (for more information on how the different terms are used in the literature, we refer the reader to [35, 36, 11, 16, 30, 10, 45, 7]). If we tackle this issue in a more fundamental way, we can look at the definition of 'to interpret' or 'interpretation'. The Oxford Learners Dictionary<sup>3</sup> defines it as follows:",
370
- "bbox": [
371
- 212,
372
- 146,
373
- 787,
374
- 311
375
- ],
376
- "page_idx": 4
377
- },
378
- {
379
- "type": "list",
380
- "sub_type": "text",
381
- "list_items": [
382
- "to explain the meaning of something",
383
- "to decide that something has a particular meaning and to understand it in this way",
384
- "- to translate one language into another as it is spoken",
385
- "- the particular way in which something is understood or explained"
386
- ],
387
- "bbox": [
388
- 225,
389
- 320,
390
- 782,
391
- 393
392
- ],
393
- "page_idx": 4
394
- },
395
- {
396
- "type": "text",
397
- "text": "Seeing that, according to the definition, interpretation contains an explanation, we can look at the definition for 'to explain'/'explanation':",
398
- "bbox": [
399
- 214,
400
- 402,
401
- 785,
402
- 433
403
- ],
404
- "page_idx": 4
405
- },
406
- {
407
- "type": "list",
408
- "sub_type": "text",
409
- "list_items": [
410
- "- to tell somebody about something in a way that makes it easy to understand",
411
- "to give a reason, or be a reason, for something",
412
- "- a statement, fact, or situation that tells you why something happened",
413
- "- a statement or piece of writing that tells you how something works or makes something easier to understand"
414
- ],
415
- "bbox": [
416
- 225,
417
- 440,
418
- 784,
419
- 513
420
- ],
421
- "page_idx": 4
422
- },
423
- {
424
- "type": "text",
425
- "text": "Both definitions share the notion of conveying the reason and meaning of something in order to make someone understand, but while an explanation is focused on what to explain, an interpretation has the additional value of considering how to explain something; it translates and conveys the information in a way that is more easily understood. And that is, in our opinion, essential in the frame of XAI/XRL: not only extracting the necessary information, but also presenting it in an appropriate manner, translating it from the 'raw data' into something humans and especially laypersons can understand.",
426
- "bbox": [
427
- 212,
428
- 523,
429
- 787,
430
- 643
431
- ],
432
- "page_idx": 4
433
- },
434
- {
435
- "type": "text",
436
- "text": "So, because we deem a shared consensus on the nomenclature important, we suggest the use of this one uniform term, interpretability, to refer to the ability to not only extract or generate explanations for the decisions of the model, but also to present this information in a way that is understandable by human (non-expert) users to, ultimately, enable them to predict a model's behaviour.",
437
- "bbox": [
438
- 212,
439
- 645,
440
- 787,
441
- 720
442
- ],
443
- "page_idx": 4
444
- },
445
- {
446
- "type": "text",
447
- "text": "2 XAI Taxonomy",
448
- "text_level": 1,
449
- "bbox": [
450
- 214,
451
- 739,
452
- 401,
453
- 758
454
- ],
455
- "page_idx": 4
456
- },
457
- {
458
- "type": "text",
459
- "text": "XAI methods can be categorized based on two factors; first, based on when the information is extracted, the method can be intrinsic or post-hoc, and second, the scope can be either global or local (see figure 3, and figure 4 for examples).",
460
- "bbox": [
461
- 212,
462
- 771,
463
- 785,
464
- 816
465
- ],
466
- "page_idx": 4
467
- },
468
- {
469
- "type": "header",
470
- "text": "Explainable Reinforcement Learning: A Survey",
471
- "bbox": [
472
- 416,
473
- 114,
474
- 732,
475
- 128
476
- ],
477
- "page_idx": 4
478
- },
479
- {
480
- "type": "page_number",
481
- "text": "5",
482
- "bbox": [
483
- 774,
484
- 116,
485
- 785,
486
- 126
487
- ],
488
- "page_idx": 4
489
- },
490
- {
491
- "type": "page_footnote",
492
- "text": "<sup>3</sup> https://www.oxfordlearnersdictionaries.com/",
493
- "bbox": [
494
- 217,
495
- 824,
496
- 532,
497
- 840
498
- ],
499
- "page_idx": 4
500
- },
501
- {
502
- "type": "text",
503
- "text": "Global and local interpretability refer to the scope of the explanation; global models explain the entire, general model behaviour, while local models offer explanations for a specific decision [44]. Global models try to explain the whole logic of a model by inspecting the structures of the model [2, 13]. Local explanations try to answer the question: 'Why did the model make a certain prediction/decision for an instance/for a group of instances?' [44, 2]. They also try to identify the contributions of each feature in the input towards a specific output [13]. Additionally, global interpretability techniques lead to users trusting a model, while local techniques lead to trusting a prediction [13].",
504
- "bbox": [
505
- 212,
506
- 146,
507
- 787,
508
- 282
509
- ],
510
- "page_idx": 5
511
- },
512
- {
513
- "type": "text",
514
- "text": "Intrinsic vs. post-hoc interpretability depend on the time when the explanation is extracted/generated; An intrinsic model is a ML model that is constructed to be inherently interpretable or self-explanatory at the time of training by restricting the complexity of the model [13]. Decision trees, for example, have a simple structure and can be easily understood [44]. Post-hoc interpretability, in contrast, is achieved by analyzing the model after training by creating a second, simpler model, to provide explanations for the original model [13, 44]. Surrogate models or saliency maps are examples for this type [2]. Post-hoc interpretation models can be applied to intrinsic interpretation models, but not necessarily vice versa. Just like the models themselves, these interpretability models also suffer from a transparency-accuracy-trade-off; intrinsic models usually offer accurate explanations, but, due to their simplicity, their prediction performance suffers. Post-hoc interpretability models, in contrast, usually keep the accuracy of the original model intact, but are harder to derive satisfying and simple explanations from [13].",
515
- "bbox": [
516
- 212,
517
- 285,
518
- 787,
519
- 510
520
- ],
521
- "page_idx": 5
522
- },
523
- {
524
- "type": "text",
525
- "text": "Another distinction, which usually coincides with the classification into intrinsic and post-hoc interpretability, is the classification into model-specific or model-agnostic. Techniques are model-specific if they are limited to a specific model or model class [44], and they are model-agnostic if they can be used on any model [44]. As you can also see in figure 3, intrinsic models are model-specific, while post-hoc interpretability models are usually model-agnostic.",
526
- "bbox": [
527
- 212,
528
- 511,
529
- 787,
530
- 602
531
- ],
532
- "page_idx": 5
533
- },
534
- {
535
- "type": "text",
536
- "text": "Adadi and Berrada [2] offer an overview of common explainability techniques and their rough (i.e., neither mutually exclusive nor exhaustive) classifications into these categories. In section 3, we follow their example and provide classifications for a list of selected XRL method papers.",
537
- "bbox": [
538
- 212,
539
- 603,
540
- 787,
541
- 664
542
- ],
543
- "page_idx": 5
544
- },
545
- {
546
- "type": "text",
547
- "text": "3 Non-exhaustive list of XRL methods",
548
- "text_level": 1,
549
- "bbox": [
550
- 214,
551
- 695,
552
- 607,
553
- 710
554
- ],
555
- "page_idx": 5
556
- },
557
- {
558
- "type": "text",
559
- "text": "A literature review was conducted using the database Google Scholar. Certain combinations of keywords were used to select papers; first, 'explainable reinforcement learning', and 'XRL' together with 'reinforcement learning' and 'machine learning' were used. Then, we substituted 'explainable' for common variations used in literature like 'explainable', 'transparent', and 'understandable'. We then scanned the papers for relevance and consulted their citations and reference lists for additional papers. Because we only wanted to focus on current methods, we",
560
- "bbox": [
561
- 212,
562
- 734,
563
- 787,
564
- 840
565
- ],
566
- "page_idx": 5
567
- },
568
- {
569
- "type": "page_number",
570
- "text": "6",
571
- "bbox": [
572
- 217,
573
- 114,
574
- 228,
575
- 126
576
- ],
577
- "page_idx": 5
578
- },
579
- {
580
- "type": "header",
581
- "text": "E. Puiutta & E. Veith",
582
- "bbox": [
583
- 271,
584
- 114,
585
- 421,
586
- 127
587
- ],
588
- "page_idx": 5
589
- },
590
- {
591
- "type": "image",
592
- "img_path": "images/767f27d710d6ea6bce0b7ee7e7211a60a59cdab2eab33a5c38170c2e7ab71f49.jpg",
593
- "image_caption": [
594
- "Fig.3. A pseudo ontology of XAI methods taxonomy. Adapted from Adadi and Berrada [2]."
595
- ],
596
- "image_footnote": [],
597
- "bbox": [
598
- 349,
599
- 167,
600
- 653,
601
- 348
602
- ],
603
- "page_idx": 6
604
- },
605
- {
606
- "type": "text",
607
- "text": "restricted the search to papers from 2010-2020. Table 1 shows the list of selected papers and their classification according to section 2 based on our understanding.",
608
- "bbox": [
609
- 212,
610
- 440,
611
- 784,
612
- 470
613
- ],
614
- "page_idx": 6
615
- },
616
- {
617
- "type": "text",
618
- "text": "For a more extensive demonstration of the different approaches, we chose the latest paper of each quadrant $^4$ and explain them in more detail in the following sections as an example for the different XRL methods.",
619
- "bbox": [
620
- 212,
621
- 470,
622
- 785,
623
- 516
624
- ],
625
- "page_idx": 6
626
- },
627
- {
628
- "type": "text",
629
- "text": "3.1 Method A: Programmatically Interpretable Reinforcement Learning",
630
- "text_level": 1,
631
- "bbox": [
632
- 214,
633
- 537,
634
- 750,
635
- 570
636
- ],
637
- "page_idx": 6
638
- },
639
- {
640
- "type": "text",
641
- "text": "Verma et al. [63] have developed 'PIRL', a Programmatically Interpretable Reinforcement Learning framework, as an alternative to DRL. In DRL, the policies are represented by neural networks, making them very hard (if not impossible) to interpret. The policies in PIRL, on the other hand, while still mimicking the ones from the DRL model, are represented using a high-level, human-readable programming language. Here, the problem stays the same as in traditional RL (i.e., finding a policy that maximises the long-term reward), but in addition, they restrict the vast amount of target policies with the help of a (policy) sketch. To find these policies, they employ a framework which was inspired by imitation learning, called Neurally Directed Program Search (NDPS). This framework first uses DRL to compute a policy which is used as a neural 'oracle' to direct the policy search for a policy that is as close as possible to the neural oracle. Doing this, the performances of the resulting policies are not as high than the ones from the",
642
- "bbox": [
643
- 212,
644
- 578,
645
- 787,
646
- 777
647
- ],
648
- "page_idx": 6
649
- },
650
- {
651
- "type": "header",
652
- "text": "Explainable Reinforcement Learning: A Survey",
653
- "bbox": [
654
- 416,
655
- 114,
656
- 732,
657
- 128
658
- ],
659
- "page_idx": 6
660
- },
661
- {
662
- "type": "page_number",
663
- "text": "7",
664
- "bbox": [
665
- 774,
666
- 116,
667
- 785,
668
- 126
669
- ],
670
- "page_idx": 6
671
- },
672
- {
673
- "type": "page_footnote",
674
- "text": "<sup>4</sup> With the exception of method C in section 3.3 where we present a Linear Model U-Tree method although another paper with a different, but related method was published slightly later. See the last paragraph of that section for our reasoning for this decision.",
675
- "bbox": [
676
- 217,
677
- 782,
678
- 787,
679
- 839
680
- ],
681
- "page_idx": 6
682
- },
683
- {
684
- "type": "image",
685
- "img_path": "images/cfcdc4a64522564774d92b20f883aa3a6c8572253b04f4f0f352247468852b75.jpg",
686
- "image_caption": [
687
- "Intrinsic Explanation (global or local)"
688
- ],
689
- "image_footnote": [],
690
- "bbox": [
691
- 258,
692
- 141,
693
- 403,
694
- 362
695
- ],
696
- "page_idx": 7
697
- },
698
- {
699
- "type": "image",
700
- "img_path": "images/570ce97dd59c9e2d25b523452bec5430ff2d4696dc2f01c464d112eeaf448b1b.jpg",
701
- "image_caption": [
702
- "Post-hoc Global Explanation",
703
- "Post-hoc Local Explanation"
704
- ],
705
- "image_footnote": [],
706
- "bbox": [
707
- 429,
708
- 141,
709
- 573,
710
- 367
711
- ],
712
- "page_idx": 7
713
- },
714
- {
715
- "type": "image",
716
- "img_path": "images/b9757f97980d8a90597520c7c5b39918a290cba2544b77f0d41699fdb7cc0554.jpg",
717
- "image_caption": [
718
- "Fig. 4. An illustration of global vs. local, and intrinsic vs. post-hoc interpretable machine learning techniques, with a deep neural network as an example. On the left, the model and the layers' constraints are built in a way that is inherently interpretable (intrinsic interpretability). The middle and right column show post-hoc interpretability, achieved by a global and local explanation, respectively. The global explanation explains the different representations corresponding to the different layers in general, while the local explanation illustrates the contribution of the different input features to a certain output. Adopted from Du et al. [13]."
719
- ],
720
- "image_footnote": [],
721
- "bbox": [
722
- 601,
723
- 141,
724
- 746,
725
- 329
726
- ],
727
- "page_idx": 7
728
- },
729
- {
730
- "type": "text",
731
- "text": "DRL, but they are still satisfactory and, additionally, more easily interpretable. They evaluate this framework by comparing its performance with, among others, a traditional DRL framework in The Open Racing Car Simulator (TORCS) [65]. Here, the controller has to set five parameters (acceleration, brake, clutch, gear and steering of the car) to steer a car around a race track as fast as possible. Their results show that, while the DRL leads to quicker lap time, the NDPS still outperforms this for several reasons: it shows much smoother driving (i.e., less steering actions) and is less perturbed by noise and blocked sensors. It also is easier to interpret and is better at generalization, i.e., it performs better in situations (in this case, tracks) not encountered during training than a DRL model. Concerning restrictions of this method, it is worth noting that the authors only considered environments with symbolic inputs, not perceptual, in their experiments. They also only considered deterministic policies, not stochastic policies.",
732
- "bbox": [
733
- 212,
734
- 555,
735
- 787,
736
- 752
737
- ],
738
- "page_idx": 7
739
- },
740
- {
741
- "type": "text",
742
- "text": "3.2 Method B: Hierarchical and Interpretable Skill Acquisition in Multi-task Reinforcement Learning",
743
- "text_level": 1,
744
- "bbox": [
745
- 212,
746
- 771,
747
- 774,
748
- 801
749
- ],
750
- "page_idx": 7
751
- },
752
- {
753
- "type": "text",
754
- "text": "Shu et al.[54] proposed a new framework for multi-task RL using hierarchical policies that addressed the issue of solving complex tasks that require different",
755
- "bbox": [
756
- 212,
757
- 809,
758
- 785,
759
- 839
760
- ],
761
- "page_idx": 7
762
- },
763
- {
764
- "type": "page_number",
765
- "text": "8",
766
- "bbox": [
767
- 217,
768
- 114,
769
- 228,
770
- 126
771
- ],
772
- "page_idx": 7
773
- },
774
- {
775
- "type": "header",
776
- "text": "E. Puiutta & E. Veith",
777
- "bbox": [
778
- 271,
779
- 114,
780
- 421,
781
- 126
782
- ],
783
- "page_idx": 7
784
- },
785
- {
786
- "type": "table",
787
- "img_path": "images/95979053726beb093959637c26c99789b1383eb65187687416366a4730170dd6.jpg",
788
- "table_caption": [
789
- "Table 1. Selected XRL methods and their categorization according to the taxonomy described in section 2."
790
- ],
791
- "table_footnote": [
792
- "Notes. Methods in bold are presented in detail in this work."
793
- ],
794
- "table_body": "<table><tr><td>Time\\Scope</td><td>Global</td><td>Local</td></tr><tr><td>Intrinsic</td><td>PIRL (Verma et al. [63])Fuzzy RL policies (Hein et al. [22])</td><td>Hierarchical Policies (Shu et al. [54])</td></tr><tr><td>Post-hoc</td><td>Genetic Programming (Hein et al. [23])Reward Decomposition (Juozapaitis et al. [27])Expected Consequences (van der Waa et al. [64])Soft Decision Trees (Coppens et al. [8])Deep Q-Networks (Zahavy et al. [66])Autonomous Policy Explanation (Hayes and Shah [21])Policy Distillation (Rusu et al. [51])Linear Model U-Trees (Liu et al. [38])</td><td>Interestingness Elements (Se-queira and Gervasio [53])Autonomous Self-Explanation (Fukuchi et al. [17])Structural Causal Model (Madumal et al. [41])Complementary RL (Lee [33])Expected Consequences (van der Waa et al. [64])Soft Decision Trees (Coppens et al. [8])Linear Model U-Trees (Liu et al. [38])</td></tr></table>",
795
- "bbox": [
796
- 215,
797
- 176,
798
- 802,
799
- 522
800
- ],
801
- "page_idx": 8
802
- },
803
- {
804
- "type": "text",
805
- "text": "skills and are composed of several (simpler) subtasks. It is based on and extends multi-task RL with modular policy design through a two-layer hierarchical policy [3] by incorporating less assumptions, and, thus, less restrictions. They trained and evaluated their model with object manipulation tasks in a Minecraft game setting (e.g. finding, getting, or stacking blocks of a certain color), employing advantage actor-critic as policy optimization using off-policy learning. The model is hierarchical because each top-level policy (e.g., 'stack x') consists of several lower levels of actions ('find x' $\\rightarrow$ 'get x' $\\rightarrow$ 'put x', see also figure 5). The novelty of this method is the fact that each task is described by a human instruction (e.g. 'stack blue'), and agents can only access learnt skills through these descriptions, making its policies and decisions inherently human-interpretable.",
806
- "bbox": [
807
- 212,
808
- 575,
809
- 787,
810
- 743
811
- ],
812
- "page_idx": 8
813
- },
814
- {
815
- "type": "text",
816
- "text": "Additionally, a key idea of their framework is that a complex task could be decomposed into several simpler subtasks. If these sub-tasks could be fulfilled by employing an already learnt 'base policy', no new skill had to be learnt; otherwise, it would learn a new skill and perform a different, novel action. To boost efficiency and accuracy, the framework also incorporated a stochastic temporal grammar model that was used to model temporal relationships and priorities of",
817
- "bbox": [
818
- 212,
819
- 750,
820
- 787,
821
- 840
822
- ],
823
- "page_idx": 8
824
- },
825
- {
826
- "type": "header",
827
- "text": "Explainable Reinforcement Learning: A Survey",
828
- "bbox": [
829
- 416,
830
- 114,
831
- 732,
832
- 128
833
- ],
834
- "page_idx": 8
835
- },
836
- {
837
- "type": "page_number",
838
- "text": "9",
839
- "bbox": [
840
- 774,
841
- 116,
842
- 785,
843
- 126
844
- ],
845
- "page_idx": 8
846
- },
847
- {
848
- "type": "image",
849
- "img_path": "images/e7c5d1396aaf7c5329460949e2ac6b51ff1f43c7453debb604f46400ad23a3f5.jpg",
850
- "image_caption": [
851
- "Fig. 5. Example for the multi-level hierarchical policy for the task to stack two blue boxes on top of each other. The top-level policy $(\\pi_3, \\text{in red})$ encompasses the high-level plan 'get blue' $\\rightarrow$ 'find blue' $\\rightarrow$ 'put blue'. Each step (i.e., arrow) either initiates another policy (marked by a different color) or directly executes an action. Adopted from [54]."
852
- ],
853
- "image_footnote": [],
854
- "bbox": [
855
- 217,
856
- 142,
857
- 785,
858
- 357
859
- ],
860
- "page_idx": 9
861
- },
862
- {
863
- "type": "text",
864
- "text": "tasks (e.g., before stacking a block on top of another block, you must first obtain said block).",
865
- "bbox": [
866
- 212,
867
- 455,
868
- 785,
869
- 487
870
- ],
871
- "page_idx": 9
872
- },
873
- {
874
- "type": "text",
875
- "text": "The resulting framework could efficiently learn hierarchical policies and representations in multi-task RL, only needing weak human supervision during training to decide which skills to learn. Compared to a flat policy that directly maps the state and instruction to an action, the hierarchical model showed a higher learning efficiency, could generalize well in new environments, and was inherently interpretable.",
876
- "bbox": [
877
- 212,
878
- 487,
879
- 785,
880
- 578
881
- ],
882
- "page_idx": 9
883
- },
884
- {
885
- "type": "text",
886
- "text": "3.3 Method C: Toward Interpretable Deep Reinforcement Learning with Linear Model U-Trees",
887
- "text_level": 1,
888
- "bbox": [
889
- 214,
890
- 601,
891
- 785,
892
- 633
893
- ],
894
- "page_idx": 9
895
- },
896
- {
897
- "type": "text",
898
- "text": "In Liu et al. [38], a mimic learning framework based on stochastic gradient descent is introduced. This framework approximates the predictions of an accurate, but complex model by mimicking the model's Q-function using Linear Model U-Trees (LMUTs). LMUTs are an extension of Continuous U-Trees (CUTs) which were developed to approximate continuous functions [60]. The difference between CUTs and LMUTs is that, instead of constants, LMUTs have a linear model at each leaf node which also improves its generalization ability. They also generally have fewer leaves and are therefore simpler and more easily understandable. The novelty of this method lies in the fact that other tree representations used for interpretations were only developed for supervised learning, not for DRL.",
899
- "bbox": [
900
- 212,
901
- 642,
902
- 787,
903
- 792
904
- ],
905
- "page_idx": 9
906
- },
907
- {
908
- "type": "text",
909
- "text": "The framework can be used to analyze the importance of input features, extract rules, and calculate 'super-pixels' ('contiguous patch[es] of similar pixels' [49, p. 1]) in image inputs (see table 2 and figure 6 for an example). It has two",
910
- "bbox": [
911
- 212,
912
- 795,
913
- 785,
914
- 842
915
- ],
916
- "page_idx": 9
917
- },
918
- {
919
- "type": "page_number",
920
- "text": "10",
921
- "bbox": [
922
- 217,
923
- 114,
924
- 235,
925
- 126
926
- ],
927
- "page_idx": 9
928
- },
929
- {
930
- "type": "header",
931
- "text": "E. Puiutta & E. Veith",
932
- "bbox": [
933
- 271,
934
- 114,
935
- 421,
936
- 126
937
- ],
938
- "page_idx": 9
939
- },
940
- {
941
- "type": "image",
942
- "img_path": "images/2583f128de077ba17543a0a3c6ec30bc58e2d3a07aa7b0c5d4055c4af7af10aa.jpg",
943
- "image_caption": [
944
- "a)"
945
- ],
946
- "image_footnote": [],
947
- "bbox": [
948
- 272,
949
- 155,
950
- 531,
951
- 289
952
- ],
953
- "page_idx": 10
954
- },
955
- {
956
- "type": "image",
957
- "img_path": "images/9e0d39adec5f760a0dc17b0c55c08f5855b0f8e6ce7986e7706669b8e78d8939.jpg",
958
- "image_caption": [
959
- "b)",
960
- "Fig. 6. Examples of a) rule extraction, and b) super-pixels extracted by the LMUTs in Liu et al. [38]. a) Extracted rules for the mountain Cart scenario. Values at the top are the range of velocity and position and a Q vector ( $Q_{move\\_left}, Q_{no\\_push}, Q_{move\\_right}$ representing the average Q-value). In this example, the cart is moving to the left to the top of the hill. The car should be pushed left ( $Q_{move\\_left}$ is highest) to prepare for the final rush to the target on the right side. b) Super-pixels for the Flappy Bird scenario, marked by red stars. This is the first of four sequential pictures where the focus lies on the location of the bird and obstacles (i.e., pipes). In later pictures the focus would shift towards the bird's location and velocity."
961
- ],
962
- "image_footnote": [],
963
- "bbox": [
964
- 598,
965
- 160,
966
- 756,
967
- 282
968
- ],
969
- "page_idx": 10
970
- },
971
- {
972
- "type": "text",
973
- "text": "approaches to generate data and mimic the Q-function; the first one is an experience training setting which records and generates data during the training process for batch training. It records the state-action pairs and the resulting Q-values as 'soft supervision labels' [38, p. 1] during training. In cases where the mimic learning model cannot be applied to the training process, the second approach can be used: active play setting, which generates mimic data by applying the mature DRL to interact with the environment. Here, an online algorithm is required which uses stochastic gradient descent to dynamically update the linear models as more data is generated.",
974
- "bbox": [
975
- 212,
976
- 460,
977
- 787,
978
- 597
979
- ],
980
- "page_idx": 10
981
- },
982
- {
983
- "type": "table",
984
- "img_path": "images/338d5bde4afdcb47876fb5cdc96c0a21da2f5cccfc317e481f4fc4403f0f52f8.jpg",
985
- "table_caption": [
986
- "Table 2. Examples of feature influences in the Mountain Car and Cart Pole scenario, extracted by the LMUTs in Liu et al. [38]"
987
- ],
988
- "table_footnote": [],
989
- "table_body": "<table><tr><td></td><td>Feature</td><td>Influence</td></tr><tr><td>Mountain</td><td>Velocity</td><td>376.86</td></tr><tr><td>Car</td><td>Position</td><td>171.28</td></tr><tr><td></td><td>Pole Angle</td><td>30541.54</td></tr><tr><td>Cart</td><td>Cart Velocity</td><td>8087.68</td></tr><tr><td>Pole</td><td>Cart Position</td><td>7171.71</td></tr><tr><td></td><td>Pole Velocity At Tip</td><td>2953.73</td></tr></table>",
990
- "bbox": [
991
- 338,
992
- 662,
993
- 658,
994
- 765
995
- ],
996
- "page_idx": 10
997
- },
998
- {
999
- "type": "text",
1000
- "text": "They evaluate the framework in three benchmark environments: Mountain Car, Cart Pole, and Flappy Bird, all simulated by the OpenAI Gym toolkit [6]. Mountain Car and Cart Pole have a discrete action space and a continuous",
1001
- "bbox": [
1002
- 212,
1003
- 794,
1004
- 787,
1005
- 842
1006
- ],
1007
- "page_idx": 10
1008
- },
1009
- {
1010
- "type": "header",
1011
- "text": "Explainable Reinforcement Learning: A Survey",
1012
- "bbox": [
1013
- 416,
1014
- 114,
1015
- 732,
1016
- 128
1017
- ],
1018
- "page_idx": 10
1019
- },
1020
- {
1021
- "type": "page_number",
1022
- "text": "11",
1023
- "bbox": [
1024
- 767,
1025
- 116,
1026
- 782,
1027
- 126
1028
- ],
1029
- "page_idx": 10
1030
- },
1031
- {
1032
- "type": "image",
1033
- "img_path": "images/bd6820f0c8fc7fb53b7fb36d3d05cedc3a5f4ee9bfd4fbf219de5f2c33e0942e.jpg",
1034
- "image_caption": [
1035
- "Fig. 7. Placement of the different tree models on the axes data coverage vs. data optimality. Adapted from Liu et al. [38]."
1036
- ],
1037
- "image_footnote": [],
1038
- "bbox": [
1039
- 321,
1040
- 157,
1041
- 669,
1042
- 320
1043
- ],
1044
- "page_idx": 11
1045
- },
1046
- {
1047
- "type": "text",
1048
- "text": "feature space, while Flappy Bird has two discrete actions and four consecutive images as inputs which result in 80x80 pixels each, so 6400 features. The LMUT method is compared to five other tree methods: a CART regression tree [40], M5 trees [48] with regression tree options (M5-RT) and with model tree options (M5-MT), and Fast Incremental Model Trees (FIMT, [25]) in the basic version, and in the advanced version with adaptive filters (FIMT-AF). The two parameters fidelity (how well the predictions of the mimic model match those from the mimicked model) and play performance (how well the average return in the mimic model matches that of the mimicked model) are used as evaluation metrics. Compared to CART and FIMT (-AF), the LMUT model showed higher fidelity with fewer leaves. For the Cart Pole environment, LMUT showed the highest fidelity, while the M5 trees showed higher performance for the other two environments, although LMUT was comparable. Concerning the play performance, the LMUT model performs best out of all the models. This was likely due to the fact that, contrary to the LMUTs, the M5 and CART trees fit equally over the whole training experience which includes sub-optimal actions in the beginning of training, while the FIMT only adapts to the most recent input and thus cannot build linear models appropriately. In their work, this is represented by sorting the methods on an axis between 'data coverage' (when the mimic model matches the mimicked model on a large section of the state space) and 'data optimality' (when it matches the states most important for performance) with the LMUT at the, as they call it, 'sweet spot between optimality and coverage' (p. 12, see also figure 7).",
1049
- "bbox": [
1050
- 217,
1051
- 410,
1052
- 787,
1053
- 758
1054
- ],
1055
- "page_idx": 11
1056
- },
1057
- {
1058
- "type": "text",
1059
- "text": "There is a similar, newer tree method that uses Soft Decision Trees (SDTs) to extract DRL polices [8]. This method was not presented in this paper because, for one thing, it is less versatile (not offering rule extraction, for example), and for another, it was not clear whether the SDTs actually adequately explained the underlying, mimicked policy for their used benchmark.",
1060
- "bbox": [
1061
- 215,
1062
- 763,
1063
- 785,
1064
- 839
1065
- ],
1066
- "page_idx": 11
1067
- },
1068
- {
1069
- "type": "page_number",
1070
- "text": "12",
1071
- "bbox": [
1072
- 217,
1073
- 114,
1074
- 235,
1075
- 126
1076
- ],
1077
- "page_idx": 11
1078
- },
1079
- {
1080
- "type": "header",
1081
- "text": "E. Puiutta & E. Veith",
1082
- "bbox": [
1083
- 271,
1084
- 114,
1085
- 421,
1086
- 126
1087
- ],
1088
- "page_idx": 11
1089
- },
1090
- {
1091
- "type": "text",
1092
- "text": "3.4 Method D: Explainable RL Through a Causal Lens",
1093
- "text_level": 1,
1094
- "bbox": [
1095
- 215,
1096
- 146,
1097
- 683,
1098
- 161
1099
- ],
1100
- "page_idx": 12
1101
- },
1102
- {
1103
- "type": "text",
1104
- "text": "According to Madumal et al. [41], not only is it important for a RL agent to explain itself and its actions, but also to bear in mind the human user at the receiving end of this explanation. Thus, they took advantage of the prominent theory that humans develop and deploy causal models to explain the world around them, and have adapted a structural causal model (SCM) based on Halpern [20] to mimic this for model-free RL. SCMs represent the world with random exogenous (external) and endogenous (internal) variables, some of which might exert a causal influence over others. These influences can be described with a set of structural equations.",
1105
- "bbox": [
1106
- 212,
1107
- 183,
1108
- 787,
1109
- 319
1110
- ],
1111
- "page_idx": 12
1112
- },
1113
- {
1114
- "type": "text",
1115
- "text": "Since Madumal et al. [41] focused on providing explanations for an agent's behaviour based on the knowledge of how its actions influence the environment, they extend the SCM to include the agent's actions, making it an action influence model. More specifically, they offer 'actuals' and 'counterfactuals', that is, their explanations answer 'Why?' as well as 'Why not?' questions (e.g. 'Why (not) action A?'). This is noticeable because, contrary to most XAI models, it not only considers actual events occurred, but also hypothetical events that did not happen, but could have.",
1116
- "bbox": [
1117
- 212,
1118
- 321,
1119
- 787,
1120
- 443
1121
- ],
1122
- "page_idx": 12
1123
- },
1124
- {
1125
- "type": "text",
1126
- "text": "In more detail, the process of generating explanations consists of three phases; first, an action influence model in the form of a directed acyclic graph (DAG) is required (see figure 8 for an example). Next, since it is difficult to uncover the true structural equations describing the relationships between the variables, this problem is circumvented by only approximating the equations so that they are exact enough to simulate the counterfactuals. In Madumal et al. [41], this is done by multivariate regression models during the training of the RL agent, but any regression learner can be used. The last phase is generating the explanations, more specifically, minimally complete contrastive explanations. This means that, first, instead of including the vectors of variables of ALL nodes in the explanation, it only includes the absolute minimum variables necessary. Moreover, it explains the actual (e.g. 'Why action A?') by simulating the counterfactual (e.g. 'Why not action B?') through the structural equations and finding the differences between the two. The explanation can then be obtained through a simple NLP template (for an example of an explanation, again, see figure 8).",
1127
- "bbox": [
1128
- 212,
1129
- 445,
1130
- 787,
1131
- 671
1132
- ],
1133
- "page_idx": 12
1134
- },
1135
- {
1136
- "type": "text",
1137
- "text": "Madumal et al. [41]'s evaluations of the action influence model show promising results; in a comparison between six RL benchmark domains measuring accuracy ('Can the model accurately predict what the agent will do next?') and performance (training time), the model shows reasonable task prediction accuracy and negligible training time. In a human study, comparing the action influence model with two different models that have learnt how to play Starcraft II ( a real-time strategy game), they assessed task prediction by humans, explanation satisfaction, and trust in the model. Results showed that the action influence model performs significantly better for task prediction and explanation satisfaction, but not for trust. The authors propose that, in order to increase trust, further interaction might be needed. In the future, advancements to the model",
1138
- "bbox": [
1139
- 212,
1140
- 672,
1141
- 787,
1142
- 839
1143
- ],
1144
- "page_idx": 12
1145
- },
1146
- {
1147
- "type": "header",
1148
- "text": "Explainable Reinforcement Learning: A Survey",
1149
- "bbox": [
1150
- 416,
1151
- 114,
1152
- 730,
1153
- 128
1154
- ],
1155
- "page_idx": 12
1156
- },
1157
- {
1158
- "type": "page_number",
1159
- "text": "13",
1160
- "bbox": [
1161
- 767,
1162
- 116,
1163
- 785,
1164
- 126
1165
- ],
1166
- "page_idx": 12
1167
- },
1168
- {
1169
- "type": "image",
1170
- "img_path": "images/28a355dda5b6ecbd3682206ecba93c3f729fc2aa334796b5cd2fb3d3276b39cf.jpg",
1171
- "image_caption": [
1172
- "Fig. 8. Action influence graph of an agent playing Starcraft II, a real-time strategy game with a large state and action space, reduced to four actions and nine state variables for the purpose of generating the explanations. In this case, the causal chain for the actual action 'Why $A_s$ ?' is shown in bold, and the chain for the counterfactual action 'Why not $A_b$ ?' would be $B \\rightarrow A_n \\rightarrow [D_u, D_b]$ . The explanation to the question 'Why not build_barracks ( $A_b$ )?' would be 'Because it is more desirable to do action build_supply_depot ( $A_s$ ) to have more Supply Depots ( $S$ ) as the goal is to have more Destroyed Units ( $D_u$ ) and Destroyed buildings ( $D_b$ )'. Adopted from Madumal et al. [41]."
1173
- ],
1174
- "image_footnote": [],
1175
- "bbox": [
1176
- 261,
1177
- 150,
1178
- 584,
1179
- 321
1180
- ],
1181
- "page_idx": 13
1182
- },
1183
- {
1184
- "type": "text",
1185
- "text": "State variables:",
1186
- "text_level": 1,
1187
- "bbox": [
1188
- 593,
1189
- 148,
1190
- 702,
1191
- 159
1192
- ],
1193
- "page_idx": 13
1194
- },
1195
- {
1196
- "type": "list",
1197
- "sub_type": "text",
1198
- "list_items": [
1199
- "W - Worker number",
1200
- "S - Supply depot number",
1201
- "B - barracks number",
1202
- "E - enemay location",
1203
- "$A_{n}$ - Ally unit number",
1204
- "$A_{b}$ - Ally unit health",
1205
- "$A_{l}$ - Ally unit location",
1206
- "$D_{u}$ - Destoryed units",
1207
- "$D_{b}$ - Destroyed buildings"
1208
- ],
1209
- "bbox": [
1210
- 593,
1211
- 160,
1212
- 750,
1213
- 267
1214
- ],
1215
- "page_idx": 13
1216
- },
1217
- {
1218
- "type": "text",
1219
- "text": "Actions:",
1220
- "text_level": 1,
1221
- "bbox": [
1222
- 594,
1223
- 268,
1224
- 653,
1225
- 277
1226
- ],
1227
- "page_idx": 13
1228
- },
1229
- {
1230
- "type": "list",
1231
- "sub_type": "text",
1232
- "list_items": [
1233
- "$A_{s}$ - build supply depot",
1234
- "$A_{b}$ - build barracks",
1235
- "$A_{m}$ - train offensive unit",
1236
- "$A_{a}$ -attack"
1237
- ],
1238
- "bbox": [
1239
- 594,
1240
- 279,
1241
- 746,
1242
- 325
1243
- ],
1244
- "page_idx": 13
1245
- },
1246
- {
1247
- "type": "text",
1248
- "text": "can be made including extending the model to continuous domains or targeting the explanations to users with different levels of knowledge.",
1249
- "bbox": [
1250
- 212,
1251
- 497,
1252
- 785,
1253
- 527
1254
- ],
1255
- "page_idx": 13
1256
- },
1257
- {
1258
- "type": "text",
1259
- "text": "4 Discussion",
1260
- "text_level": 1,
1261
- "bbox": [
1262
- 214,
1263
- 551,
1264
- 354,
1265
- 566
1266
- ],
1267
- "page_idx": 13
1268
- },
1269
- {
1270
- "type": "text",
1271
- "text": "In this paper, inspired by the current interest in and demand for XAI, we focused on a particular field of AI: Reinforcement Learning. Since most XAI methods are tailored for supervised learning, we wanted to give an overview of methods employed only on RL algorithms, since, to the best of our knowledge, there is no work present at the current point in time addressing this.",
1272
- "bbox": [
1273
- 212,
1274
- 583,
1275
- 785,
1276
- 657
1277
- ],
1278
- "page_idx": 13
1279
- },
1280
- {
1281
- "type": "text",
1282
- "text": "First, we gave an overview over XAI, its importance and issues, and explained related terms. We stressed the importance of a uniform terminology and have thus suggested and defined a term to use from here on out. The focus, however, lay on collecting and providing an overview over the aforementioned XRL methods. Based on Adadi and Terrada [2]'s work, we have sorted selected methods according to the scope of the method and the time of information extraction. We then chose four methods, one for each possible combination of those categorizations, to be presented in detail.",
1283
- "bbox": [
1284
- 212,
1285
- 659,
1286
- 785,
1287
- 779
1288
- ],
1289
- "page_idx": 13
1290
- },
1291
- {
1292
- "type": "text",
1293
- "text": "Looking at the collected XRL methods, it becomes clear that post-hoc interpretability models are much more prevalent than intrinsic models. This makes sense, considering the fact that RL models were developed to solve tasks without human supervision that were too difficult for un-/supervised learning and",
1294
- "bbox": [
1295
- 212,
1296
- 780,
1297
- 785,
1298
- 840
1299
- ],
1300
- "page_idx": 13
1301
- },
1302
- {
1303
- "type": "page_number",
1304
- "text": "14",
1305
- "bbox": [
1306
- 217,
1307
- 114,
1308
- 235,
1309
- 126
1310
- ],
1311
- "page_idx": 13
1312
- },
1313
- {
1314
- "type": "header",
1315
- "text": "E. Puiutta & E. Veith",
1316
- "bbox": [
1317
- 271,
1318
- 114,
1319
- 421,
1320
- 126
1321
- ],
1322
- "page_idx": 13
1323
- },
1324
- {
1325
- "type": "text",
1326
- "text": "are thus highly complex; it is, apparently, easier to simplify an already existing, complex model than it is to construct it to be simple in the first place. It seems that the performance-interpretability trade-off is present not only for the AI methods themselves, but also for the explainability models applied to them.",
1327
- "bbox": [
1328
- 212,
1329
- 146,
1330
- 782,
1331
- 205
1332
- ],
1333
- "page_idx": 14
1334
- },
1335
- {
1336
- "type": "text",
1337
- "text": "The allocation to global vs. local scope, however, seems to be more or less balanced. Of course, the decision to develop a global or a local method is greatly dependent on the complexity of the model and the task being solved, but one should also address the question if one of the two is more useful or preferable to human users. In van der Waa et al.'s study [64], for example, 'human users tend to favor explanations about policy rather than about single actions' (p. 1).",
1338
- "bbox": [
1339
- 212,
1340
- 207,
1341
- 784,
1342
- 296
1343
- ],
1344
- "page_idx": 14
1345
- },
1346
- {
1347
- "type": "text",
1348
- "text": "In general, the form of the explanation and the consideration of the intended target audience is a very important aspect in the development of XAI/XRL methods that is too often neglected [1]. XAI methods need to exhibit context-awareness: adapting to environmental and user changes like the level of experience, cultural or educational differences, domain knowledge, etc., in order to be more human-centric [2]. The form and presentation of the explanation is essential as XAI 'can benefit from existing models of how people define, generate, select, present, and evaluate explanations' [43, p. 59]. For example, research shows that (causal) explanations are contrastive, i.e., humans answer a 'Why X?' question through the answer to the -often only implied- counterfactual 'Why not Y instead?'. This is due to the fact that a complete explanation for a certain event (instead of an explanation against the counterevent) involves a higher cognitive load [43]. Not only that, but a layperson also seems to be more receptive to a contrastive explanation, finding it 'more intuitive and more valuable' [43, p. 20]).",
1349
- "bbox": [
1350
- 212,
1351
- 297,
1352
- 785,
1353
- 507
1354
- ],
1355
- "page_idx": 14
1356
- },
1357
- {
1358
- "type": "text",
1359
- "text": "Out of the papers covered in this work, we highlight Madumal et al.'s work [41], but also Sequeira and Gervasio [53] and van der Waa et al. [64]; of all thirteen selected XRL methods, only five evaluate (non-expert) user satisfaction and/or utility of a method [53, 27, 64, 17, 41], and only three of these offer contrastive explanations [41, 53, 64]. So, of all selected papers, only these free provide a combination of both, not only offering useful contrastive explanations, but also explicitly bearing in mind the human user at the end of an explanation.",
1360
- "bbox": [
1361
- 212,
1362
- 508,
1363
- 785,
1364
- 613
1365
- ],
1366
- "page_idx": 14
1367
- },
1368
- {
1369
- "type": "text",
1370
- "text": "4.1 Conclusion",
1371
- "text_level": 1,
1372
- "bbox": [
1373
- 214,
1374
- 635,
1375
- 354,
1376
- 648
1377
- ],
1378
- "page_idx": 14
1379
- },
1380
- {
1381
- "type": "text",
1382
- "text": "For practical, legal, and psychological reasons, XRL (and XAI) is a quickly advancing field in research that has to address some key challenges to prove even more beneficial and useful. In order to have a common understanding about the goals and capabilities of an XAI/XRL model, a ubiquitous terminology is important; due to this, we suggest the term interpretability to be used from here on out and have defined it as 'the ability to not only extract or generate explanations for the decisions of the model, but also to present this information in a way that is understandable by human (non-expert) users to, ultimately, enable them to predict a model's behaviour'. Different approaches are possible to achieve this interpretability, depending on the scope (global vs. local) and the time of information extraction (intrinsic vs. post-hoc). Due to the complexity of a RL model, post-hoc interpretability seems to be easier to achieve than intrinsic",
1383
- "bbox": [
1384
- 212,
1385
- 657,
1386
- 787,
1387
- 839
1388
- ],
1389
- "page_idx": 14
1390
- },
1391
- {
1392
- "type": "header",
1393
- "text": "Explainable Reinforcement Learning: A Survey",
1394
- "bbox": [
1395
- 416,
1396
- 114,
1397
- 732,
1398
- 128
1399
- ],
1400
- "page_idx": 14
1401
- },
1402
- {
1403
- "type": "page_number",
1404
- "text": "15",
1405
- "bbox": [
1406
- 767,
1407
- 116,
1408
- 785,
1409
- 126
1410
- ],
1411
- "page_idx": 14
1412
- },
1413
- {
1414
- "type": "text",
1415
- "text": "interpretability: simplifying the original model (for example with the use of a surrogate model) instead of developing a simple model in the first place seems to be easier to achieve, but comes at the cost of accuracy/performance.",
1416
- "bbox": [
1417
- 212,
1418
- 146,
1419
- 785,
1420
- 191
1421
- ],
1422
- "page_idx": 15
1423
- },
1424
- {
1425
- "type": "text",
1426
- "text": "What many models lack, however, is to consider the human user at the receiving end of an explanation and to adapt the model to them for maximum benefit. Research shows that contrastive explanations are more intuitive and valuable [43], and there is evidence that human users favor a global approach over a local one [64]. A context-aware system design is also important in order to cater to users with different characteristics, goals, and needs [2]. Especially considering the growing role of AI in critical infrastructures (for example analyzing and controlling power grids with models such as ARL [15, 61]), where the AI model might have to act autonomously or in cooperation with a human user, being able to explain and justify the model's decisions is crucial.",
1427
- "bbox": [
1428
- 212,
1429
- 191,
1430
- 787,
1431
- 343
1432
- ],
1433
- "page_idx": 15
1434
- },
1435
- {
1436
- "type": "text",
1437
- "text": "To achieve this and be able to develop human-centered models for optimal and efficient human-computer interaction and cooperation, a bigger focus on interdisciplinary work is necessary, combining efforts from the fields of AI/ML, psychology, philosophy, and human-computer interaction.",
1438
- "bbox": [
1439
- 212,
1440
- 343,
1441
- 787,
1442
- 405
1443
- ],
1444
- "page_idx": 15
1445
- },
1446
- {
1447
- "type": "text",
1448
- "text": "5 Acknowledgements",
1449
- "text_level": 1,
1450
- "bbox": [
1451
- 214,
1452
- 426,
1453
- 434,
1454
- 445
1455
- ],
1456
- "page_idx": 15
1457
- },
1458
- {
1459
- "type": "text",
1460
- "text": "This work was supported by the German Research Foundation under the grant GZ: JI 140/7-1. We thank our colleagues Stephan Balduin, Johannes Gerster, Lasse Hammer, Daniel Lange and Nils Wenninghoff for their helpful comments and contributions.",
1461
- "bbox": [
1462
- 212,
1463
- 458,
1464
- 787,
1465
- 520
1466
- ],
1467
- "page_idx": 15
1468
- },
1469
- {
1470
- "type": "page_number",
1471
- "text": "16",
1472
- "bbox": [
1473
- 217,
1474
- 114,
1475
- 235,
1476
- 126
1477
- ],
1478
- "page_idx": 15
1479
- },
1480
- {
1481
- "type": "header",
1482
- "text": "E. Puiutta & E. Veith",
1483
- "bbox": [
1484
- 271,
1485
- 114,
1486
- 419,
1487
- 126
1488
- ],
1489
- "page_idx": 15
1490
- },
1491
- {
1492
- "type": "text",
1493
- "text": "Bibliography",
1494
- "text_level": 1,
1495
- "bbox": [
1496
- 424,
1497
- 141,
1498
- 578,
1499
- 162
1500
- ],
1501
- "page_idx": 16
1502
- },
1503
- {
1504
- "type": "list",
1505
- "sub_type": "ref_text",
1506
- "list_items": [
1507
- "[1] Abdul, A., Vermeulen, J., Wang, D., Lim, B.Y., Kankanhalli, M.: Trends and trajectories for explainable, accountable and intelligible systems. In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems - CHI '18. ACM Press (2018)",
1508
- "[2] Adadi, A., Berrada, M.: Peeking inside the black-box: A survey on explainable artificial intelligence (XAI). IEEE Access 6, 52138-52160 (2018), https://doi.org/10.1109/access.2018.2870052, 10.1109/access.2018.2870052",
1509
- "[3] Andreas, J., Klein, D., Levine, S.: Modular multitask reinforcement learning with policy sketches. In: Proceedings of the 34th International Conference on Machine Learning - Volume 70. p. 166175. ICML17, JMLR.org (2017)",
1510
- "[4] Arel, I., Liu, C., Urbanik, T., Kohls, A.: Reinforcement learning-based multi-agent system for network traffic signal control. IET Intelligent Transport Systems 4(2), 128 (2010)",
1511
- "[5] Barto, A.G., Singh, S., Chentanez, N.: Intrinsically motivated learning of hierarchical collections of skills. In: Proceedings of the 3rd International Conference on Development and Learning. pp. 112-19 (2004)",
1512
- "[6] Brockman, G., Cheung, V., Pettersson, L., Schneider, J., Schulman, J., Tang, J., Zaremba, W.: Openai gym (2016)",
1513
- "[7] Chakraborty, S., Tomsett, R., Raghavendra, R., Harborne, D., Alzantot, M., Cerutti, F., Srivastava, M., Preece, A., Julier, S., Rao, R.M., Kelley, T.D., Braines, D., Sensoy, M., Willis, C.J., Gurram, P.: Interpretability of deep learning models: A survey of results. In: 2017 IEEE SmartWorld, Ubiquitous Intelligence & Computing, Advanced & Trusted Computed, Scalable Computing & Communications, Cloud & Big Data Computing, Internet of People and Smart City Innovation (SmartWorld/SCALCOM/UIC/ATC/CBDCom/IOP/SCI). IEEE (2017)",
1514
- "[8] Coppens, Y., Efthymiadis, K., Lenaerts, T., Nowé, A., Miller, T., Weber, R., Magazzeni, D.: Distilling deep reinforcement learning policies in soft decision trees. In: Proceedings of the IJCAI 2019 Workshop on Explainable Artificial Intelligence. pp. 1-6 (2019)",
1515
- "[9] Dehghanpour, K., Wang, Z., Wang, J., Yuan, Y., Bu, F.: A survey on state estimation techniques and challenges in smart distribution systems. IEEE Transactions on Smart Grid 10(2), 2312-2322 (2018)",
1516
- "[10] Doran, D., Schulz, S., Besold, T.R.: What does explainable ai really mean? a new conceptualization of perspectives (2017)",
1517
- "[11] Doshi-Velez, F., Kim, B.: Towards a rigorous science of interpretable machine learning (2017)",
1518
- "[12] Dosilovic, F.K., Brcic, M., Hlupic, N.: Explainable artificial intelligence: A survey. In: 2018 41st International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO). IEEE (2018), 10.23919/mipro.2018.8400040"
1519
- ],
1520
- "bbox": [
1521
- 218,
1522
- 210,
1523
- 785,
1524
- 839
1525
- ],
1526
- "page_idx": 16
1527
- },
1528
- {
1529
- "type": "list",
1530
- "sub_type": "ref_text",
1531
- "list_items": [
1532
- "[13] Du, M., Liu, N., Hu, X.: Techniques for interpretable machine learning. Communications of the ACM 63(1), 68-77 (2019)",
1533
- "[14] European Commission, Parliament: Regulation (EU) 2016/679 of the European parliament and of the council of 27 april 2016 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data, and repealing Directive 95/46/EC (General Data Protection Regulation). OJ L 119, 1-88 (2016)",
1534
- "[15] Fischer, L., Memmen, J.M., Veith, E.M., Tröschel, M.: Adversarial resilience learning—towards systemic vulnerability analysis for large and complex systems. In: The Ninth International Conference on Smart Grids, Green Communications and IT Energy-aware Technologies (ENERGY 2019). vol. 9, pp. 24-32 (2019)",
1535
- "[16] Freitas, A.A.: Comprehensive classification models. ACM SIGKDD Explorations Newsletter 15(1), 1-10 (2014)",
1536
- "[17] Fukuchi, Y., Osawa, M., Yamakawa, H., Imai, M.: Autonomous self-explanation of behavior for interactive reinforcement learning agents. In: Proceedings of the 5th International Conference on Human Agent Interaction - HAI '17. ACM Press (2017)",
1537
- "[18] Glass, A., McGuinness, D.L., Wolverton, M.: Toward establishing trust in adaptive agents. In: Proceedings of the 13th international conference on Intelligent user interfaces - IUI '08. ACM Press (2008)",
1538
- "[19] Goodman, B., Flaxman, S.: European union regulations on algorithmic decision-making and a “right to explanation”. AI Magazine 38(3), 50-57 (2017)",
1539
- "[20] Halpern, J.Y.: Causes and explanations: A structural-model approach. part II: Explanations. The British Journal for the Philosophy of Science 56(4), 889-911 (2005)",
1540
- "[21] Hayes, B., Shah, J.A.: Improving robot controller transparency through autonomous policy explanation. In: Proceedings of the 2017 ACM/IEEE International Conference on Human-Robot Interaction - HRI '17. ACM Press (2017)",
1541
- "[22] Hein, D., Hentschel, A., Runkler, T., Udluft, S.: Particle swarm optimization for generating interpretable fuzzy reinforcement learning policies. Engineering Applications of Artificial Intelligence 65, 87-98 (2017), https://doi.org/10.1016/j.engappai.2017.07.005",
1542
- "[23] Hein, D., Udluft, S., Runkler, T.A.: Interpretable policies for reinforcement learning by genetic programming. Engineering Applications of Artificial Intelligence 76, 158-169 (2018)",
1543
- "[24] Herlocker, J.L., Konstan, J.A., Riedl, J.: Explaining collaborative filtering recommendations. In: Proceedings of the 2000 ACM conference on Computer supported cooperative work - CSCW '00. ACM Press (2000)",
1544
- "[25] Ikonomovska, E., Gama, J., Džeroski, S.: Learning model trees from evolving data streams. Data Mining and Knowledge Discovery 23(1), 128-168 (2010)",
1545
- "[26] Israelsen, B.W., Ahmed, N.R.: “dave...i can assure you ...that it's going to be all right ...” a definition, case for, and survey of algorithmic assurances in human-autonomy trust relationships. ACM Computing Surveys 51(6), 1-37 (2019)"
1546
- ],
1547
- "bbox": [
1548
- 217,
1549
- 145,
1550
- 785,
1551
- 839
1552
- ],
1553
- "page_idx": 17
1554
- },
1555
- {
1556
- "type": "page_number",
1557
- "text": "18",
1558
- "bbox": [
1559
- 217,
1560
- 114,
1561
- 235,
1562
- 126
1563
- ],
1564
- "page_idx": 17
1565
- },
1566
- {
1567
- "type": "header",
1568
- "text": "E. Puiutta & E. Veith",
1569
- "bbox": [
1570
- 271,
1571
- 113,
1572
- 419,
1573
- 126
1574
- ],
1575
- "page_idx": 17
1576
- },
1577
- {
1578
- "type": "list",
1579
- "sub_type": "ref_text",
1580
- "list_items": [
1581
- "[27] Juozapaitis, Z., Koul, A., Fern, A., Erwig, M., Doshi-Velez, F.: Explainable reinforcement learning via reward decomposition. In: Proceedings of the IJCAI 2019 Workshop on Explainable Artificial Intelligence. pp. 47-53 (2019)",
1582
- "[28] Kaelbling, L.P., Littman, M.L., Moore, A.W.: Reinforcement learning: A survey (1996)",
1583
- "[29] Kaelbling, L.P., Littman, M.L., Cassandra, A.R.: Planning and acting in partially observable stochastic domains. Artificial Intelligence 101(1-2), 99-134 (1998)",
1584
- "[30] Kim, B., Khanna, R., Koyejo, O.O.: Examples are not enough, learn to criticize! criticism for interpretability. In: Lee, D.D., Sugiyama, M., Luxburg, U.V., Guyon, I., Garnett, R. (eds.) Advances in Neural Information Processing Systems 29. pp. 2280-2288. Curran Associates, Inc. (2016), http://papers.nips.cc/paper/6300-examples-are-not-enough-learn-to-critici",
1585
- "[31] Kimura, H., Miyazaki, K., Kobayashi, S.: Reinforcement learning in pomdpps with function approximation. In: ICML. vol. 97, pp. 152-160 (1997)",
1586
- "[32] Kober, J., Bagnell, J.A., Peters, J.: Reinforcement learning in robotics: A survey. The International Journal of Robotics Research 32(11), 1238-1274 (2013)",
1587
- "[33] Lee, J.H.: Complementary reinforcement learning towards explainable agents (2019)",
1588
- "[34] Li, Y.: Deep reinforcement learning (2018)",
1589
- "[35] Lipton, Z.C.: The mythos of model interpretability (2016)",
1590
- "[36] Lipton, Z.C.: The mythos of model interpretability. Communications of the ACM 61(10), 36-43 (2018)",
1591
- "[37] Littman, M., Kaelbling, L.: Background on pomdp's (1999), https://cs.brown.edu/research/ai/pomdp/tutorial/pomdp-background.html, [Retrieved: 2020-04-15]",
1592
- "[38] Liu, G., Schulte, O., Zhu, W., Li, Q.: Toward interpretable deep reinforcement learning with linear model u-trees. In: Machine Learning and Knowledge Discovery in Databases, pp. 414-429. Springer International Publishing (2019)",
1593
- "[39] Liu, Y., Gadepalli, K., Norouzi, M., Dahl, G.E., Kohlberger, T., Boyko, A., Venugopalan, S., Timofeev, A., Nelson, P.Q., Corrado, G.S., Hipp, J.D., Peng, L., Stumpe, M.C.: Detecting cancer metastases on gigapixel pathology images (2017)",
1594
- "[40] Loh, W.Y.: Classification and regression trees. WIREs Data Mining and Knowledge Discovery 1(1), 14-23 (2011)",
1595
- "[41] Madumal, P., Miller, T., Sonenberg, L., Vetere, F.: Explainable reinforcement learning through a causal lens (2019)",
1596
- "[42] Martens, D., Vanthienen, J., Verbeke, W., Baesens, B.: Performance of classification models from a user perspective. Decision Support Systems 51(4), 782-793 (2011)",
1597
- "[43] Miller, T.: Explanation in artificial intelligence: Insights from the social sciences. Artificial Intelligence 267, 1-38 (2019)"
1598
- ],
1599
- "bbox": [
1600
- 212,
1601
- 143,
1602
- 795,
1603
- 840
1604
- ],
1605
- "page_idx": 18
1606
- },
1607
- {
1608
- "type": "header",
1609
- "text": "Explainable Reinforcement Learning: A Survey",
1610
- "bbox": [
1611
- 416,
1612
- 114,
1613
- 730,
1614
- 128
1615
- ],
1616
- "page_idx": 18
1617
- },
1618
- {
1619
- "type": "page_number",
1620
- "text": "19",
1621
- "bbox": [
1622
- 767,
1623
- 114,
1624
- 785,
1625
- 126
1626
- ],
1627
- "page_idx": 18
1628
- },
1629
- {
1630
- "type": "list",
1631
- "sub_type": "ref_text",
1632
- "list_items": [
1633
- "[44] Molar, C.: Interpretable machine learning (2018), https://christophm.github.io/interpretable-ml-book/, [Retrieved: 2020-03-31]",
1634
- "[45] Montavon, G., Samek, W., Müller, K.R.: Methods for interpreting and understanding deep neural networks. Digital Signal Processing 73, 1-15 (2018)",
1635
- "[46] Nguyen, A., Yosinski, J., Clune, J.: Deep neural networks are easily fooled: High confidence predictions for unrecognizable images. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)",
1636
- "[47] Nguyen, T.T., Hui, P.M., Harper, F.M., Terveen, L., Konstan, J.A.: Exploring the filter bubble. In: Proceedings of the 23rd international conference on World wide web - WWW '14. ACM Press (2014)",
1637
- "[48] Quinlan, J.R., et al.: Learning with continuous classes. In: 5th Australian joint conference on artificial intelligence. vol. 92, pp. 343-348. World Scientific (1992)",
1638
- "[49] Ribeiro, M.T., Singh, S., Guestrin, C.: \"why should i trust you?\". In: Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining - KDD '16. ACM Press (2016)",
1639
- "[50] Rudin, C.: Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence 1(5), 206-215 (2019)",
1640
- "[51] Rusu, A.A., Colmenarejo, S.G., Gulcehre, C., Desjardins, G., Kirkpatrick, J., Pascanu, R., Mnih, V., Kavukcuoglu, K., Hadsell, R.: Policy distillation (2015)",
1641
- "[52] Schrittwieser, J., Antonoglou, I., Hubert, T., Simonyan, K., Sifre, L., Schmitt, S., Guez, A., Lockhart, E., Hassabis, D., Graepel, T., et al.: Mastering ATARI, go, chess and shogi by planning with a learned model (2019)",
1642
- "[53] Sequeira, P., Gervasio, M.: Interestingness elements for explainable reinforcement learning: Understanding agents' capabilities and limitations (2019)",
1643
- "[54] Shu, T., Xiong, C., Socher, R.: Hierarchical and interpretable skill acquisition in multi-task reinforcement learning (2017)",
1644
- "[55] Silver, D., Schrittwieser, J., Simonyan, K., Antonoglou, I., Huang, A., Guez, A., Hubert, T., Baker, L., Lai, M., Bolton, A., et al.: Mastering the game of go without human knowledge. nature 550(7676), 354-359 (2017)",
1645
- "[56] Szegedy, C., Zaremba, W., Sutskever, I., Bruna, J., Erhan, D., Goodfellow, I., Fergus, R.: Intriguing properties of neural networks (2013)",
1646
- "[57] The European Commission: Communication from the Commission to the European Parliament, the European Council, the Council, the European Economic and Social Committee and the Committee of the Regions. The European Commission (2018), https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe, article; accessed 27.03.2020",
1647
- "[58] The European Commission: Independent High-Level Expert Group on Artificial Intelligence set up by the European Commission. The European Commission (2018), https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe, article; accessed 27.04.2020"
1648
- ],
1649
- "bbox": [
1650
- 217,
1651
- 145,
1652
- 949,
1653
- 839
1654
- ],
1655
- "page_idx": 19
1656
- },
1657
- {
1658
- "type": "page_number",
1659
- "text": "20",
1660
- "bbox": [
1661
- 217,
1662
- 114,
1663
- 235,
1664
- 126
1665
- ],
1666
- "page_idx": 19
1667
- },
1668
- {
1669
- "type": "header",
1670
- "text": "E. Puiutta & E. Veith",
1671
- "bbox": [
1672
- 271,
1673
- 114,
1674
- 419,
1675
- 126
1676
- ],
1677
- "page_idx": 19
1678
- },
1679
- {
1680
- "type": "list",
1681
- "sub_type": "ref_text",
1682
- "list_items": [
1683
- "[59] Tomzcak, K., Pelter, A., Gutierrez, C., Stretch, T., Hilf, D., Donadio, B., Tenhundfeld, N.L., de Visser, E.J., Tossell, C.C.: Let Tesla park your Tesla: Driver trust in a semi-automated car. In: 2019 Systems and Information Engineering Design Symposium (SIEDS). IEEE (2019)",
1684
- "[60] Uther, W.T., Veloso, M.M.: Tree based discretization for continuous state space reinforcement learning. In: Aaiai/iaai. pp. 769-774 (1998)",
1685
- "[61] Veith, E., Fischer, L., Tröschel, M., Niefe, A.: Analyzing cyber-physical systems from the perspective of artificial intelligence. In: Proceedings of the 2019 International Conference on Artificial Intelligence, Robotics and Control. ACM (2019)",
1686
- "[62] Veith, E.M.: Universal Smart Grid Agent for Distributed Power Generation Management. Logos Verlag Berlin GmbH (2017)",
1687
- "[63] Verma, A., Murali, V., Singh, R., Kohli, P., Chaudhuri, S.: Programmatically interpretable reinforcement learning. PMLR 80:5045-5054 (2018)",
1688
- "[64] van der Waa, J., van Diggelen, J., van den Bosch, K., Neerincx, M.: Contrastive explanations for reinforcement learning in terms of expected consequences. IJCAI-18 Workshop on Explainable AI (XAI). Vol. 37. 2018 (2018)",
1689
- "[65] Wymann, B., Espié, E., Guionneau, C., Dimitrakakis, C., Coulom, R., Sumner, A.: Torcs, the open racing car simulator. Software available at http://torcs.sourceforge.net 4(6), 2 (2000)",
1690
- "[66] Zahavy, T., Zrihem, N.B., Mannor, S.: Graying the black box: Understanding dqns (2016)"
1691
- ],
1692
- "bbox": [
1693
- 215,
1694
- 145,
1695
- 785,
1696
- 494
1697
- ],
1698
- "page_idx": 20
1699
- },
1700
- {
1701
- "type": "header",
1702
- "text": "Explainable Reinforcement Learning: A Survey",
1703
- "bbox": [
1704
- 416,
1705
- 114,
1706
- 730,
1707
- 128
1708
- ],
1709
- "page_idx": 20
1710
- },
1711
- {
1712
- "type": "page_number",
1713
- "text": "21",
1714
- "bbox": [
1715
- 767,
1716
- 114,
1717
- 782,
1718
- 126
1719
- ],
1720
- "page_idx": 20
1721
- }
1722
- ]
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:692f0480f52d7383661f57a1a6723b0d90df012cd18b3805ac58b71e9cd174b8
3
+ size 82366
 
data/2020/2005_06xxx/2005.06247/1dbe5a8e-513b-4268-b9d4-023a41101438_model.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/2020/2005_06xxx/2005.06247/full.md CHANGED
@@ -1,288 +1,3 @@
1
- # Explainable Reinforcement Learning: A Survey
2
-
3
- Erika Puiutta $^{1}$ [0000-0003-3796-8931] and Eric MSP Veith $^{1}$ [0000-0003-2487-7475]
4
-
5
- OFFIS - Institute for Information Technology, Escherweg 2, 26121 Oldenburg, Germany erika.puiutta@offis.de, eric.veith@offis.de
6
-
7
- Abstract. Explainable Artificial Intelligence (XAI), i.e., the development of more transparent and interpretable AI models, has gained increased traction over the last few years. This is due to the fact that, in conjunction with their growth into powerful and ubiquitous tools, AI models exhibit one detrimental characteristic: a performance-transparency trade-off. This describes the fact that the more complex a model's inner workings, the less clear it is how its predictions or decisions were achieved. But, especially considering Machine Learning (ML) methods like Reinforcement Learning (RL) where the system learns autonomously, the necessity to understand the underlying reasoning for their decisions becomes apparent. Since, to the best of our knowledge, there exists no single work offering an overview of Explainable Reinforcement Learning (XRL) methods, this survey attempts to address this gap. We give a short summary of the problem, a definition of important terms, and offer a classification and assessment of current XRL methods. We found that a) the majority of XRL methods function by mimicking and simplifying a complex model instead of designing an inherently simple one, and b) XRL (and XAI) methods often neglect to consider the human side of the equation, not taking into account research from related fields like psychology or philosophy. Thus, an interdisciplinary effort is needed to adapt the generated explanations to a (non-expert) human user in order to effectively progress in the field of XRL and XAI in general.
8
-
9
- Keywords: Machine Learning $\cdot$ Explainable $\cdot$ Reinforcement Learning $\cdot$ Human-Computer Interaction $\cdot$ Interpretable.
10
-
11
- # 1 Introduction
12
-
13
- Over the past decades, AI has become ubiquitous in many areas of our everyday lives. Especially Machine Learning (ML) as one branch of AI has numerous fields of application, be it transportation [59], advertisement and content recommendation [47], or medicine [39]. Unfortunately, the more powerful and flexible those models are, the more opaque they become, essentially making them black boxes (see figure 1). This trade-off is referred to by different terms in the literature, e.g. readability-performance trade-off [12], accuracy-comprehensibility trade-off [16], or accuracy-interpretability trade-off [50]. This work aims to, first, establish the need for explainable AI in general and explainable RL specifically. After that, the general concept of RL is briefly explained and the most important terms
14
-
15
- ![](images/1abb04a364c8381831aa3120bcb6343685c3f2b57697196d76a841bfe6cc7b9d.jpg)
16
- Fig. 1. Schematic representation of the performance-readability trade-off. Simpler, linear models are easy to understand and interpret, but suffer from a lack of performance, while non-linear, more flexible models are too complex to be understood easily. Adopted from Martens et al. [42].
17
-
18
- related to XAI are defined. Then, a classification of XAI models is presented and selected XRL models are sorted into these categories. Since there already is an abundance of sources on XAI but less so about XRL specifically, the focus of this work lies on providing information about and presenting sample methods of XRL models<sup>1</sup>. Thus, we present one method for each category in more detail and give a critical evaluation over the existing XRL methods.
19
-
20
- # 1.1 The importance of explainability
21
-
22
- Why is explainability so crucial? First, there is one obvious psychology-related reason: 'if the users do not trust a model or a prediction, they will not use it' [49, p. 1]. Trust is an essential prerequisite of using a model or system [26, 12], and transparency has been identified as one key component both in increasing users' trust [18], as well as users' acceptance of a system [24] (for a formal definition of transparency and related terms, see section 1.3). Transparency also justifies a system's decisions and enables them to be fair and ethical [2]. Thus, in order to confidently use a system, it needs to be trusted, and in order to be trusted, it needs to be transparent and its decisions need to be justifiable.
23
-
24
- Second, AI technologies have become an essential part in almost all domains of Cyber-Physical Systems (CPSs). Reasons include the thrive for increased efficiency, business model innovations, or the necessity to accommodate volatile parts of today's critical infrastructures, such as a high share of renewable energy sources. In time, AI technologies evolved from being an additional input to an otherwise soundly defined control system, to increasing the state awareness of
25
-
26
- a CPS—e.g., Neural State Estimation [9]—to fully decentralized, but still rule-governed systems—such as the Universal Smart Grid Agent [62]—, to a system where all behavior originates from machine learning. AlphaGo, AlphaGo Zero, and MuZero are probably the most widely-known representatives of the last category [55, 52], but for CPS analysis and operation, Adversarial Resilience Learning (ARL) has emerged as a novel methodology based on DRL [15, 61]. It is specifically designed to analyse and control critical infrastructures; obviously, explainability is paramount here.
27
-
28
- There is also a legal component to be considered; the EU General Data Protection Regulation (GDPR) [14], which came into effect in May 2018, aims to ensure a 'right to explanation' [19, p. 1] concerning automated decision-making and profiling. It states that '[...] such processing should subject to suitable safeguards, which should include [...] the right to obtain human intervention [...] [and] an explanation of the decision reached after such assessment' [14, recital 71]. Additionally, the European Commission set out an AI strategy with transparency and accountability as important principles to be respected [57], and in their Guidelines on trustworthy AI [58] they state seven key requirements, with transparency and accountability as two of them.
29
-
30
- Finally, there are important practical reasons to consider; despite the increasing efficiency and versatility of AI, its incomprehensibility reduces its usefulness, since 'incomprehensible decision-making can still be effective, but its effectiveness does not mean that it cannot be faulty' [33, p. 1]. For example, in [56], neural nets successfully learnt to classify pictures but could be led to misclassification by (to humans) nearly imperceptible perturbations, and in [46], deep neural nets classified unrecognizable images with $>99\%$ certainty. This shows that a high level of effectiveness (under standard conditions) or even confidence does not imply that the decisions are correct or based on appropriately-learnt data.
31
-
32
- Bearing this in mind, and considering the fact that, nowadays, AI can act increasingly autonomous, explaining and justifying the decisions is now more crucial than ever, especially in the domain of RL where an agent learns by itself, without human interaction.
33
-
34
- # 1.2 Reinforcement Learning
35
-
36
- Reinforcement Learning is a trial-and-error learning algorithm in which an autonomous agent tries to find the optimal solution to a problem through automated learning [53]. Possible applications for the use of RL are teaching neural networks to play games like Go [55], teaching robots to perform certain tasks [32], or intelligent transport systems [4]. RL is usually introduced as a Markov Decision Process (MDP) if it satisfies the Markov property: the next state depends only on the current state and the agent's action(s), not on past states $[28]^2$ .
37
-
38
- ![](images/b71c8c1ff9a42b972a90f76059fe74dd9c01ae02c7d5e4b243473ec9ea3ca053.jpg)
39
- Fig. 2. Interaction between agent and environment in RL. The agent performs a certain action which is rewarded by the 'critic' in the environment, and it receives an update on the environment's states. Adapted from Barto et al. [5]
40
-
41
- The learning process is initiated by the agent randomly performing an action which leads to a certain environmental state. This state has a reward assigned to it, set by the designer of the task, depending on how desirable this outcome is (see also figure 2). The algorithm then learns a policy, i.e., a mapping from states to actions, in order to maximize the cumulative reward and to be able to select the optimal action in each situation. For more information on RL, see also [53, 34].
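As a concrete illustration of this agent-environment loop, the sketch below implements tabular Q-learning, one of the simplest RL algorithms. It is a generic textbook example rather than a method from the surveyed papers, and it assumes an environment following the classic OpenAI Gym interface (`reset()`, `step()`, `action_space.n`) with hashable, discrete states.

```python
import random
from collections import defaultdict

def q_learning(env, episodes=500, alpha=0.1, gamma=0.99, epsilon=0.1):
    """Minimal tabular Q-learning sketch (illustrative only).

    Assumes the classic Gym API: env.reset() -> state,
    env.step(action) -> (next_state, reward, done, info),
    and a discrete action space exposed as env.action_space.n.
    """
    n_actions = env.action_space.n
    Q = defaultdict(lambda: [0.0] * n_actions)  # state -> list of action values

    for _ in range(episodes):
        state, done = env.reset(), False
        while not done:
            # Epsilon-greedy selection: explore with probability epsilon, else exploit.
            if random.random() < epsilon:
                action = random.randrange(n_actions)
            else:
                action = max(range(n_actions), key=lambda a: Q[state][a])

            next_state, reward, done, _ = env.step(action)

            # Temporal-difference update towards the bootstrapped target.
            best_next = max(Q[next_state])
            Q[state][action] += alpha * (reward + gamma * best_next - Q[state][action])
            state = next_state

    # Greedy policy derived from the learned Q-values.
    return {s: max(range(n_actions), key=lambda a: q[a]) for s, q in Q.items()}
```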
42
-
43
- # 1.3 Definition of important terms
44
-
45
- As already mentioned in section 1, the more complex a system becomes, the less obvious its inner workings become. Additionally, there is no uniform terminology for this trade-off in the literature; XAI methods use an abundance of related, but distinct terms like transparency, reachability, and so on. This inconsistency can be due to one or both of the following reasons: a) different terms are used in the same sense due to a lack of official definition of these terms, or b) different terms are used because the authors (subjectively) draw a distinction between them, without an official accounting of these differences. In any case, a uniform understanding and definition of what it means when a method is described as 'interpretable' or 'transparent' is important in order to clarify the potential, capacity and intention of a model. This is not an easy task, since there is no unique definition of the different terms to be found in the literature; even for 'interpretability', the concept which is most commonly used, 'the term [...] holds no agreed upon meaning, and yet machine learning conferences frequently publish papers which
46
-
47
- wield the term in a quasi-mathematical way' [35]. In Doshi-Velez and Kim [11, p. 2], interpretability is 'the ability to explain or to present in understandable terms to a human'; however, according to Kim et al. [30, p. 7], 'a method is interpretable if a user can correctly and efficiently predict the method's result'. Some authors use transparency as a synonym for interpretability [35], some use comprehensibility as a synonym [16], while yet others draw a distinction between the two [10] (for more information on how the different terms are used in the literature, we refer the reader to [35, 36, 11, 16, 30, 10, 45, 7]). If we tackle this issue in a more fundamental way, we can look at the definition of 'to interpret' or 'interpretation'. The Oxford Learner's Dictionary<sup>3</sup> defines it as follows:
48
-
49
- - to explain the meaning of something
50
- - to decide that something has a particular meaning and to understand it in this way
51
- - to translate one language into another as it is spoken
52
- - the particular way in which something is understood or explained
53
-
54
- Seeing that, according to the definition, interpretation contains an explanation, we can look at the definition for 'to explain'/'explanation':
55
-
56
- - to tell somebody about something in a way that makes it easy to understand
57
- - to give a reason, or be a reason, for something
58
- - a statement, fact, or situation that tells you why something happened
59
- - a statement or piece of writing that tells you how something works or makes something easier to understand
60
-
61
- Both definitions share the notion of conveying the reason and meaning of something in order to make someone understand, but while an explanation is focused on what to explain, an interpretation has the additional value of considering how to explain something; it translates and conveys the information in a way that is more easily understood. That is, in our opinion, essential in the context of XAI/XRL: not only extracting the necessary information, but also presenting it in an appropriate manner, translating it from the 'raw data' into something humans, and especially laypersons, can understand.
62
-
63
- So, because we deem a shared consensus on the nomenclature important, we suggest the use of this one uniform term, interpretability, to refer to the ability to not only extract or generate explanations for the decisions of the model, but also to present this information in a way that is understandable by human (non-expert) users to, ultimately, enable them to predict a model's behaviour.
64
-
65
- # 2 XAI Taxonomy
66
-
67
- XAI methods can be categorized along two dimensions: first, based on when the information is extracted, a method can be intrinsic or post-hoc, and second, its scope can be either global or local (see figure 3, and figure 4 for examples).
68
-
69
- Global and local interpretability refer to the scope of the explanation; global models explain the entire, general model behaviour, while local models offer explanations for a specific decision [44]. Global models try to explain the whole logic of a model by inspecting the structures of the model [2, 13]. Local explanations try to answer the question: 'Why did the model make a certain prediction/decision for an instance/for a group of instances?' [44, 2]. They also try to identify the contributions of each feature in the input towards a specific output [13]. Additionally, global interpretability techniques lead to users trusting a model, while local techniques lead to trusting a prediction [13].
70
-
71
- Intrinsic vs. post-hoc interpretability depends on the time when the explanation is extracted or generated; an intrinsic model is an ML model that is constructed to be inherently interpretable or self-explanatory at the time of training by restricting the complexity of the model [13]. Decision trees, for example, have a simple structure and can be easily understood [44]. Post-hoc interpretability, in contrast, is achieved by analyzing the model after training, creating a second, simpler model to provide explanations for the original model [13, 44]. Surrogate models or saliency maps are examples of this type [2]. Post-hoc interpretation models can be applied to intrinsic interpretation models, but not necessarily vice versa. Just like the models themselves, these interpretability models also suffer from a transparency-accuracy trade-off; intrinsic models usually offer accurate explanations, but, due to their simplicity, their prediction performance suffers. Post-hoc interpretability models, in contrast, usually keep the accuracy of the original model intact, but it is harder to derive satisfying and simple explanations from them [13].
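To make the intrinsic vs. post-hoc distinction more concrete, the sketch below fits a shallow decision tree as a post-hoc global surrogate for a black-box classifier and computes a simple fidelity score. It is a generic scikit-learn illustration on synthetic data, not one of the surveyed XRL methods.

```python
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier, export_text

# A "black-box" model standing in for any complex learner (synthetic data).
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 4))
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
black_box = MLPClassifier(hidden_layer_sizes=(32, 32), max_iter=500).fit(X, y)

# Post-hoc global surrogate: a shallow tree trained on the black box's
# *predictions* (not the true labels), so its simple structure summarizes
# the overall behaviour of the complex model.
surrogate = DecisionTreeClassifier(max_depth=3).fit(X, black_box.predict(X))
print(export_text(surrogate, feature_names=[f"x{i}" for i in range(4)]))

# Fidelity: how often the surrogate agrees with the black box it explains.
fidelity = (surrogate.predict(X) == black_box.predict(X)).mean()
print(f"Surrogate fidelity: {fidelity:.2%}")
```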
72
-
73
- Another distinction, which usually coincides with the classification into intrinsic and post-hoc interpretability, is the classification into model-specific and model-agnostic. Techniques are model-specific if they are limited to a specific model or model class [44], and they are model-agnostic if they can be used on any model [44]. As can also be seen in figure 3, intrinsic models are model-specific, while post-hoc interpretability models are usually model-agnostic.
74
-
75
- Adadi and Berrada [2] offer an overview of common explainability techniques and their rough (i.e., neither mutually exclusive nor exhaustive) classifications into these categories. In section 3, we follow their example and provide classifications for a list of selected XRL method papers.
76
-
77
- # 3 Non-exhaustive list of XRL methods
78
-
79
- A literature review was conducted using the database Google Scholar. Certain combinations of keywords were used to select papers; first, 'explainable reinforcement learning', and 'XRL' together with 'reinforcement learning' and 'machine learning' were used. Then, we replaced 'explainable' with common variations used in the literature such as 'transparent' and 'understandable'. We then scanned the papers for relevance and consulted their citations and reference lists for additional papers. Because we only wanted to focus on current methods, we
80
-
81
- ![](images/767f27d710d6ea6bce0b7ee7e7211a60a59cdab2eab33a5c38170c2e7ab71f49.jpg)
82
- Fig. 3. A pseudo-ontology of the XAI methods taxonomy. Adapted from Adadi and Berrada [2].
83
-
84
- restricted the search to papers from 2010-2020. Table 1 shows the list of selected papers and their classification according to section 2 based on our understanding.
85
-
86
- For a more extensive demonstration of the different approaches, we chose the latest paper in each quadrant$^4$ and explain it in more detail in the following sections as an example of the different XRL methods.
87
-
88
- # 3.1 Method A: Programmatically Interpretable Reinforcement Learning
89
-
90
- Verma et al. [63] have developed 'PIRL', a Programmatically Interpretable Reinforcement Learning framework, as an alternative to DRL. In DRL, the policies are represented by neural networks, making them very hard (if not impossible) to interpret. The policies in PIRL, on the other hand, while still mimicking the ones from the DRL model, are represented using a high-level, human-readable programming language. Here, the problem stays the same as in traditional RL (i.e., finding a policy that maximises the long-term reward), but in addition, the vast space of target policies is restricted with the help of a (policy) sketch. To find these policies, they employ a framework inspired by imitation learning, called Neurally Directed Program Search (NDPS). This framework first uses DRL to compute a policy, which is then used as a neural 'oracle' to direct the search for a programmatic policy that is as close as possible to the neural oracle. Doing this, the performances of the resulting policies are not as high as those of the
91
-
92
- ![](images/cfcdc4a64522564774d92b20f883aa3a6c8572253b04f4f0f352247468852b75.jpg)
93
- Intrinsic Explanation (global or local)
94
-
95
- ![](images/570ce97dd59c9e2d25b523452bec5430ff2d4696dc2f01c464d112eeaf448b1b.jpg)
96
- Post-hoc Global Explanation
97
- Post-hoc Local Explanation
98
-
99
- ![](images/b9757f97980d8a90597520c7c5b39918a290cba2544b77f0d41699fdb7cc0554.jpg)
100
- Fig. 4. An illustration of global vs. local, and intrinsic vs. post-hoc interpretable machine learning techniques, with a deep neural network as an example. On the left, the model and the layers' constraints are built in a way that is inherently interpretable (intrinsic interpretability). The middle and right columns show post-hoc interpretability, achieved by a global and local explanation, respectively. The global explanation explains the different representations corresponding to the different layers in general, while the local explanation illustrates the contribution of the different input features to a certain output. Adopted from Du et al. [13].
101
-
102
- DRL, but they are still satisfactory and, additionally, more easily interpretable. They evaluate this framework by comparing its performance with, among others, a traditional DRL framework in The Open Racing Car Simulator (TORCS) [65]. Here, the controller has to set five parameters (acceleration, brake, clutch, gear and steering of the car) to steer a car around a race track as fast as possible. Their results show that, while the DRL leads to quicker lap times, the NDPS policy still outperforms it in several respects: it shows much smoother driving (i.e., fewer steering actions) and is less perturbed by noise and blocked sensors. It is also easier to interpret and better at generalization, i.e., it performs better than a DRL model in situations (in this case, tracks) not encountered during training. Concerning restrictions of this method, it is worth noting that the authors only considered environments with symbolic inputs, not perceptual ones, in their experiments. They also only considered deterministic policies, not stochastic ones.
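To give a flavour of what a programmatic, human-readable policy can look like, the snippet below shows a hypothetical PID-like steering rule written in plain Python. PIRL actually synthesizes policies in its own high-level policy language guided by a sketch, so this is only an analogy for illustration; all names and gains here are invented.

```python
def steering_policy(track_pos, angle, prev_error, k_p=0.8, k_d=2.0):
    """Hypothetical, human-readable steering policy (illustrative only).

    Unlike the weight matrices of a DRL policy, every term here can be
    read, audited, and verified line by line.
    """
    error = angle - 0.5 * track_pos                    # deviation from the track centre line
    steer = k_p * error + k_d * (error - prev_error)   # proportional + derivative correction
    steer = max(-1.0, min(1.0, steer))                 # clip to the valid steering range
    return steer, error                                # return error so the caller can feed it back
```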
103
-
104
- # 3.2 Method B: Hierarchical and Interpretable Skill Acquisition in Multi-task Reinforcement Learning
105
-
106
- Shu et al. [54] proposed a new framework for multi-task RL using hierarchical policies that addressed the issue of solving complex tasks that require different
107
-
108
- Table 1. Selected XRL methods and their categorization according to the taxonomy described in section 2.
109
-
110
- <table><tr><td>Time\Scope</td><td>Global</td><td>Local</td></tr><tr><td>Intrinsic</td><td><b>PIRL (Verma et al. [63])</b><br>Fuzzy RL policies (Hein et al. [22])</td><td><b>Hierarchical Policies (Shu et al. [54])</b></td></tr><tr><td>Post-hoc</td><td>Genetic Programming (Hein et al. [23])<br>Reward Decomposition (Juozapaitis et al. [27])<br>Expected Consequences (van der Waa et al. [64])<br>Soft Decision Trees (Coppens et al. [8])<br>Deep Q-Networks (Zahavy et al. [66])<br>Autonomous Policy Explanation (Hayes and Shah [21])<br>Policy Distillation (Rusu et al. [51])<br><b>Linear Model U-Trees (Liu et al. [38])</b></td><td>Interestingness Elements (Sequeira and Gervasio [53])<br>Autonomous Self-Explanation (Fukuchi et al. [17])<br><b>Structural Causal Model (Madumal et al. [41])</b><br>Complementary RL (Lee [33])<br>Expected Consequences (van der Waa et al. [64])<br>Soft Decision Trees (Coppens et al. [8])<br>Linear Model U-Trees (Liu et al. [38])</td></tr></table>
111
-
112
- Notes. Methods in bold are presented in detail in this work.
113
-
114
- skills and are composed of several (simpler) subtasks. It is based on and extends multi-task RL with modular policy design through a two-layer hierarchical policy [3] by incorporating fewer assumptions and, thus, fewer restrictions. They trained and evaluated their model with object manipulation tasks in a Minecraft game setting (e.g. finding, getting, or stacking blocks of a certain color), employing advantage actor-critic for policy optimization with off-policy learning. The model is hierarchical because each top-level policy (e.g., 'stack x') consists of several lower levels of actions ('find x' $\rightarrow$ 'get x' $\rightarrow$ 'put x', see also figure 5). The novelty of this method is the fact that each task is described by a human instruction (e.g. 'stack blue'), and agents can only access learnt skills through these descriptions, making its policies and decisions inherently human-interpretable.
115
-
116
- Additionally, a key idea of their framework is that a complex task can be decomposed into several simpler subtasks. If these subtasks can be fulfilled by employing an already learnt 'base policy', no new skill has to be learnt; otherwise, the agent learns a new skill and performs a different, novel action. To boost efficiency and accuracy, the framework also incorporates a stochastic temporal grammar model that is used to capture temporal relationships and priorities of
117
-
118
- ![](images/e7c5d1396aaf7c5329460949e2ac6b51ff1f43c7453debb604f46400ad23a3f5.jpg)
119
- Fig. 5. Example for the multi-level hierarchical policy for the task to stack two blue boxes on top of each other. The top-level policy $(\pi_3, \text{in red})$ encompasses the high-level plan 'get blue' $\rightarrow$ 'find blue' $\rightarrow$ 'put blue'. Each step (i.e., arrow) either initiates another policy (marked by a different color) or directly executes an action. Adopted from [54].
120
-
121
- tasks (e.g., before stacking a block on top of another block, you must first obtain said block).
122
-
123
- The resulting framework could efficiently learn hierarchical policies and representations in multi-task RL, only needing weak human supervision during training to decide which skills to learn. Compared to a flat policy that directly maps the state and instruction to an action, the hierarchical model showed a higher learning efficiency, could generalize well in new environments, and was inherently interpretable.
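A minimal sketch of this two-level idea is given below, assuming a top-level controller that decomposes an instruction and either reuses an already learnt sub-policy (indexed by its human-readable instruction) or flags that a new skill must be learnt. The class layout, method names, and the decomposition rule are illustrative, not the authors' implementation.

```python
class HierarchicalAgent:
    """Illustrative two-level policy: human-readable instructions index learnt skills."""

    def __init__(self, skills):
        # skills: dict mapping an instruction string, e.g. "find blue",
        # to an already trained sub-policy of the form state -> action.
        self.skills = skills

    def plan(self, instruction):
        # Hypothetical decomposition of a composite task into sub-instructions,
        # mirroring the 'find x' -> 'get x' -> 'put x' structure described above.
        verb, colour = instruction.split()
        if verb == "stack":
            return [f"find {colour}", f"get {colour}", f"put {colour}"]
        return [instruction]

    def act(self, instruction, state):
        # The executed plan itself is human-readable, which is what makes
        # the agent's behaviour inherently interpretable.
        trace = []
        for sub in self.plan(instruction):
            if sub in self.skills:
                trace.append((sub, self.skills[sub](state)))  # reuse a learnt skill
            else:
                trace.append((sub, "<train new skill>"))      # would trigger learning a new skill
        return trace
```

For instance, `HierarchicalAgent({"find blue": some_policy}).act("stack blue", state)` would return a readable trace of the sub-instructions it executed and those for which a new skill would still have to be learnt.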
124
-
125
- # 3.3 Method C: Toward Interpretable Deep Reinforcement Learning with Linear Model U-Trees
126
-
127
- In Liu et al. [38], a mimic learning framework based on stochastic gradient descent is introduced. This framework approximates the predictions of an accurate, but complex model by mimicking the model's Q-function using Linear Model U-Trees (LMUTs). LMUTs are an extension of Continuous U-Trees (CUTs), which were developed to approximate continuous functions [60]. The difference between CUTs and LMUTs is that, instead of constants, LMUTs have a linear model at each leaf node, which also improves their generalization ability. They also generally have fewer leaves and are therefore simpler and more easily understandable. The novelty of this method lies in the fact that other tree representations used for interpretation were only developed for supervised learning, not for DRL.
128
-
129
- The framework can be used to analyze the importance of input features, extract rules, and calculate 'super-pixels' ('contiguous patch[es] of similar pixels' [49, p. 1]) in image inputs (see table 2 and figure 6 for an example). It has two
130
-
131
- ![](images/2583f128de077ba17543a0a3c6ec30bc58e2d3a07aa7b0c5d4055c4af7af10aa.jpg)
132
- a)
133
-
134
- ![](images/9e0d39adec5f760a0dc17b0c55c08f5855b0f8e6ce7986e7706669b8e78d8939.jpg)
135
- b)
136
- Fig. 6. Examples of a) rule extraction, and b) super-pixels extracted by the LMUTs in Liu et al. [38]. a) Extracted rules for the Mountain Car scenario. Values at the top are the ranges of velocity and position and a Q vector ($Q_{move\_left}, Q_{no\_push}, Q_{move\_right}$) representing the average Q-values. In this example, the cart is moving left, towards the top of the hill. The car should be pushed left ($Q_{move\_left}$ is highest) to prepare for the final rush to the target on the right side. b) Super-pixels for the Flappy Bird scenario, marked by red stars. This is the first of four sequential pictures, where the focus lies on the location of the bird and obstacles (i.e., pipes). In later pictures the focus would shift towards the bird's location and velocity.
137
-
138
- approaches to generate data and mimic the Q-function; the first one is an experience training setting, which records and generates data during the training process for batch training. It records the state-action pairs and the resulting Q-values as 'soft supervision labels' [38, p. 1] during training. In cases where the mimic learning model cannot be applied to the training process, the second approach can be used: the active play setting, which generates mimic data by letting the mature DRL model interact with the environment. Here, an online algorithm is required which uses stochastic gradient descent to dynamically update the linear models as more data is generated.
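The active-play idea can be sketched roughly as follows: record states and the per-action Q-values of the trained agent while it plays, then fit one interpretable regressor per action to mimic the Q-function. A plain scikit-learn regression tree stands in for the linear-model U-tree here, and `drl_agent.q_values()` is an assumed interface, so this is a simplified analogy rather than the LMUT algorithm itself.

```python
import numpy as np
from sklearn.tree import DecisionTreeRegressor

def collect_mimic_data(env, drl_agent, episodes=50):
    """Record (state, per-action Q-values) pairs from a trained DRL agent.

    `drl_agent.q_values(state)` is an assumed interface returning one Q-value
    per discrete action; `env` is assumed to follow the classic Gym API.
    """
    states, q_targets = [], []
    for _ in range(episodes):
        state, done = env.reset(), False
        while not done:
            q = drl_agent.q_values(state)
            states.append(state)
            q_targets.append(q)
            state, _, done, _ = env.step(int(np.argmax(q)))  # greedy 'active play'
    return np.array(states), np.array(q_targets)

def fit_mimic_trees(states, q_targets, max_depth=5):
    """Fit one small regression tree per action to mimic that action's Q-values.

    The feature importances of each tree then hint at which inputs drive the
    mimicked policy, analogous to the feature influences reported in table 2.
    """
    return [
        DecisionTreeRegressor(max_depth=max_depth).fit(states, q_targets[:, a])
        for a in range(q_targets.shape[1])
    ]
```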
139
-
140
- Table 2. Examples of feature influences in the Mountain Car and Cart Pole scenario, extracted by the LMUTs in Liu et al. [38]
141
-
142
- <table><tr><td>Scenario</td><td>Feature</td><td>Influence</td></tr><tr><td rowspan="2">Mountain Car</td><td>Velocity</td><td>376.86</td></tr><tr><td>Position</td><td>171.28</td></tr><tr><td rowspan="4">Cart Pole</td><td>Pole Angle</td><td>30541.54</td></tr><tr><td>Cart Velocity</td><td>8087.68</td></tr><tr><td>Cart Position</td><td>7171.71</td></tr><tr><td>Pole Velocity At Tip</td><td>2953.73</td></tr></table>
143
-
144
- They evaluate the framework in three benchmark environments: Mountain Car, Cart Pole, and Flappy Bird, all simulated by the OpenAI Gym toolkit [6]. Mountain Car and Cart Pole have a discrete action space and a continuous
145
-
146
- ![](images/bd6820f0c8fc7fb53b7fb36d3d05cedc3a5f4ee9bfd4fbf219de5f2c33e0942e.jpg)
147
- Fig. 7. Placement of the different tree models on the axes data coverage vs. data optimality. Adapted from Liu et al. [38].
148
-
149
- feature space, while Flappy Bird has two discrete actions and four consecutive images of 80x80 pixels each as input, i.e., 6400 features per image. The LMUT method is compared to five other tree methods: a CART regression tree [40], M5 trees [48] with regression tree options (M5-RT) and with model tree options (M5-MT), and Fast Incremental Model Trees (FIMT, [25]) in the basic version and in the advanced version with adaptive filters (FIMT-AF). The two parameters fidelity (how well the predictions of the mimic model match those of the mimicked model) and play performance (how well the average return of the mimic model matches that of the mimicked model) are used as evaluation metrics. Compared to CART and FIMT(-AF), the LMUT model showed higher fidelity with fewer leaves. For the Cart Pole environment, LMUT showed the highest fidelity, while the M5 trees showed higher performance for the other two environments, although LMUT was comparable. Concerning the play performance, the LMUT model performed best of all the models. This was likely due to the fact that, contrary to the LMUTs, the M5 and CART trees fit equally over the whole training experience, which includes sub-optimal actions at the beginning of training, while the FIMT only adapts to the most recent input and thus cannot build the linear models appropriately. In their work, this is represented by sorting the methods on an axis between 'data coverage' (when the mimic model matches the mimicked model on a large section of the state space) and 'data optimality' (when it matches the states most important for performance), with the LMUT at the, as they call it, 'sweet spot between optimality and coverage' (p. 12, see also figure 7).
150
-
151
- There is a similar, newer tree method that uses Soft Decision Trees (SDTs) to extract DRL policies [8]. This method is not presented in detail in this paper because, for one thing, it is less versatile (not offering rule extraction, for example), and for another, it was not clear whether the SDTs actually adequately explained the underlying, mimicked policy for the benchmark used.
152
-
153
- # 3.4 Method D: Explainable RL Through a Causal Lens
154
-
155
- According to Madumal et al. [41], it is not only important for an RL agent to explain itself and its actions, but also to bear in mind the human user at the receiving end of this explanation. Thus, they took advantage of the prominent theory that humans develop and deploy causal models to explain the world around them, and adapted a structural causal model (SCM) based on Halpern [20] to mimic this for model-free RL. SCMs represent the world with random exogenous (external) and endogenous (internal) variables, some of which may exert a causal influence over others. These influences can be described with a set of structural equations.
156
-
157
- Since Madumal et al. [41] focused on providing explanations for an agent's behaviour based on the knowledge of how its actions influence the environment, they extend the SCM to include the agent's actions, making it an action influence model. More specifically, they offer 'actuals' and 'counterfactuals', that is, their explanations answer 'Why?' as well as 'Why not?' questions (e.g. 'Why (not) action A?'). This is notable because, contrary to most XAI models, it considers not only actual events that occurred, but also hypothetical events that did not happen but could have.
158
-
159
- In more detail, the process of generating explanations consists of three phases; first, an action influence model in the form of a directed acyclic graph (DAG) is required (see figure 8 for an example). Next, since it is difficult to uncover the true structural equations describing the relationships between the variables, this problem is circumvented by only approximating the equations so that they are exact enough to simulate the counterfactuals. In Madumal et al. [41], this is done with multivariate regression models during the training of the RL agent, but any regression learner can be used. The last phase is generating the explanations, more specifically, minimally complete contrastive explanations. This means that, first, instead of including the variable vectors of all nodes in the explanation, only the absolute minimum of variables necessary is included. Moreover, it explains the actual (e.g. 'Why action A?') by simulating the counterfactual (e.g. 'Why not action B?') through the structural equations and finding the differences between the two. The explanation can then be obtained through a simple NLP template (for an example of an explanation, again, see figure 8).
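A rough sketch of this pipeline is given below, assuming the causal graph is supplied by hand and each structural equation is approximated by a fitted regressor. The variable names, the single-parent-set simplification, and the text template are all illustrative and not the authors' implementation.

```python
from sklearn.linear_model import LinearRegression

class ActionInfluenceModel:
    """Toy action influence model: one regressor per (action, affected variable)."""

    def __init__(self, graph):
        # graph: {action: {affected_variable: [parent_variables]}}, given by hand,
        # mirroring the hand-specified DAG in figure 8.
        self.graph = graph
        self.models = {}  # (action, variable) -> fitted regressor

    def fit(self, transitions):
        # transitions: list of (state_dict, action, next_state_dict) tuples
        # collected while the RL agent trains.
        for action, effects in self.graph.items():
            for var, parents in effects.items():
                X = [[s[p] for p in parents] for s, a, _ in transitions if a == action]
                y = [ns[var] for _, a, ns in transitions if a == action]
                if X:
                    self.models[(action, var)] = LinearRegression().fit(X, y)

    def simulate(self, state, action):
        # Approximate structural equations predict how the action changes the
        # variables it influences; untouched variables are carried over.
        result = dict(state)
        for var, parents in self.graph.get(action, {}).items():
            model = self.models.get((action, var))
            if model is not None:
                result[var] = float(model.predict([[state[p] for p in parents]])[0])
        return result

    def why_not(self, state, actual, counterfactual, goal_var):
        # Minimal contrastive explanation via a simple text template.
        a = self.simulate(state, actual)[goal_var]
        b = self.simulate(state, counterfactual)[goal_var]
        return (f"Action '{actual}' was chosen over '{counterfactual}' because it "
                f"leads to {goal_var} = {a:.2f} instead of {b:.2f}.")
```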
160
-
161
- Madumal et al. [41]'s evaluations of the action influence model show promising results; in a comparison across six RL benchmark domains measuring accuracy ('Can the model accurately predict what the agent will do next?') and performance (training time), the model shows reasonable task prediction accuracy and negligible training time. In a human study comparing the action influence model with two different models that have learnt how to play Starcraft II (a real-time strategy game), they assessed task prediction by humans, explanation satisfaction, and trust in the model. Results showed that the action influence model performs significantly better for task prediction and explanation satisfaction, but not for trust. The authors propose that, in order to increase trust, further interaction might be needed. In the future, advancements to the model
162
-
163
- ![](images/28a355dda5b6ecbd3682206ecba93c3f729fc2aa334796b5cd2fb3d3276b39cf.jpg)
164
- Fig. 8. Action influence graph of an agent playing Starcraft II, a real-time strategy game with a large state and action space, reduced to four actions and nine state variables for the purpose of generating the explanations. In this case, the causal chain for the actual action 'Why $A_s$ ?' is shown in bold, and the chain for the counterfactual action 'Why not $A_b$ ?' would be $B \rightarrow A_n \rightarrow [D_u, D_b]$ . The explanation to the question 'Why not build_barracks ( $A_b$ )?' would be 'Because it is more desirable to do action build_supply_depot ( $A_s$ ) to have more Supply Depots ( $S$ ) as the goal is to have more Destroyed Units ( $D_u$ ) and Destroyed buildings ( $D_b$ )'. Adopted from Madumal et al. [41].
165
-
166
- # State variables:
167
-
168
- W - Worker number
169
- S - Supply depot number
170
- B - Barracks number
171
- E - Enemy location
172
- $A_{n}$ - Ally unit number
173
- $A_{b}$ - Ally unit health
174
- $A_{l}$ - Ally unit location
175
- $D_{u}$ - Destroyed units
176
- $D_{b}$ - Destroyed buildings
177
-
178
- # Actions:
179
-
180
- $A_{s}$ - build supply depot
181
- $A_{b}$ - build barracks
182
- $A_{m}$ - train offensive unit
183
- $A_{a}$ - attack
184
-
185
- can be made including extending the model to continuous domains or targeting the explanations to users with different levels of knowledge.
186
-
187
- # 4 Discussion
188
-
189
- In this paper, inspired by the current interest in and demand for XAI, we focused on a particular field of AI: Reinforcement Learning. Since most XAI methods are tailored for supervised learning, we wanted to give an overview of methods employed specifically on RL algorithms, as, to the best of our knowledge, there is currently no work addressing this.
190
-
191
- First, we gave an overview of XAI, its importance and issues, and explained related terms. We stressed the importance of a uniform terminology and have thus suggested and defined a term to use from here on out. The focus, however, lay on collecting and providing an overview of the aforementioned XRL methods. Based on Adadi and Berrada's work [2], we have sorted selected methods according to the scope of the method and the time of information extraction. We then chose four methods, one for each possible combination of those categorizations, to be presented in detail.
192
-
193
- Looking at the collected XRL methods, it becomes clear that post-hoc interpretability models are much more prevalent than intrinsic models. This makes sense, considering the fact that RL models were developed to solve tasks without human supervision that were too difficult for un-/supervised learning and
194
-
195
- are thus highly complex; it is, apparently, easier to simplify an already existing, complex model than it is to construct it to be simple in the first place. It seems that the performance-interpretability trade-off is present not only for the AI methods themselves, but also for the explainability models applied to them.
196
-
197
- The allocation to global vs. local scope, however, seems to be more or less balanced. Of course, the decision to develop a global or a local method greatly depends on the complexity of the model and the task being solved, but one should also address the question of whether one of the two is more useful or preferable to human users. In van der Waa et al.'s study [64], for example, 'human users tend to favor explanations about policy rather than about single actions' (p. 1).
198
-
199
- In general, the form of the explanation and the consideration of the intended target audience are very important aspects in the development of XAI/XRL methods that are too often neglected [1]. XAI methods need to exhibit context-awareness: adapting to environmental and user changes like the level of experience, cultural or educational differences, domain knowledge, etc., in order to be more human-centric [2]. The form and presentation of the explanation is essential, as XAI 'can benefit from existing models of how people define, generate, select, present, and evaluate explanations' [43, p. 59]. For example, research shows that (causal) explanations are contrastive, i.e., humans answer a 'Why X?' question through the answer to the (often only implied) counterfactual 'Why not Y instead?'. This is due to the fact that a complete explanation for a certain event (instead of an explanation against the counterevent) involves a higher cognitive load [43]. Not only that, but a layperson also seems to be more receptive to a contrastive explanation, finding it 'more intuitive and more valuable' [43, p. 20].
200
-
201
- Out of the papers covered in this work, we highlight Madumal et al.'s work [41], but also Sequeira and Gervasio [53] and van der Waa et al. [64]; of all thirteen selected XRL methods, only five evaluate (non-expert) user satisfaction and/or utility of a method [53, 27, 64, 17, 41], and only three of these offer contrastive explanations [41, 53, 64]. So, of all selected papers, only these three provide a combination of both, not only offering useful contrastive explanations, but also explicitly bearing in mind the human user at the receiving end of an explanation.
202
-
203
- # 4.1 Conclusion
204
-
205
- For practical, legal, and psychological reasons, XRL (and XAI) is a quickly advancing field of research that has to address some key challenges to prove even more beneficial and useful. In order to have a common understanding of the goals and capabilities of an XAI/XRL model, a ubiquitous terminology is important; due to this, we suggest the term interpretability to be used from here on out and have defined it as 'the ability to not only extract or generate explanations for the decisions of the model, but also to present this information in a way that is understandable by human (non-expert) users to, ultimately, enable them to predict a model's behaviour'. Different approaches are possible to achieve this interpretability, depending on the scope (global vs. local) and the time of information extraction (intrinsic vs. post-hoc). Due to the complexity of an RL model, post-hoc interpretability seems to be easier to achieve than intrinsic
206
-
207
- interpretability: simplifying the original model (for example with the use of a surrogate model) instead of developing a simple model in the first place seems to be easier to achieve, but comes at the cost of accuracy/performance.
208
-
209
- What many models lack, however, is to consider the human user at the receiving end of an explanation and to adapt the model to them for maximum benefit. Research shows that contrastive explanations are more intuitive and valuable [43], and there is evidence that human users favor a global approach over a local one [64]. A context-aware system design is also important in order to cater to users with different characteristics, goals, and needs [2]. Especially considering the growing role of AI in critical infrastructures (for example analyzing and controlling power grids with models such as ARL [15, 61]), where the AI model might have to act autonomously or in cooperation with a human user, being able to explain and justify the model's decisions is crucial.
210
-
211
- To achieve this and be able to develop human-centered models for optimal and efficient human-computer interaction and cooperation, a bigger focus on interdisciplinary work is necessary, combining efforts from the fields of AI/ML, psychology, philosophy, and human-computer interaction.
212
-
213
- # 5 Acknowledgements
214
-
215
- This work was supported by the German Research Foundation under the grant GZ: JI 140/7-1. We thank our colleagues Stephan Balduin, Johannes Gerster, Lasse Hammer, Daniel Lange and Nils Wenninghoff for their helpful comments and contributions.
216
-
217
- # Bibliography
218
-
219
- [1] Abdul, A., Vermeulen, J., Wang, D., Lim, B.Y., Kankanhalli, M.: Trends and trajectories for explainable, accountable and intelligible systems. In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems - CHI '18. ACM Press (2018)
220
- [2] Adadi, A., Berrada, M.: Peeking inside the black-box: A survey on explainable artificial intelligence (XAI). IEEE Access 6, 52138-52160 (2018), https://doi.org/10.1109/access.2018.2870052
221
- [3] Andreas, J., Klein, D., Levine, S.: Modular multitask reinforcement learning with policy sketches. In: Proceedings of the 34th International Conference on Machine Learning - Volume 70, pp. 166-175. ICML'17, JMLR.org (2017)
222
- [4] Arel, I., Liu, C., Urbanik, T., Kohls, A.: Reinforcement learning-based multi-agent system for network traffic signal control. IET Intelligent Transport Systems 4(2), 128 (2010)
223
- [5] Barto, A.G., Singh, S., Chentanez, N.: Intrinsically motivated learning of hierarchical collections of skills. In: Proceedings of the 3rd International Conference on Development and Learning. pp. 112-119 (2004)
224
- [6] Brockman, G., Cheung, V., Pettersson, L., Schneider, J., Schulman, J., Tang, J., Zaremba, W.: Openai gym (2016)
225
- [7] Chakraborty, S., Tomsett, R., Raghavendra, R., Harborne, D., Alzantot, M., Cerutti, F., Srivastava, M., Preece, A., Julier, S., Rao, R.M., Kelley, T.D., Braines, D., Sensoy, M., Willis, C.J., Gurram, P.: Interpretability of deep learning models: A survey of results. In: 2017 IEEE SmartWorld, Ubiquitous Intelligence & Computing, Advanced & Trusted Computed, Scalable Computing & Communications, Cloud & Big Data Computing, Internet of People and Smart City Innovation (SmartWorld/SCALCOM/UIC/ATC/CBDCom/IOP/SCI). IEEE (2017)
226
- [8] Coppens, Y., Efthymiadis, K., Lenaerts, T., Nowé, A., Miller, T., Weber, R., Magazzeni, D.: Distilling deep reinforcement learning policies in soft decision trees. In: Proceedings of the IJCAI 2019 Workshop on Explainable Artificial Intelligence. pp. 1-6 (2019)
227
- [9] Dehghanpour, K., Wang, Z., Wang, J., Yuan, Y., Bu, F.: A survey on state estimation techniques and challenges in smart distribution systems. IEEE Transactions on Smart Grid 10(2), 2312-2322 (2018)
228
- [10] Doran, D., Schulz, S., Besold, T.R.: What does explainable ai really mean? a new conceptualization of perspectives (2017)
229
- [11] Doshi-Velez, F., Kim, B.: Towards a rigorous science of interpretable machine learning (2017)
230
- [12] Dosilovic, F.K., Brcic, M., Hlupic, N.: Explainable artificial intelligence: A survey. In: 2018 41st International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO). IEEE (2018), 10.23919/mipro.2018.8400040
231
-
232
- [13] Du, M., Liu, N., Hu, X.: Techniques for interpretable machine learning. Communications of the ACM 63(1), 68-77 (2019)
233
- [14] European Commission, Parliament: Regulation (EU) 2016/679 of the European parliament and of the council of 27 april 2016 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data, and repealing Directive 95/46/EC (General Data Protection Regulation). OJ L 119, 1-88 (2016)
234
- [15] Fischer, L., Memmen, J.M., Veith, E.M., Tröschel, M.: Adversarial resilience learning—towards systemic vulnerability analysis for large and complex systems. In: The Ninth International Conference on Smart Grids, Green Communications and IT Energy-aware Technologies (ENERGY 2019). vol. 9, pp. 24-32 (2019)
235
- [16] Freitas, A.A.: Comprehensive classification models. ACM SIGKDD Explorations Newsletter 15(1), 1-10 (2014)
236
- [17] Fukuchi, Y., Osawa, M., Yamakawa, H., Imai, M.: Autonomous self-explanation of behavior for interactive reinforcement learning agents. In: Proceedings of the 5th International Conference on Human Agent Interaction - HAI '17. ACM Press (2017)
237
- [18] Glass, A., McGuinness, D.L., Wolverton, M.: Toward establishing trust in adaptive agents. In: Proceedings of the 13th international conference on Intelligent user interfaces - IUI '08. ACM Press (2008)
238
- [19] Goodman, B., Flaxman, S.: European union regulations on algorithmic decision-making and a “right to explanation”. AI Magazine 38(3), 50-57 (2017)
239
- [20] Halpern, J.Y.: Causes and explanations: A structural-model approach. part II: Explanations. The British Journal for the Philosophy of Science 56(4), 889-911 (2005)
240
- [21] Hayes, B., Shah, J.A.: Improving robot controller transparency through autonomous policy explanation. In: Proceedings of the 2017 ACM/IEEE International Conference on Human-Robot Interaction - HRI '17. ACM Press (2017)
241
- [22] Hein, D., Hentschel, A., Runkler, T., Udluft, S.: Particle swarm optimization for generating interpretable fuzzy reinforcement learning policies. Engineering Applications of Artificial Intelligence 65, 87-98 (2017), https://doi.org/10.1016/j.engappai.2017.07.005
242
- [23] Hein, D., Udluft, S., Runkler, T.A.: Interpretable policies for reinforcement learning by genetic programming. Engineering Applications of Artificial Intelligence 76, 158-169 (2018)
243
- [24] Herlocker, J.L., Konstan, J.A., Riedl, J.: Explaining collaborative filtering recommendations. In: Proceedings of the 2000 ACM conference on Computer supported cooperative work - CSCW '00. ACM Press (2000)
244
- [25] Ikonomovska, E., Gama, J., Džeroski, S.: Learning model trees from evolving data streams. Data Mining and Knowledge Discovery 23(1), 128-168 (2010)
245
- [26] Israelsen, B.W., Ahmed, N.R.: “Dave...I can assure you...that it's going to be all right...” A definition, case for, and survey of algorithmic assurances in human-autonomy trust relationships. ACM Computing Surveys 51(6), 1-37 (2019)
246
-
247
- [27] Juozapaitis, Z., Koul, A., Fern, A., Erwig, M., Doshi-Velez, F.: Explainable reinforcement learning via reward decomposition. In: Proceedings of the IJCAI 2019 Workshop on Explainable Artificial Intelligence. pp. 47-53 (2019)
248
- [28] Kaelbling, L.P., Littman, M.L., Moore, A.W.: Reinforcement learning: A survey (1996)
249
- [29] Kaelbling, L.P., Littman, M.L., Cassandra, A.R.: Planning and acting in partially observable stochastic domains. Artificial Intelligence 101(1-2), 99-134 (1998)
250
- [30] Kim, B., Khanna, R., Koyejo, O.O.: Examples are not enough, learn to criticize! criticism for interpretability. In: Lee, D.D., Sugiyama, M., Luxburg, U.V., Guyon, I., Garnett, R. (eds.) Advances in Neural Information Processing Systems 29. pp. 2280-2288. Curran Associates, Inc. (2016), http://papers.nips.cc/paper/6300-examples-are-not-enough-learn-to-critici
251
- [31] Kimura, H., Miyazaki, K., Kobayashi, S.: Reinforcement learning in POMDPs with function approximation. In: ICML. vol. 97, pp. 152-160 (1997)
252
- [32] Kober, J., Bagnell, J.A., Peters, J.: Reinforcement learning in robotics: A survey. The International Journal of Robotics Research 32(11), 1238-1274 (2013)
253
- [33] Lee, J.H.: Complementary reinforcement learning towards explainable agents (2019)
254
- [34] Li, Y.: Deep reinforcement learning (2018)
255
- [35] Lipton, Z.C.: The mythos of model interpretability (2016)
256
- [36] Lipton, Z.C.: The mythos of model interpretability. Communications of the ACM 61(10), 36-43 (2018)
257
- [37] Littman, M., Kaelbling, L.: Background on POMDPs (1999), https://cs.brown.edu/research/ai/pomdp/tutorial/pomdp-background.html, [Retrieved: 2020-04-15]
258
- [38] Liu, G., Schulte, O., Zhu, W., Li, Q.: Toward interpretable deep reinforcement learning with linear model u-trees. In: Machine Learning and Knowledge Discovery in Databases, pp. 414-429. Springer International Publishing (2019)
259
- [39] Liu, Y., Gadepalli, K., Norouzi, M., Dahl, G.E., Kohlberger, T., Boyko, A., Venugopalan, S., Timofeev, A., Nelson, P.Q., Corrado, G.S., Hipp, J.D., Peng, L., Stumpe, M.C.: Detecting cancer metastases on gigapixel pathology images (2017)
260
- [40] Loh, W.Y.: Classification and regression trees. WIREs Data Mining and Knowledge Discovery 1(1), 14-23 (2011)
261
- [41] Madumal, P., Miller, T., Sonenberg, L., Vetere, F.: Explainable reinforcement learning through a causal lens (2019)
262
- [42] Martens, D., Vanthienen, J., Verbeke, W., Baesens, B.: Performance of classification models from a user perspective. Decision Support Systems 51(4), 782-793 (2011)
263
- [43] Miller, T.: Explanation in artificial intelligence: Insights from the social sciences. Artificial Intelligence 267, 1-38 (2019)
264
-
265
- [44] Molnar, C.: Interpretable machine learning (2018), https://christophm.github.io/interpretable-ml-book/, [Retrieved: 2020-03-31]
266
- [45] Montavon, G., Samek, W., Müller, K.R.: Methods for interpreting and understanding deep neural networks. Digital Signal Processing 73, 1-15 (2018)
267
- [46] Nguyen, A., Yosinski, J., Clune, J.: Deep neural networks are easily fooled: High confidence predictions for unrecognizable images. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)
268
- [47] Nguyen, T.T., Hui, P.M., Harper, F.M., Terveen, L., Konstan, J.A.: Exploring the filter bubble. In: Proceedings of the 23rd international conference on World wide web - WWW '14. ACM Press (2014)
269
- [48] Quinlan, J.R., et al.: Learning with continuous classes. In: 5th Australian joint conference on artificial intelligence. vol. 92, pp. 343-348. World Scientific (1992)
270
- [49] Ribeiro, M.T., Singh, S., Guestrin, C.: "Why should I trust you?": Explaining the predictions of any classifier. In: Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining - KDD '16. ACM Press (2016)
271
- [50] Rudin, C.: Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence 1(5), 206-215 (2019)
272
- [51] Rusu, A.A., Colmenarejo, S.G., Gulcehre, C., Desjardins, G., Kirkpatrick, J., Pascanu, R., Mnih, V., Kavukcuoglu, K., Hadsell, R.: Policy distillation (2015)
273
- [52] Schrittwieser, J., Antonoglou, I., Hubert, T., Simonyan, K., Sifre, L., Schmitt, S., Guez, A., Lockhart, E., Hassabis, D., Graepel, T., et al.: Mastering Atari, Go, chess and shogi by planning with a learned model (2019)
274
- [53] Sequeira, P., Gervasio, M.: Interestingness elements for explainable reinforcement learning: Understanding agents' capabilities and limitations (2019)
275
- [54] Shu, T., Xiong, C., Socher, R.: Hierarchical and interpretable skill acquisition in multi-task reinforcement learning (2017)
276
- [55] Silver, D., Schrittwieser, J., Simonyan, K., Antonoglou, I., Huang, A., Guez, A., Hubert, T., Baker, L., Lai, M., Bolton, A., et al.: Mastering the game of Go without human knowledge. Nature 550(7676), 354-359 (2017)
277
- [56] Szegedy, C., Zaremba, W., Sutskever, I., Bruna, J., Erhan, D., Goodfellow, I., Fergus, R.: Intriguing properties of neural networks (2013)
278
- [57] The European Commission: Communication from the Commission to the European Parliament, the European Council, the Council, the European Economic and Social Committee and the Committee of the Regions. The European Commission (2018), https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe, article; accessed 27.03.2020
279
- [58] The European Commission: Independent High-Level Expert Group on Artificial Intelligence set up by the European Commission. The European Commission (2018), https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe, article; accessed 27.04.2020
280
-
281
- [59] Tomzcak, K., Pelter, A., Gutierrez, C., Stretch, T., Hilf, D., Donadio, B., Tenhundfeld, N.L., de Visser, E.J., Tossell, C.C.: Let Tesla park your Tesla: Driver trust in a semi-automated car. In: 2019 Systems and Information Engineering Design Symposium (SIEDS). IEEE (2019)
282
- [60] Uther, W.T., Veloso, M.M.: Tree based discretization for continuous state space reinforcement learning. In: AAAI/IAAI, pp. 769-774 (1998)
283
- [61] Veith, E., Fischer, L., Tröschel, M., Nieße, A.: Analyzing cyber-physical systems from the perspective of artificial intelligence. In: Proceedings of the 2019 International Conference on Artificial Intelligence, Robotics and Control. ACM (2019)
284
- [62] Veith, E.M.: Universal Smart Grid Agent for Distributed Power Generation Management. Logos Verlag Berlin GmbH (2017)
285
- [63] Verma, A., Murali, V., Singh, R., Kohli, P., Chaudhuri, S.: Programmatically interpretable reinforcement learning. PMLR 80:5045-5054 (2018)
286
- [64] van der Waa, J., van Diggelen, J., van den Bosch, K., Neerincx, M.: Contrastive explanations for reinforcement learning in terms of expected consequences. In: IJCAI-18 Workshop on Explainable AI (XAI) (2018)
287
- [65] Wymann, B., Espié, E., Guionneau, C., Dimitrakakis, C., Coulom, R., Sumner, A.: TORCS, The Open Racing Car Simulator. Software available at http://torcs.sourceforge.net (2000)
288
- [66] Zahavy, T., Zrihem, N.B., Mannor, S.: Graying the black box: Understanding dqns (2016)
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ca7a92e0cafd0443946995c15602dc6f54a803d4227d4b3ca917be0da203075
3
+ size 52804
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/2020/2005_06xxx/2005.06247/images.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:16fd67678863f50f4c85764020b3f65604e7c37bf0c4c0f0c5442a02fd963be8
3
  size 288695
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bbb4f18dd1bdcdcadb184c77328099d325002304909d25513842848be605e54d
3
  size 288695
data/2020/2005_06xxx/2005.06247/layout.json CHANGED
The diff for this file is too large to render. See raw diff