Eric03 commited on
Commit
c8d668a
·
verified ·
1 Parent(s): 76efe78

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. 2010.03147/main_diagram/main_diagram.drawio +1 -0
  2. 2010.03147/main_diagram/main_diagram.pdf +0 -0
  3. 2010.03147/paper_text/intro_method.md +136 -0
  4. 2011.08908/main_diagram/main_diagram.drawio +0 -0
  5. 2011.08908/paper_text/intro_method.md +65 -0
  6. 2101.00265/main_diagram/main_diagram.drawio +0 -0
  7. 2101.00265/paper_text/intro_method.md +112 -0
  8. 2103.01009/main_diagram/main_diagram.drawio +1 -0
  9. 2103.01009/main_diagram/main_diagram.pdf +0 -0
  10. 2103.01009/paper_text/intro_method.md +116 -0
  11. 2104.07586/main_diagram/main_diagram.drawio +0 -0
  12. 2104.07586/paper_text/intro_method.md +37 -0
  13. 2106.03272/main_diagram/main_diagram.drawio +1 -0
  14. 2106.03272/main_diagram/main_diagram.pdf +0 -0
  15. 2106.03272/paper_text/intro_method.md +189 -0
  16. 2108.02866/main_diagram/main_diagram.drawio +1 -0
  17. 2108.02866/main_diagram/main_diagram.pdf +0 -0
  18. 2108.02866/paper_text/intro_method.md +61 -0
  19. 2109.14710/main_diagram/main_diagram.drawio +0 -0
  20. 2109.14710/paper_text/intro_method.md +226 -0
  21. 2110.13947/main_diagram/main_diagram.drawio +0 -0
  22. 2110.13947/paper_text/intro_method.md +135 -0
  23. 2111.04138/main_diagram/main_diagram.drawio +1 -0
  24. 2111.04138/main_diagram/main_diagram.pdf +0 -0
  25. 2111.04138/paper_text/intro_method.md +123 -0
  26. 2111.12892/main_diagram/main_diagram.drawio +0 -0
  27. 2111.12892/paper_text/intro_method.md +59 -0
  28. 2201.08265/main_diagram/main_diagram.drawio +1 -0
  29. 2201.08265/paper_text/intro_method.md +207 -0
  30. 2202.12823/main_diagram/main_diagram.drawio +1 -0
  31. 2202.12823/main_diagram/main_diagram.pdf +0 -0
  32. 2202.12823/paper_text/intro_method.md +126 -0
  33. 2203.10761/main_diagram/main_diagram.drawio +1 -0
  34. 2203.10761/main_diagram/main_diagram.pdf +0 -0
  35. 2203.10761/paper_text/intro_method.md +102 -0
  36. 2203.12997/main_diagram/main_diagram.drawio +0 -0
  37. 2203.12997/paper_text/intro_method.md +89 -0
  38. 2204.09245/main_diagram/main_diagram.drawio +1 -0
  39. 2204.09245/main_diagram/main_diagram.pdf +0 -0
  40. 2204.09245/paper_text/intro_method.md +101 -0
  41. 2205.10442/main_diagram/main_diagram.drawio +1 -0
  42. 2205.10442/main_diagram/main_diagram.pdf +0 -0
  43. 2205.10442/paper_text/intro_method.md +102 -0
  44. 2206.04301/main_diagram/main_diagram.drawio +1 -0
  45. 2206.04301/main_diagram/main_diagram.pdf +0 -0
  46. 2206.04301/paper_text/intro_method.md +111 -0
  47. 2206.07840/main_diagram/main_diagram.drawio +0 -0
  48. 2206.07840/paper_text/intro_method.md +79 -0
  49. 2207.04543/main_diagram/main_diagram.drawio +1 -0
  50. 2207.04543/main_diagram/main_diagram.pdf +0 -0
2010.03147/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2020-10-06T10:56:54.127Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/13.5.7 Chrome/83.0.4103.122 Electron/9.1.2 Safari/537.36" etag="UEc8LGqqSHXknaJpnogC" version="13.5.7" type="device"><diagram id="ovxT7Bu_kcGybfa3Wofj" name="Page-1">7Z1de5s4FoB/jS/jB0l8mMs0TWc6m/aZmXR3p3OThxjZZouNC6Sx59evMAiDJD6MsSCVfdEaQQTWe87ROdKRmKC79e6X0NmuPgUu9idQc3cT9H4CoQkB+Tcp2KcFupkVLEPPTYsKBY/ePzgr1LLSF8/FUenCOAj82NuWC+fBZoPncanMCcPgtXzZIvDLd906S8wVPM4dny/9r+fGq7R0ZmjH8l+xt1zROwMtO7N26MVZQbRy3OC1UITuJ+guDII4/bbe3WE/aTvaLunffag4mz9YiDdxmz/YLJ8+/r38Zj//gT9/Xzgfo6W2uclq+eH4L9kPnkDTJ/W9WwSkWvLU8T5rCvP7S0BP3EQHULfkAmBud8eT5Nsy+f/d/Z9faE3kkdLK0lNZa+T1wjB42bg4eUpATr+uvBg/bp15cvaVyBQpW8VrPzu98Hz/LvCD8PC3CAPXwBYpj+Iw+IYLZ2zTQo6Z36/YTvRH4zDGu0JR1m6/4GCN43BPLqFnKcNMiJEN0+PXo0gges2qKA6zTBIzKVzmVR9BkS8ZqxO4IY7bI/YXpOQ2jsnv9IIN+f7g7HEY9drei8UCzuei9nbNZ9O4UHtDW2vX3rSs9/YGtkBRmHbFG/c2sTjkaO47UeTNy01J2iPc/0UOtKlmGLTga3J2qun0+H3SBlp+tC8e/Y5Dj/weHGaFlU2N3ZJV4xu60JCGoB1pWYh9J/Z+lG2hqHGzO/weeAersaPcbPLL7OPHKGHVoV2uMQpewjnOKimaMr7eckWAebTYCZc45io6oM9bobs0QP0qDadLA9IvIw2k3mGlwWjoRGm/d2LHqos61tfKXjUtjl6eu97QFt0PNNyPFAtu2dTlk34gLqtDuT/ZBBvMdD5ZkeN7y02iU0TIE8l/l/QqHvHXbrMTa891k9sIO7Zj11erMKf0VIwQI8D1VPqFOqrvT0/fv97/6nz5z/rJjj48RX8+ejf0/kVLRNT/MTsMwngVLION498fS5lmOV7zEATbjM//cBzvM/fceYkDxpjtvDi1ZUZ29JVaKvL9aMSSg5IN+6t48LV4UGH5Kpml1qJOTa30wtQa1PlWbc1ma3t4lutBn/scH30UpgSeaEp+WpsxK3u3Mm1GrcALJOy5DW3SHruUI0N8Op0W4I4dbXpPGuWD7HdmNg/2BB9qDHwwNHw4kHlpXbsmqv2zcq4G8XOnI3M29DfobExtyEZbh2GL/sKtRk8E6X07GOLASNcga2ummmYdAzCpYRLig+YoFvgaSDV/AFlM/NpWrS/lcSKzHSmgGinIkeLHgeV23vywpJAUVI0UgiMjZcyuPvZQPjYyBoZv8gZVFGUrZ08BYkBZQ4PitVQESjlzyg5ZDA4K0J5v/KFHawPXGDeY2W++dNwAdZuJNHMLKilWMHm/RqSHn1XTQwg5MkNroqW1QhXF6sV1nNEcOK4Db2e8pj+jSfXj0kYTVTmdkkwm/Z2NeqiclwlH57zoUEE9RJL00GD1EMnVQz41T6yHygURaGzRHrD5IemPDwJY6wlMgvM75UwnG6BDc3BkfIRei0w5LWND9cGR5aNxbZGpF9UhNqobATTenayFpmJ8Z7PQ+Cx1qfEd1Phx5gZoyvVo+VKCHNrA+TdQO7FHU9Fz1McGTZCU+eA8H5a+3R2y/RceaTcWyDkrb1wHzxbClTfmfIaf
Fz15D+zKG8jbNLkrb6DBZydeffQaZECwWEquctCRr5bElDNnwBodMX7Q4uqhlzz00RHjFx1dPb3aYd8RMOPd86ujVztEKJOZeDkVR6zKN786HkiQYydaTnoxWLxPXglLOc3iMngQr1lSYfEZHpWw1HM3+DQPms86FC7AR2GVvFQcC7SsRl6miFcPY4FiXrx/WMNLOXPI5QQMrl8ndF4K8uLmjofmRbcA+YlSOC6dmQGYUdy2mRnNFdFEgoadQ27D0NkXLtsmF0QnPLCJTnuu8vXkS/oEXfNNxII4uwriqYLIbB/UXRDZii4liOx9mgSx/vrLCCK10HIFkRWqLoJZKYjNSW2aHInVueTSjhKrV+VgNUhsX0KiX7vNs7tNoHVjD7g0k9yrkkXfGIOJGIwkty6xK0muItkcTbU5MhvOdefIVoSAXI5X3/FU9twyj87s7coxPln0B1lCOR6SfWkxV5Fka2wM4ni/aS3m/OnO7Hktlk0fKq3FrHecTw+fO6iUD9fL4oiU5si6tN05chqZZ4vKIjlIjDsekkx32JkkV5FsjmpHOayn253j8BppqU3S6Ikk4jYGkU5ykIh1NCRZv7U7ycF10uRT664boknaEC0Xm6HSKk3RKxMYPp0Xy2SUuHUy2uFDzvjJSp13zvzb8nCPwiUfDh9yyTJZxEMR0Rc3JQfzYJ28yePw3XWiVf6AZZCwH3CA2cEV0DSR4nIc6qeUyCHjUuj4JOb1oT3GqV8tWPfASUdM+lfbpaAXU7AeXhoARftu7/ra1Vv42rBPb2tXb0nCxeQWkhLad0sQr2j38Ok3vLx5eL/+4T7s0Aw5XwSvjbuuDa/d+0vmQoZ/vb/Z24Ghu3+YwfKHYwQ3/369GWSU8Myd2oHF7NQOTIsWyHkxVs+DkzpdNnzqK664ijR7qmmmRj/Mk/W3xZFQlgYZqTxTlnSIGFkC4E3JUr4OOpelWTdZggbRrMJb2GzEiNZsatsWd1qSbPE7MtF19ffrZ+y6b3NVPWCjbWDw/QGCGr1KSpcA1J5xYG0qsHpKRwasyvQ3kiLmqPh8g9YXR25MLN8gThZJxfPjmAmfziS5imRzVHy+oS+N5GeOpJNUe76BnTnqTJKfOZJOUu1sN3bmqDvJwXWSOsTXmSP5M0e52Aw2jiUIEO+v24EVvFjaQpSYYKWsXGKCnYrqiEHliFljIybYp6iOmHr7c6CxERNsR1lHTMntwMyRMeP3fW1gppxlRGg2Mmb8rkUNzNSb+WQ2WB6cmfDt9mwkuHFvw/CQ5DM/bN87L7M4TiRNCpNI2VRUzRRSZXum0VbdU7d9XUrDBii07NzwkVsdActVtJ190s2Gii49d1mddtg6dWUmSl2ZHF/z0pS8Qh7T20ZVGlkQOyfa4nkCcuHtEh3tJ5uEAUDTxwqCRGcUioIE9GqZOUs/9dP0M7N6hVaqbJSx6I5lTbXip+yIwJZbMIjqLVWEWiYD9zZVK1KlEw3r6Nmx+4SxbdwaFvtaKtmwWmTctu8Fp6XEHE2ZXtAyp/QljtS96SwQfF35Y8qSCVGS6E9ledksvL60l6uoMylyGAaJI3G8nLgBq0+Bi5Mr/g8=</diagram></mxfile>
2010.03147/main_diagram/main_diagram.pdf ADDED
Binary file (21.1 kB). View file
 
2010.03147/paper_text/intro_method.md ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Open Information Extraction (OpenIE) is an ontology-free information extraction paradigm that generates extractions of the form *(subject; relation; object)*. Built on the principles of domain-independence and scalability [\(Mausam,](#page-9-0) [2016\)](#page-9-0), OpenIE systems extract open relations and arguments from the sentence, which allow them to be
4
+
5
+ used for a wide variety of downstream tasks like Question Answering [\(Yan et al.,](#page-10-0) [2018;](#page-10-0) [Khot et al.,](#page-9-1) [2017\)](#page-9-1), Event Schema Induction [\(Balasubramanian](#page-8-0) [et al.,](#page-8-0) [2013\)](#page-8-0) and Fact Salience [\(Ponza et al.,](#page-10-1) [2018\)](#page-10-1).
6
+
7
+ <span id="page-0-1"></span>
8
+
9
+ | Subject Relation Object | | | | | | | | | | | |
10
+ |-------------------------|-----|---------|----|-------|----|-------|-----|------|------|---------|------|
11
+ | Rome | | | | | | | | | | | |
12
+ | Rome | the | capital | of | Italy | is | known | for | it's | rich | history | [is] |
13
+ | Rome | the | capital | of | Italy | is | known | for | it's | rich | history | [is] |
14
+
15
+ Figure 1: The extractions *(Rome; [is] the capital of; Italy)* and *(Rome; is known for; it's rich history)* can be seen as the output of grid labeling. We additionally introduce a token *[is]* to the input.
16
+
17
+ End-to-end neural systems for OpenIE have been found to be more accurate compared to their nonneural counterparts, which were built on manually defined rules over linguistic pipelines. The two most popular neural OpenIE paradigms are *generation* [\(Cui et al.,](#page-9-2) [2018;](#page-9-2) [Kolluru et al.,](#page-9-3) [2020\)](#page-9-3) and *labeling* [\(Stanovsky et al.,](#page-10-2) [2018;](#page-10-2) [Roy et al.,](#page-10-3) [2019\)](#page-10-3).
18
+
19
+ *Generation* systems generate extractions one word at a time. IMoJIE [\(Kolluru et al.,](#page-9-3) [2020\)](#page-9-3) is a state-of-the-art OpenIE system that re-encodes the partial set of extractions output thus far when generating the next extraction. This captures dependencies among extractions, reducing the overall redundancy of the output set. However, this repeated re-encoding causes a significant reduction in speed, which limits use at Web scale.
20
+
21
+ On the other hand, *labeling*-based systems like RnnOIE [\(Stanovsky et al.,](#page-10-4) [2015\)](#page-10-4) are much faster (150 sentences per second, compared to 3 sentences per second for IMoJIE) but relatively less accurate. They label each word in the sentence as either *S* (Subject), *R* (Relation), *O* (Object) or *N* (None) for each extraction. However, as the extractions are predicted independently, this does not model the inherent dependencies among the extractions.
22
+
23
+ We bridge this trade-off through our proposed
24
+
25
+ <sup>\*</sup>Equal Contribution
26
+
27
+ <span id="page-0-0"></span><sup>1</sup><https://github.com/dair-iitd/openie6>
28
+
29
+ <span id="page-1-0"></span>
30
+
31
+ | | Other signs of lens subluxation include mild conjunctival redness, vitreous humour degeneration, |
32
+ |---------------|-----------------------------------------------------------------------------------------------------|
33
+ | Sentence | and an increase or decrease of anterior chamber depth . |
34
+ | IGL | (Other signs of lens subluxation; include; mild conjunctival redness, vitreous humour degeneration) |
35
+ | IGL | (Other signs of lens subluxation; include; mild conjunctival redness, vitreous humour degeneration, |
36
+ | +Constraints | and an increase or decrease of anterior chamber depth) |
37
+ | IGL | (Other signs of lens subluxation; include; mild conjunctival redness) |
38
+ | +Constraints | (Other signs of lens subluxation; include; vitreous humour degeneration) |
39
+ | +Coordination | (Other signs of lens subluxation; include; an increase of anterior chamber depth) |
40
+ | Analyzer | (Other signs of lens subluxation; include; an decrease of anterior chamber depth) |
41
+
42
+ Table 1: For the given sentence, IGL based OpenIE extractor produces an incomplete extraction. Constraints improve the recall by covering the remaining words. Coordination Analyzer handles hierarchical conjunctions.
43
+
44
+ OpenIE system that is both fast and accurate. It consists of an OpenIE extractor based on a novel iterative labeling-based architecture — Iterative Grid Labeling (IGL). Using this architecture, OpenIE is modeled as a 2-D grid labeling problem of size (M, N) where M is a pre-defined maximum number of extractions and N is the sentence length, as shown in Figure [1.](#page-0-1) Each extraction corresponds to one row in the grid. Iterative assignment of labels in the grid helps IGL capture dependencies among extractions without the need for re-encoding, thus making it much faster than generation-based approaches.
45
+
46
+ While IGL gives high precision, we can further improve recall by incorporating (soft) global coverage constraints on this 2-D grid. We use constrained training [\(Mehta et al.,](#page-10-5) [2018\)](#page-10-5) by adding a penalty term for all constraint violations. This encourages the model to satisfy these constraints during inference as well, leading to improved extraction quality, without affecting running time.
47
+
48
+ Furthermore, we observe that existing neural OpenIE models struggle in handling coordination structures, and do not split conjunctive extractions properly. In response, we first design a new coordination analyzer [\(Ficler and Goldberg,](#page-9-4) [2016b\)](#page-9-4). It is built with the same IGL architecture, by interpreting each row in the 2-D grid as a coordination structure. This leads to a new state of the art on this task, with a 12.3 pts improvement in F1 over previous best reported result [\(Teranishi et al.,](#page-10-6) [2019\)](#page-10-6), and a 1.8 pts gain in F1 over a strong BERT baseline.
49
+
50
+ We then combine the output of our coordination analyzer with our OpenIE extractor, resulting in a further increase in performance (Table [1\)](#page-1-0). Our final OpenIE system — OpenIE6 — consists of IGL-based OpenIE extractor (trained with constraints) and IGL-based coordination analyzer. We evaluate OpenIE6 on four metrics from the literature and find that it exceeds in three of them by at least 4.0 pts in F1. We undertake manual evaluation to
51
+
52
+ reaffirm the gains. In summary, this paper describes OpenIE6, which
53
+
54
+ - is based on our novel IGL architecture,
55
+ - is trained with constraints to improve recall,
56
+ - handles conjunctive sentences with our new state-of-the-art coordination analyzer, which is 12.3 pts better in F1, and
57
+ - is 10× faster compared to current state of the art and improves F1 score by as much as 4.0 pts.
58
+
59
+ # Method
60
+
61
+ Given a sentence with word tokens $\{w_1, w_2, \ldots, w_N\}$, the task of OpenIE is to output a set of extractions, say $\{E_1, E_2, \ldots, E_M\}$,
62
+
63
+ <span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
64
+
65
+ Figure 3: Model architecture for IGL. BERT embeddings of the words are iteratively passed through self-attention layers. $st_1$, $st_2$, $st_3$ refer to the appended tokens *[is]*, *[of]*, *[from]*, respectively. At every iteration, we get an extraction by labeling the words using a fully-connected layer. Embeddings of the generated labels are added to the iterative layer embeddings before passing them to the next iteration.
66
+
67
+ where each extraction is of the form *(subject; relation; object)*. For a labeling-based system, each word is labeled as *S* (Subject), *R* (Relation), *O* (Object), or *N* (None) for every extraction. We model this as a 2-D grid labeling problem of size (M, N), where the words represent the columns and the extractions represent the rows (Figure [2\)](#page-2-0). The output at position $(m, n)$ in the grid, $L_{m,n}$, represents the label assigned to the $n^{th}$ word in the $m^{th}$ extraction.
68
+
69
+ We propose a novel Iterative Grid Labeling (IGL) approach to label this grid, filling up one row after another iteratively. We refer to the OpenIE extractor trained using this approach as IGL-OIE.
70
+
71
+ IGL-OIE is based on a BERT encoder, which computes contextualized embeddings for each word. The input to the BERT encoder is $\{w_1, w_2, \ldots, w_N$, *[is]*, *[of]*, *[from]*$\}$. The last three tokens (referred to as $st_i$ in Figure [3\)](#page-3-0) are appended because, sometimes, OpenIE is required to predict tokens that are not present in the input sentence.[2](#page-3-1) E.g., "*US president Donald Trump gave a speech on Wednesday.*" will have one of the extractions as *(Donald Trump; [is] president [of]; US)*. The appended tokens make such extractions possible in a labeling framework.
72
+
73
+ The contextualized embeddings for each word or appended token are iteratively passed through
74
+
75
+ a 2-layer transformer to get their *IL embeddings* at different levels, until a maximum level M, i.e. a word $w_n$ has a different contextual embedding $IL_{m,n}$ for every row (level) $m$. At every level $m$, each $IL_{m,n}$ is passed through a fully-connected labeling layer to get the labels for words at that level (Figure [3\)](#page-3-0). Embeddings of the predicted labels are added to the *IL embeddings* before passing them to the next iteration. This, in principle, maintains the information of the extractions output so far, and hence can capture dependencies among labels of different extractions. For words that were broken into word-pieces by BERT, only the embedding of the first word-piece is retained for label prediction. We sum the cross-entropy loss between the predicted labels and the gold labels at every level to get the final loss, denoted by JCE.
76
+
77
+ OpenIE systems typically assign a confidence value to an extraction. In IGL, at every level, the respective extraction is assigned a confidence value by adding the log probabilities of the predicted labels (*S*, *R*, and *O*), and normalizing this by the extraction length.
78
+
79
+ We believe that IGL architecture has value beyond OpenIE, and can be helpful in tasks where a set of labelings for a sentence is desired, especially when labelings have dependencies amongst them.[3](#page-3-2) We showcase another application of IGL for the task of coordination analysis in Section [5.](#page-5-0)
80
+
81
+ Our preliminary experiments revealed that IGL-OIE has good precision, but misses out important extractions. In particular, we observed that the set of output extractions did not capture all the information from the sentence (Table [1\)](#page-1-0). We formulate constraints over the 2-D grid of extractions (as shown in Figure [2\)](#page-2-0) which act as an additional form of supervision to improve the coverage. We implement these as soft constraints, by imposing additional violation penalties in the loss function. This biases the model to learn to satisfy the constraints, without explicitly enforcing them at inference time.
82
+
83
+ To describe the constraints, we first define the notion of a *head verb* as all verbs except light verbs (do, be, is, has, etc.). We run a POS tagger on the input sentence, and find all head verbs in the sentence by removing all light verbs.[4](#page-3-3) For example,
84
+
85
+ <span id="page-3-1"></span><sup>2</sup>'is', 'of' and 'from' are the most frequent such tokens.
86
+
87
+ <span id="page-3-2"></span><sup>3</sup> IGL is a generalization of [Ju et al.](#page-9-17) [\(2018\)](#page-9-17). Their model can only label spans which are subsets of one another.
88
+
89
+ <span id="page-3-3"></span><sup>4</sup>We used the light verbs listed by [Jain and Mausam](#page-9-18) [\(2016\)](#page-9-18).
90
+
91
+ <span id="page-4-0"></span>![](_page_4_Figure_0.jpeg)
92
+
93
+ Figure 4: The final OpenIE system. IGL-CA identifies conjunct boundaries by labeling a 2-D grid. This generates simple sentences and CIGL-OIE emits the final extractions.
94
+
95
+ for the sentence, "Obama gained popularity after Oprah endorsed him for the presidency", the head verbs are gained and endorsed. In order to cover all valid extractions like (Obama; gained; popularity) and (Oprah; endorsed him for; the presidency), we design the following coverage constraints:
96
+
97
+ - *POS Coverage* (**POSC**): All words with POS tags as nouns (N), verbs (V), adjectives (JJ), and adverbs (RB) should be part of at least one extraction. E.g. the words *Obama*, *gained*, *popularity*, *Oprah*, *endorsed*, *presidency* must be covered in the set of extractions.
98
+ - Head Verb Coverage (HVC): Each head verb should be present in the relation span of some (but not too many) extractions. E.g. (Obama; gained; popularity), (Obama; gained; presidency) is not a comprehensive set of extractions.
99
+ - Head Verb Exclusivity (HVE): The relation span
100
+ of one extraction can contain at most one head
101
+ verb. E.g. gained popularity after Oprah endorsed is not a good relation as it contains two
102
+ head verbs.
103
+ - Extraction Count (EC): The total number of extractions with head verbs in the relation span must be no fewer than the number of head verbs in the sentence. In the example, there must be at least two extractions containing head verbs, as the sentence itself has two head verbs.
104
+
105
+ **Notation**: We now describe the penalty terms for these constraints. Let $p_n$ be the POS tag of $w_n$ . We define an indicator $x_n^{imp} = 1$ if $p_n \in \{N, V, JJ, RB\}$ , and 0 otherwise. Similarly, let $x_n^{hv} = 1$ denote that $w_n$ is a head verb. At each extraction level m, the model computes $Y_{mn}(k)$ , the probability of assigning the $n^{th}$ word the label $k \in \{S, R, O, N\}$ . We formulate the penalties associated with our constraints as follows:
106
+
107
+ • **POSC** - To ensure that the $n^{th}$ word is covered, we compute its maximum probability $(posc_n)$ of belonging to any extraction. We introduce
108
+
109
+ a penalty if this value is low. This penalty is aggregated over words with important POS tags, $J_{posc} = \sum_{n=1}^{N} x_n^{imp} \cdot posc_n$ , where
110
+
111
+ $$posc_n = 1 - \max_{m \in [1, M]} \left( \max_{k \in \{S, R, O\}} Y_{mn}(k) \right)$$
112
+
113
+ - HVC A penalty is imposed for the $n^{th}$ word, if it is not present in relation of any extraction or if it is present in relation of many extractions. This penalty is aggregated over head verbs, $J_{hvc} = \sum_{n=1}^{N} x_n^{hv} \cdot hvc_n$ , where $hvc_n = \left|1 - \sum_{m=1}^{M} Y_{mn}(R)\right|$ .
114
+ - **HVE** A penalty is imposed if the relation span of an extraction contains more than one head verb. This penalty is summed over all extractions. I.e., $J_{hve} = \sum_{m=1}^{M} hve_m$ , where
115
+
116
+ Verb. This penalty is summed over all extraction i.e.,
117
+ $$J_{hve} = \sum_{m=1}^{M} hve_m$$
118
+ , where $hve_m = \max\left(0, \left(\sum_{n=1}^{N} x_n^{hv} \cdot Y_{mn}(R)\right) - 1\right)$
119
+
120
+ • EC - $ec_m$ denotes the score $\in [0,1]$ of the $m^{th}$ extraction containing a head verb, i.e. $ec_m = \max_{n \in [1,N]} \left( x_n^{hv} \cdot Y_{mn}(R) \right)$ . A penalty is imposed if the sum of these scores is less than the actual number of head verbs in the sentence.
121
+
122
+ $$J_{ec} = \max\left(0, \sum_{n=1}^{N} x_n^{hv} - \sum_{m=1}^{M} ec_m\right)$$
123
+
124
+ Ideally, no constraint violations of HVC and HVE would imply that EC is also never violated. However, as these are soft constraints, this scenario does not materialize in practice. We find that our model performs better and results in fewer constraint violations when trained with POSC, HVC, HVE and EC combined. The full loss function is $J = J_{CE} + \lambda_{posc} J_{posc} + \lambda_{hvc} J_{hvc} + \lambda_{hve} J_{hve} + \lambda_{ec} J_{ec}$ , where $\lambda_{\star}$ are hyperparameters. We refer to the OpenIE extractor trained using this constrained loss as Constrained Iterative Grid Labeling OpenIE Extractor (CIGL-OIE).
125
+
126
+ The model is initially trained without constraints for a fixed *warmup* number of iterations, followed by constrained training till convergence.
127
+
128
+ Coordinated conjunctions (CC) are conjunctions such as *"and"*, *"or"* that connect, or coordinate words, phrases, or clauses (they are called the conjuncts). The goal of coordination analysis is to detect coordination structures — the coordinating conjunctions along with their constituent conjuncts. In this section we build a novel coordination analyzer and use its output downstream for OpenIE.
129
+
130
+ Sentences can have hierarchical coordinations, i.e., some coordination structures nested within the conjunct span of others [\(Saha and Mausam,](#page-10-10) [2018\)](#page-10-10). Therefore, we pose coordination analysis as a hierarchical labeling problem, as illustrated in Figure [4.](#page-4-0) We formulate a 2-D grid labeling problem, where all coordination structures at the same hierarchical level are predicted in the same row.
131
+
132
+ Specifically, we define a grid of size (M, N), where M is the maximum depth of hierarchy and N is the number of words in the sentence. The value at the $(m, n)^{th}$ position in the grid represents the label assigned to the $n^{th}$ word at the $m^{th}$ hierarchical level, which can be *CC* (coordinated conjunction), *CONJ* (belonging to a conjunct span), or *N* (None). Using IGL architecture for this grid gives an end-to-end Coordination Analyzer that can detect multiple coordination structures, with two or more conjuncts. We refer to this Coordination Analyzer as IGL-CA.
133
+
134
+ Coordination Analyzer in OpenIE: Conjuncts in a coordinate structure exhibit *replaceability* – a sentence is still coherent and consistent, if we replace a coordination structure with any of its conjuncts [\(Ficler and Goldberg,](#page-9-4) [2016b\)](#page-9-4). Following CalmIE's approach, we generate simple (non-conjunctive) sentences using IGL-CA. We then run CIGL-OIE on these simple sentences to generate extractions. These extractions are de-duplicated and merged to yield the final extraction set (Figure [4\)](#page-4-0). This pipelined approach describes our final OpenIE system — OpenIE6.
135
+
136
+ For a conjunctive sentence, CIGL-OIE's confidence values for extractions will be with respect to multiple simple sentences, and may not be calibrated across them. We use a separate confidence estimator, consisting of a BERT encoder and an LSTM decoder trained on (sentence, extraction) pairs. It computes a log-likelihood for every extraction w.r.t. the original sentence — this serves as a better confidence measure for OpenIE6.
2011.08908/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2011.08908/paper_text/intro_method.md ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Method
2
+
3
+ We introduce *Stochastic Multi-Expert Neural Patcher* ([[Shield]{.sans-serif}]{.smallcaps}) which patches *only* the last layer of an already *trained* NN model $f(\mathbf{x}, \theta)$ and transforms it into an ensemble of multiple expert predictors with stochastic weights. These predictors are designed to be strategically selected with different weights during inference depending on the input. This is realized by two complementary modules, namely (i) a *Stochastic Ensemble (SE)* module that transforms $f(\cdot)$ into a randomized ensemble of different heads and (ii) a *Multi-Expert (ME)* module that uses Neural Architecture Search (NAS) to dynamically learn the optimal architecture of each head to promote their diversity.
4
+
5
+ This module extends the last layer of $f(\cdot)$, which is typically a fully-connected layer (followed by a softmax for classification), to an ensemble of $K$ *prediction heads*, denoted $\mathcal{H}{=}\{h(\cdot)\}_j^K$. Each head $h_j(\cdot)$, parameterized by $\theta_{h_j}$, is an expert predictor that is fed with a feature representation learned by *up to the second-last layer* of $f(\cdot)$ and outputs a prediction logit score: $$\begin{equation}
6
+ h_j: f(\mathbf{x}, \theta^*_{L-1}) \in \mathbb{R}^{Q} \mapsto \tilde{y}_j \in \mathbb{R}^{M},
7
+ \label{eqn:head}
8
+ \end{equation}$$ where $\theta^*_{L-1}$ are *fixed* parameters of $f$ up to the last prediction head layer, $Q$ is the size of the feature representation of $\mathbf{x}$ generated by the base model $f(\mathbf{x}, \theta^*_{L-1})$, and $M$ is the number of labels. To aggregate all logit scores returned from all heads, then, a classical ensemble method would average them as the final prediction: $\hat{y}^*{=}\frac{1}{K} \sum_j^K \tilde{y}_j$. However, this simple aggregation assumes each $h_j(\cdot) \in \mathcal{H}$ learns from very similar training signals. Hence, when $\theta^*_{L-1}$ already learns some of the task-dependent information, $\mathcal{H}$ will eventually converge *not* to a set of experts but very similar predictors. To resolve this issue, we introduce stochasticity into the process by assigning *prediction heads with stochastic weights* during both training and inference. Specifically, we introduce a new aggregation mechanism: $$\begin{equation}
9
+ \hat{y} = \frac{1}{K} \sum_j^K \alpha_j w_j \tilde{y}_j,
10
+ \label{eqn:final_logit}
11
+ \end{equation}$$
12
+
13
+ where $w_j$ weights $\tilde{y}_j$ according to head $j$'s expertise on the current input $\mathbf{x}$, and $\alpha_j \in [0,1]$ is a probabilistic scalar, representing how much of the weight $w_j$ should be accounted for. Let us denote $w$, $\alpha \in \mathbb{R}^{K}$ as vectors containing all scalars $w_j$ and $\alpha_j$, respectively, and $\tilde{\mathbf{y}} \in \mathbb{R}^{(K\times M)}$ as the concatenation of all vectors $\tilde{y}_j$ returned from each of the heads. We calculate $w$ and $\alpha$ as follows: $$\begin{equation}
14
+ \begin{aligned}
15
+ w &= \mathbf{W}^T(\tilde{\mathbf{y}} \oplus f(\mathbf{x}, \theta^*_{L-1})) + \mathbf{b},
16
+ \end{aligned}
17
+ \label{eqn:scale_score}
18
+ \end{equation}$$ $$\begin{equation}
19
+ \alpha = \mathrm{softmax}((w + \mathbf{g})/\tau),
20
+ \label{eqn:gumbel_softmax}
21
+ \end{equation}$$ where $\mathbf{W} \in \mathbb{R}^{(K\times M + Q)\times K}$, $\mathbf{b} \in \mathbb{R}^{K}$ are trainable parameters, $\mathbf{g} \in \mathbb{R}^K$ is a noise vector sampled from the *Standard Gumbel Distribution* and therefore, probability vector $\alpha$ is sampled by a technique known as *Gumbel-Softmax* [@jang2016categorical] controlled by the noise vector $\mathbf{g}$ and the temperature $\tau$. Unlike the standard Softmax, the Gumbel-Softmax is able to learn a categorical distribution (over $K$ heads) optimized for a downstream task [@jang2016categorical]. Annealing $\tau{\rightarrow}0$ encourages a pseudo one-hot vector (e.g., \[0.94, 0.03, 0.01, 0.02\] when $K{=}4$), which makes Eq. ([\[eqn:final_logit\]](#eqn:final_logit){reference-type="ref" reference="eqn:final_logit"}) a mixture of experts [@avnimelech1999boosted]. Importantly, $\alpha$ is sampled in an inherently stochastic way depending on the gumbel noise $\mathbf{g}$.
22
+
23
+ While $\mathbf{W}, \mathbf{b}$ are learned to *deterministically* assign more weight $w$ to heads that are experts for each input $\mathbf{x}$ (Eq. ([\[eqn:scale_score\]](#eqn:scale_score){reference-type="ref" reference="eqn:scale_score"})), $\alpha$ introduces *stochasticity* into the final logits. The multiplication $\alpha_jw_j$ in Eq. ([\[eqn:final_logit\]](#eqn:final_logit){reference-type="ref" reference="eqn:final_logit"}) then enables us to use different sets of weighted ensemble models *while still maintaining the ranking of the most important head*. Thus, this further diversifies the learning of each expert and confuses attackers when they iteratively try different inputs to find good adversarial perturbations.
24
+
25
+ Finally, to train this module, we use Eq. ([\[eqn:final_logit\]](#eqn:final_logit){reference-type="ref" reference="eqn:final_logit"}) as the final prediction and train the whole module with *Negative Log Likelihood (NLL)* loss following the objective: $$\begin{equation}
26
+ \min_{\mathbf{W}, \mathbf{b}, \{\theta_{h}\}^K_j} \mathcal{L_{\mathrm{SE}}} = -\frac{1}{N} \sum_i^N y_i log(\mathrm{softmax}(\hat{y}_i)).
27
+ \label{eqn:mle}
28
+ \end{equation}$$
29
+
30
+ :::: algorithm
31
+ ::: algorithmic
32
+ **Input:** pre-trained neural network $f(\cdot)$ **Input:** $\mathcal{O}$, $K$, $\tau$, $\gamma$ *Initialize* $\mathbf{W}, \mathbf{b}, \theta_\mathcal{O}, \{\beta\}_j^K$ Freeze $\{\beta\}_j^K$ and optimize $\mathbf{W}, \mathbf{b}, \theta_\mathcal{O}$ via Eq. ([\[eqn:mle\]](#eqn:mle){reference-type="ref" reference="eqn:mle"}) in mini-batch from *train* set.
33
+
34
+ Freeze $\mathbf{W}, \mathbf{b}, \theta_\mathcal{O}$ and optimize $\{\beta\}_j^K$ via Eq. ([\[eqn:me\]](#eqn:me){reference-type="ref" reference="eqn:me"}) with $\gamma$ multiplier in mini-batch from *validation* set.
35
+ :::
36
+ ::::
37
+
38
+ While the *SE* module facilitates stochastic weighted ensemble among heads, the *ME* module searches for the optimal architecture for each head that maximizes the diversity in how they make predictions. To do this, we utilize the [DARTS]{.smallcaps} algorithm [@liu2018darts] as follows. Let us denote $\mathcal{O}_j{=}\{o_j(\cdot)\}_t^T$ where $T$ is the number of possible architectures to be selected for $h_j \in \mathcal{H}$. We want to learn a one-hot encoded *selection* vector $\beta_j \in \mathbb{R}^T$ that assigns $h_j(\cdot) \leftarrow o_{j,\mathrm{argmax}(\beta_j)}(\cdot)$ during prediction. Since $\mathrm{argmax}(\cdot)$ operation is not differentiable, during training, we relax the categorical assignment of the architecture for $h_j(\cdot) \in \mathcal{H}$ to a softmax over all possible networks in $\mathcal{O}_j$: $$\begin{equation}
39
+ h_j(\cdot) \longleftarrow \frac{1}{T} \sum_t^T \frac{\mathrm{exp}(\beta_j^t)}{\sum_{t'}^T \mathrm{exp}(\beta_j^{t'})} o_{j,t}(\cdot).
40
+ \label{eqn:head_function}
41
+ \end{equation}$$
42
+
43
+ However, the original DARTS algorithm *only* optimizes prediction performance. In our case, we also want to promote the diversity among heads. To do this, we force each $h_j(\cdot)$ to specialize in different features of an input, i.e., in how it makes predictions. This can be achieved by *maximizing* the difference among the gradients of the word-embedding $\mathbf{e}_i$ of input $\mathbf{x}_i$ w.r.t. the outputs of each $h_j(\cdot) \in \mathcal{H}$. Hence, given a fixed set of parameters $\theta_{\mathcal{O}}$ of all possible networks for every head, we train all selection vectors $\{\beta\}_j^K$ by optimizing the objective: $$\begin{equation}
44
+ \begin{aligned}
45
+ &\mathrm{minimize}_{\{\beta\}_j^K} \mathcal{L}_{\mathrm{experts}} = \\
46
+ &\sum_i^N \sum_{n<m}^K{\Big(}\mathrm{d}(\nabla_{\mathbf{e}_i}\mathcal{J}_n; \nabla_{\mathbf{e}_i}\mathcal{J}_m) - ||\nabla_{\mathbf{e}_i}\mathcal{J}_n{-}\nabla_{\mathbf{e}_i}\mathcal{J}_m ||_2^2 \Big),
47
+ \end{aligned}
48
+ \label{eqn:expert}
49
+ \end{equation}$$
50
+
51
+ where $\mathrm{d}(\cdot)$ is the cosine-similarity function, and $\mathcal{J}_j$ is the NLL loss as if we only use a single prediction head $h_j$. In this module, however, not only do we want to maximize the differences among gradient vectors, but we also want to ensure the selected architectures eventually converge to good prediction performance. Therefore, we train the whole *ME* module with the following objective: $$\begin{equation}
52
+ \mathrm{minimize}_{\{\beta\}_j^K} \mathcal{L}_{ME} = \mathcal{L}_{SE} + \gamma \mathcal{L}_{\mathrm{experts}}.
53
+ \label{eqn:me}
54
+ \end{equation}$$
55
+
56
+ To combine the *SE* and *ME* modules, we substitute Eq. ([\[eqn:head_function\]](#eqn:head_function){reference-type="ref" reference="eqn:head_function"}) into Eq. ([\[eqn:head\]](#eqn:head){reference-type="ref" reference="eqn:head"}) and optimize the overall objective: $$\begin{equation}
57
+ \begin{aligned}
58
+ &\mathrm{minimize}_{\mathbf{\{\beta\}_j^K}} \mathcal{L}_{\mathrm{ME}}^{\mathrm{val}} + \gamma \mathcal{L}_{\mathrm{experts}}^{\mathrm{val}} \quad
59
+ \mathrm{s.t.} \\
60
+ &\mathbf{W}, \mathbf{b}, \theta_{\mathcal{O}} = \mathrm{minimize}_{\mathbf{W}, \mathbf{b}, \theta_{\mathcal{O}}} \mathcal{L}_{\mathrm{SE}}^{\mathrm{train}}.
61
+ \end{aligned}
62
+ \label{eqn:final}
63
+ \end{equation}$$
64
+
65
+ We employ an *iterative* training strategy [@liu2018darts] with the *Adam* optimization algorithm [@kingma2014adam] as in Alg. [\[alg:training\]](#alg:training){reference-type="ref" reference="alg:training"}. By alternately freezing and training $\mathbf{W}, \mathbf{b}$, $\theta_{\mathcal{O}}$ and $\{\beta\}_j^K$ using a training set $\mathcal{D}_{\mathrm{train}}$ and a validation set $\mathcal{D}_{\mathrm{val}}$, we want to (i) achieve high quality prediction performance through Eq. ([\[eqn:mle\]](#eqn:mle){reference-type="ref" reference="eqn:mle"}) and (ii) select the optimal architecture for each expert to maximize their specialization through Eq. ([\[eqn:expert\]](#eqn:expert){reference-type="ref" reference="eqn:expert"}).
2101.00265/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2101.00265/paper_text/intro_method.md ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Text-to-image retrieval is the task of retrieving a list of relevant images from a corpus given text queries. This task is challenging because in order to find the most relevant images given text query, the model needs to not only have good representations for both textual and visual modalities, but also capture the fine-grained interaction between them.
4
+
5
+ Existing text-to-image retrieval models can be broadly divided into two categories: query-agnostic and query-dependent models. The dual-encoder architecture is a common query-agnostic model, which uses two encoders to encode the query
6
+
7
+ <span id="page-0-1"></span>![](_page_0_Figure_14.jpeg)
8
+
9
+ Figure 1: Inference Time vs. Model Accuracy. Each dot represents Recall@1 for different models on MSCOCO 1K split. By setting top *n*-terms to 500, our model significantly outperforms the previous best query-agnostic retrieval models, with ∼2.8X speedup. See section [5.1](#page-6-0) for details.
10
+
11
+ and images separately and then compute the relevancy via inner product [\(Faghri](#page-8-0) *et al.*, [2017;](#page-8-0) Lee *[et al.](#page-8-1)*, [2018;](#page-8-1) [Wang](#page-9-0) *et al.*, [2019a\)](#page-9-0). The transformer architecture is a well-known querydependent model [\(Devlin](#page-8-2) *et al.*, [2018;](#page-8-2) [Yang](#page-9-1) *et al.*, [2019\)](#page-9-1). In this case, each pair of text and image is encoded by concatenating and passing into one single network, instead of being encoded by two separate encoders (Lu *[et al.](#page-8-3)*, [2020;](#page-8-3) Li *[et al.](#page-8-4)*, [2020b\)](#page-8-4). This method borrows the knowledge from large pretrained transformer models and shows much better accuracy compared to dual-encoder methods [\(Li](#page-8-4) *et [al.](#page-8-4)*, [2020b\)](#page-8-4).
12
+
13
+ Besides improving the accuracy, retrieval speed has also been a long-existing subject of study in the information retrieval (IR) community [\(Man](#page-8-5)ning *[et al.](#page-8-5)*, [2008\)](#page-8-5). Query-dependent models are prohibitively slow to apply to the entire image corpus because they need to recompute for every dif-
14
+
15
+ <sup>∗</sup> This work was partially done during an internship at SOCO
16
+
17
+ ferent query. On the other hand, query-agnostic models are able to scale by pre-computing an image data index. For dual-encoder systems, further speed improvement can be obtained via Approximate Nearest Neighbors (ANN) Search and GPU acceleration [\(Johnson](#page-8-6) *et al.*, [2019\)](#page-8-6).
18
+
19
+ In this work, we propose VisualSparta, a simple yet effective text-to-image retrieval model that outperforms all existing query-agnostic retrieval models in both accuracy and speed. By modeling fine-grained interaction between visual regions with query text tokens, our model is able to harness the power of large pre-trained visual-text models and scale to very large datasets with real-time response. To our best knowledge, this is the first model that integrates the power of transformer models with real-time searching, showing that large pre-trained models can be used in a way with significantly less amount of memory and computing time. Lastly, our method is embarrassingly simple because its image representation is essentially a weighted bag-of-words, and can be indexed in a standard Inverted Index for fast retrieval. Comparing to other sophisticated models with distributed vector representations, our method does not depend on ANN or GPU acceleration to scale up to very large datasets.
20
+
21
+ Contributions of this paper can be concluded as the following: (1) A novel retrieval model that achieves new state-of-the-art results on two benchmark datasets, i.e., MSCOCO and Flickr 30K. (2) Weighted bag-of-words is shown to be an effective representation for cross-modal retrieval that can be efficiently indexed in an Inverted Index for fast retrieval. (3) Detailed analysis and ablation study that show advantages of the proposed method and interesting properties that shed light on future research directions.
22
+
23
+ # Method
24
+
25
+ As query processing is an online operation during retrieval, the efficiency of encoding the query needs to be well considered. Previous methods pass the query sentence into a bi-RNN to give token representations given surrounding tokens (Lee *et al.*, 2018; Wang *et al.*, 2019a, 2020).
26
+
27
+ Instead of encoding the query in a sequential manner, we drop the order information of the query and only use the pretrained token embeddings to represent each token. In other words, we do not encode the local contextual information for the query and purely rely on independent word embedding $E_{tok}$ of each token. Let a query be $q = [w_1, ..., w_m]$ after tokenization, we have:
28
+
29
+ $$\hat{w}_i = E_{tok}\left(w_i\right) \tag{1}$$
30
+
31
+ where $w_i$ is the i-th token of the query. Therefore, a query is represented as $\hat{w} = \{\hat{w}_1, ..., \hat{w}_m\}, \hat{w}_i \in \mathbb{R}^{d_H}$ . In this way, each token is represented independently and agnostic to its local context. This is essential for the efficient indexing and inference, as described next in section 3.3.
32
+
33
+ Compared with query information which needs to be processed in real-time, answer processing can be rich and complex, as answer corpus can be indexed offline before the query comes. Therefore, we follow the recent works in Vision-Language Transformers (Li *et al.*, 2019, 2020b) and use the contextualized representation for the answer corpus.
34
+
35
+ Specifically, for an image, we represent it using information from three sources: regional visual features, regional location features, and label features with attributes, as shown in Figure 2.
36
+
37
+ Regional visual features and location features Given an image v, we pass it through Faster-RCNN (Ren *et al.*, 2016) to get n regional visual features $v_i$ and their corresponding location features $l_i$ :
38
+
39
+ $$v_1, ..., v_n = \text{RCNN}(v), v_i \in \mathbb{R}^{d_{rcnn}}$$
40
+ (2)
41
+
42
+ and the location features are the normalized top left and bottom right positions of the region proposed from Faster-RCNN, together with the region width and height:
43
+
44
+ $$l_i = [l_{xmin}, l_{xmax}, l_{ymin}, l_{ymax}, l_{width}, l_{height}]$$
45
+ (3)
46
+
47
+ Therefore, we represent one region by the concatenation of two features:
48
+
49
+ $$E_i = [v_i; l_i] \tag{4}$$
50
+
51
+ $$E_{image} = [E_1, ..., E_n], E_i \in \mathbb{R}^{d_{rcnn} + d_{loc}}$$
52
+ (5)
53
+
54
+ where $E_{image}$ is the representation for a single image.
55
+
56
+ Label features with attributes Additional to the deep representations from the proposed image region, previous work by Li *et al.* (2020b) shows that the object label information is also useful as an additional representation for the image. We also encode the predicted objects and corresponding attributes obtained from Faster-RCNN model with pretrained word embeddings:
57
+
58
+ $$\hat{o_i} = E_{tok}(o_i) + E_{pos}(o_i) + E_{seg}(o_i) \quad (6)$$
59
+
60
+ $$E_{label} = [\hat{o_1}, ..., \hat{o_k}], \hat{o_i} \in \mathbb{R}^{d_H}$$
61
+ (7)
62
+
63
+ where k represents the number of tokens after the tokenization of attributes and object labels for n
64
+
65
+ <span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
66
+
67
+ Figure 2: VisualSparta Model. It first computes contextualized image region representation and non-contextualized query token representation. Then it computes a matching score between every query token and image region that can be stored in an inverted index for efficient searching.
68
+
69
+ image regions. Etok, Epos, and Eseg represent token embeddings, position embeddings, and segmentation embeddings respectively, similar to the embedding structure in [Devlin](#page-8-2) *et al.* [\(2018\)](#page-8-2).
70
+
71
+ Therefore, one image can be represented by the linear transformed image features concatenated with label features:
72
+
73
+ $$a = [(E_{image}W + b); E_{label}]$$
74
+ (8)
75
+
76
+ where $W \in \mathbb{R}^{(d_{rcnn}+d_{loc})\times d_H}$ and $b \in \mathbb{R}^{d_H}$ are the trainable linear combination weights and bias. The concatenated embeddings $a$ are then passed into a Transformer encoder $T_{image}$, and the final image feature is the hidden output of it:
77
+
78
+ $$H_{image} = T_{image}(a) \tag{9}$$
79
+
80
+ where $H_{image} \in \mathbb{R}^{(n+k)\times d_H}$ is the final contextualized representation for one image.
81
+
82
+ Given the visual and query representations, the matching score can now be computed between a query and an image. Different from other dual-encoder based interaction models, we adopt the fine-grained interaction model proposed by [Zhao](#page-9-6) *et al.* [\(2020\)](#page-9-6) to compute the relevance score by:
83
+
84
+ $$y_i = \max_{j \in [1, n+k]} (\hat{w}_i^T H_j)$$
85
+ (10)
86
+
87
+ $$\phi(y_i) = \text{ReLU}(y_i + b) \tag{11}$$
88
+
89
+ $$f(q, v) = \sum_{i=1}^{m} \log(\phi(y_i) + 1)$$
90
+ (12)
91
+
92
+ where Eq[.10](#page-3-1) captures the fragment-level interaction between every image region and every query word token; Eq[.11](#page-3-2) produces sparse embedding outputs via a combination of ReLU and trainable bias, and Eq[.12](#page-3-3) sums up the score and prevents an overly large score using log operation.
93
+
94
+ Following the training method presented in [Zhao](#page-9-6) *[et al.](#page-9-6)* [\(2020\)](#page-9-6), we use cross entropy loss to train VisualSparta. Concretely, we maximize the objective in Eq. [13,](#page-3-4) which tries to decide between the ground truth image $v^+$ and irrelevant/random images $V^-$ for each text query $q$. The parameters to learn include both the query encoder $E_{tok}$ and the image transformer encoder $T_{image}$. Parameters are optimized using Adam [\(Kingma and Ba,](#page-8-13) [2014\)](#page-8-13).
95
+
96
+ <span id="page-3-4"></span>
97
+ $$J = f(q, v^{+}) - \log \sum_{k \in V^{-}} e^{f(q,k)}$$
98
+ (13)
99
+
100
+ <span id="page-3-3"></span><span id="page-3-2"></span><span id="page-3-1"></span>In order to achieve efficient training, we use other image samples from the same batch as negative examples for each training data, an effective technique that is widely used in response selection [\(Zhang](#page-9-7) *et al.*, [2018;](#page-9-7) [Henderson](#page-8-14) *et al.*, [2019\)](#page-8-14). Preliminary experiments found that as long as the batch size is large enough (we choose to use batch size of 160), this simple approach performs equally well compared to other more sophisticated methods, for example, sample similar images that have nearby labels.
101
+
102
+ VisualSparta model structure is suitable for real-time inference. As discussed in section [3.1.1,](#page-2-0) since query embeddings are non-contextualized, we are able to compute the relationship between each query term $w_i$ and every image $v$ offline.
103
+
104
+ Concretely, during offline indexing, for each image v, we first compute fragment-level interaction between its regions and every query term in the vocabulary, same as in Eq. [10.](#page-3-1) Then, we cache the computed ranking score:
105
+
106
+ $$CACHE(w, v) = Eq. 11 \tag{14}$$
107
+
108
+ During test time, given a query $q = [w_1, ..., w_m]$, the ranking score between $q$ and an image $v$ is:
109
+
110
+ $$f(q, v) = \sum_{i=1}^{m} \log(\text{CACHE}(w_i, v) + 1) \qquad (15)$$
111
+
112
+ As shown in Eq. [15,](#page-4-1) the final ranking score during inference time is an O(1) look-up operation followed by summation. Also, the query-time computation can be fit into an Inverted Index architecture [\(Manning](#page-8-5) *et al.*, [2008\)](#page-8-5), which enables us to use VisualSparta index with off-the-shelf search engines, for example, Elasticsearch [\(Gheorghe](#page-8-15) *et [al.](#page-8-15)*, [2015\)](#page-8-15).
2103.01009/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-06-04T16:52:05.045Z" agent="5.0 (Windows)" etag="9dFh-LWCo1iPpnYOjIcc" version="14.7.6" type="device"><diagram id="V5055qw27HKLXg1NYph4" name="Page-1">7V1bc5s6EP41nrYPyXATl8faufTh5EymaeekTxliZJsWg4tx4vTXHwnETRIxtgFRbGeagJAXoV190n67qCN1stzehvZqcRc40BspkrMdqVcjRVEN3UR/cMlbUnKh6aqeFM1D10kK5bzgwf0DSaFESjeuA9elilEQeJG7KhdOA9+H06hUZodh8FquNgu88l1X9pzcUcoLHqa2B5lq/7lOtEhKFVW18gtfoDtfkFurqkQELe20NpGxXthO8Fq4mXo9UidhEETJ0XI7gR7uv7RjEkE3FVezloXQj+p84dqdfXlUfo//rJSHV8v9dvvmfLmQTZDIebG9DXlm0tzoLe2EMNj4DsRipJE6fl24EXxY2VN89RUpHpUtoqWHzmR0OAv86MZeuh7W+S0Mwrlro+J1FAa/4CTwghCV+4EPcV3X89KikaLeSPgHlZMmwTCC28qnlbM+RPYHgyWMwjdUhXzBNIkeiOkZBiAFr7kejVTri6IKNVJoE9uZZ8Lz3kUHpIP36mztRDpbtXrQ2eqJdLakdtnZym9nPDXVt1/R4712By/GM+/rhcLpat2LSKeV+lz/vQnSCxfrGPA/owqatNrmF9HRPP4LJh/Rv2c4d/2RMbY9d+4jdRlXqBCdo+/dPKFD1JkROsV10B3QQ6KnkO5G4BrXQZftTfFC8SvBJiKXsMCkjm4vseaT33lV3AfLlQeXSE+oDXEvorv8c58LAJNUEq79nWkAkew/r2PRh9/19uv37K7Qd+iu+ZT2PlJmogDSnbT9F6w7m56wmTr2ehEPC3wlFo2Op7gJyKDH2HxdNEd+JheWruNgiVjGCktebud4QXCZzMBK8heLfUvlb8mBH0TTBTmuGF/UUJoB/JONu3RWBkQAWUIAiRmZ6Lt6/CE1C+XyDf4hz31vR+gx/fjhFUnOuo0ZoZxxXDlotfKYNbOC4pg1FXbMKqA1gFTMylHruC91Bi14b9DiBdDzDJnmAhnl0wsaDBI2WCyOM2rBxIOzqPS9F/K9SYi74xNl13ETDzVrqUWz1tRLsL9lk3njAKMG1/inmbmFMlPd0jlmqnOmFllpz0ytXZPLNOuM3BTVm/jDWue1P0V+Sl2AbGKBwNdgScdliMs02qmus3VFQdeqalwCVttma8pOlzLvLdqQHOT+wd36sNerxCecuVusQ6rnjXcmF2qCSD5114DFmaYBRckarapsdVdQlcxb8bU4LFW5hqZ853OCkVdTz16v3WlZQetgE07hPQxd1CgYYk26/pwMtvrKqu591IBUgCJfysWPZFSrE27d6JG0ER//wE2KUR2fXaWLl/gkxf7k4aGT8glVakYdFD/1u32r8A2ioG7A0XZaFkLPjtyXckN4FkDucB+4MY6m9paxL5m9GdalbJTlEN0lXy3SDrQ01nqRNE0pS4vscA4jRlpsmlkHHGOtPA/l1HFFMvuIKzU4kr8WV3zURY85mODTHynO4JMcWuKz/bElGUc9xha5com5N7DIKkIl3co+GrWeMSjBrWNMDSr177fcot0WzHjwlmsp7DR2oOFKzNKbhtPWTVXnmGrB9ec78rFD7u7hh8ceftm+d5GlpGincx4Fq4qJuuyyFZwv/R1XfBGE7h90yU4b2sT8KkuUnnUOU2vyplepvemVR/oMDqQETq8S3yY6ml41y2pqegUqjVK0qLZRKoXJA1Dq5xmlarMLRv9QSuORCzs1v0yJ5jzqooxlEihR499gst4sn9D1TXLm+ujvvx9fPqFaTMCIw0nHXPYGV0xqnclpYeS0oXK81a7J6ZTHaIicvoPrNc5I
UaQb/0xRMxrnRc06p6g1Xl7BXmEzvXbYDAfNMFr9ZOO670FMV0xWHcbKsiQJLUoY6isrb2QOoy1F4gQzumaytCEzWYIZcg0IXWbTDLkRc1FNMeSxtK4Zco3HXjUBa3smAu0Pju6upJczZtZh//uBmTxiaiiY2QN6QjBu0ux/voZvmP03FLVrssI4BcsdCWP/xVouzf7jKfpAw6XZf0OmfZXWTZWXUTU4UxUIsqpQkNUs61LKP3JTiEsTwobSddgK7KAFz653rXTnLOukiWUk47xoHJKm62Uk4NFyYhCuQwe72o+oj267XW8gFt0Y1xvDXXOuN5bWtesN2mIUz653PzGTcb17gZn9oSs7Xvs1gZu7V4WCcZNxvXXa7WjK9QZW18vC/iTedexgn4LlMq53luNyvOsN6DdiWzfV/vCbwwTZipm3F6734YjLuN6ga84I7McZkYwRKvOA98ot1vsDkRGE0SKYB77tXeel4+kmfMleRmbtu/SuMv8F3uNtT9y0TaOfTmddHYF+NfkbpFT7rVBthSus32k0c6e00blVJjKbtdE092f/rDHkz8gDzWxuMYRoMaikE1AoujFpvkw5ddCsHgnHuTF6jW0/zkDVwtRH+7SHA5UmdQVUzJ06Aiqeq10fqIaZ3NxmroNi9RGpdmTc7NhNozLJmdl3piqP+YXZoIbsX1Mjo/qc+Cwu8VnnJOp0nfisV79Bdkji8/eVY0flvOfnkEMjnzOhqclVZCa0zkt5aS8cK//l4VgxmdAmx1K6Di3oQ37pUHAmtG4JXfYz4VgTNBmOxdK6Dsfq1RtO9TccKw8zHCsmE7oPmGnwyKyhYGYPkvQE4yYdjjUlOhTVVDjWoneQbBs/jSHvDNaDTGixlsuEY03QWDjWoinD1k1VOQVTFQiyhlCQfTccewTiMuFYq+s8AoMT5eDsg/HhiRxk5CC7+TWfF/xGmL0zh9cKgZMbn0AOz6gOQxzC4V3B8866XLKOt4ty52SdUSdJ7tR2wKT5BjN7pUWk59efHLHBsWWG2AUJzZaZSpM768bSumbLjDrvvZ4artCMUj9wZcgsfA+cHcHYwjBKWks765pq1zvrGjxGfnCWK45REmu5NKOEp7EDDZdmlEzmbee2TTUF/v33rPww0ARUZtPKNkM59Ja7ppxChbjNLM2T4MMFzrti97Sht9w9Yt6leUVT7XrL3fS/IDsEvoaZltopfNF78daHL7k9+NpvYyM2ob5EFB/E5MkxgBTy76H3HLwWU+/jAnQhNYZYhB1GLKjGxTeul7YONZ2ccTng/H3wCkitkdVfF+yEQRigzM5QDs7d1y3lMqUa8wihVgvF9k7f1/WKdrebvm/u50j3c0RkYZF8OEjn4ZAMB1NnbNiSKV7tuBGhtjUiNFDZ9JYHxY6suWQh8BWuoI0bPEOWU7FUqFpuxG8fkDCmhN9VABMnwL0x+XYh92PtkQU26y0/agz7MIiQLQdYuJUvnI9cZahq2UKAJF8qgFlo6CnQlVcaWeBqj7UGOs3/p/vE5nDA9y5wIK7xPw==</diagram></mxfile>
2103.01009/main_diagram/main_diagram.pdf ADDED
Binary file (63.9 kB). View file
 
2103.01009/paper_text/intro_method.md ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Whereas many traditional machine learning models operate on sequential or Euclidean (grid-like) data representations, GNNs allow for graph-structured inputs. GNNs have yielded breakthroughs in a variety of complex domains, including drug discovery [\[33,](#page-11-1) [50\]](#page-12-1), fraud detection [\[56\]](#page-12-2), computer vision [\[49,](#page-12-3) [43\]](#page-12-4), and particle physics [\[18\]](#page-10-0).
4
+
5
+ GNNs have also been successfully applied to reinforcement learning (RL), with promising results on locomotion control tasks with small state and action spaces. Not only are GNN policies as effective as MLPs on certain training tasks, but when a trained policy is transferred to another similar task, GNNs significantly outperform MLPs [\[55,](#page-12-0) [20\]](#page-11-0). This is largely due to the capacity of a single GNN to operate over arbitrary graph topologies (patterns of connectivity between nodes) and sizes without modification. However, so far GNNs in RL have only shown competitive performance with MLPs on lower-dimensional locomotion control tasks. For higher-dimensional tasks, one must therefore choose between superior training task performance (MLPs) and superior transfer performance (GNNs).
6
+
7
+ This paper investigates the factors underlying poor GNN scaling and introduces a method to combat them. We begin with an analysis of the GNN-based NERVENET architecture [\[55\]](#page-12-0), which we choose for its strong zero-shot transfer performance. We show that optimisation updates for the GNN
8
+
9
+ <sup>∗</sup>Corresponding author. Now at Graphcore, Bristol
10
+
11
+ <sup>†</sup>Now at Waymo, Oxford
12
+
13
+ policy have a tendency to cause excessive changes in policy space, leading to degraded performance. To combat this, current state-of-the-art algorithms [46, 48, 1] employ trust region-like constraints, inspired by natural gradients [2, 23], that limit the change in policy for each update. We outline how this policy instability can be framed as a form of overfitting—a problem GNN architectures like NERVENET are known to suffer from in supervised learning—and show that parameter regularisation (a standard remedy for overfitting) leads to a small improvement in NERVENET performance.
14
+
15
+ We then investigate which structures in the GNN contribute most to this overfitting, by applying different learning rates to different parts of the network. Surprisingly, the best performance is attained when training with a learning rate of zero in the parts of the GNN architecture that encode, decode, and propagate messages in the graph, in effect training only the part that updates node representations.
16
+
17
+ We use this approach as the basis of our method, SNOWFLAKE, which freezes the parameters of particular operations within the GNN to their initialised values, keeping them fixed throughout training while updating the non-frozen parameters as before. This simple technique enables GNN policies to be trained much more effectively in high-dimensional environments.
18
+
19
+ Experimentally, we show that applying SNOWFLAKE to NERVENET dramatically improves asymptotic performance and sample complexity on such tasks. We also demonstrate that a policy trained using SNOWFLAKE exhibits improved zero-shot transfer compared to regular NERVENET or MLPs on high-dimensional tasks.
20
+
21
+ We formalise an RL problem as a Markov decision process (MDP). An MDP is a tuple $\langle \mathcal{S}, \mathcal{A}, \mathcal{R}, \mathcal{T}, \rho_0 \rangle$ . The first two elements define the state space $\mathcal{S}$ and the action space $\mathcal{A}$ . At every time step t, the agent employs a policy $\pi(a_t|s_t)$ to output a distribution over actions, selects action $a_t \sim \pi(\cdot|s_t)$ , and transitions from state $s_t \in \mathcal{S}$ to $s_{t+1} \in \mathcal{S}$ , as specified by the transition function $\mathcal{T}(s_{t+1}|s_t,a_t)$ which defines a probability distribution over states. For the transition, the agent gets a reward $r_t = \mathcal{R}(s_t,a_t,s_{t+1})$ . The last element of an MDP specifies initial distribution over states, i.e., states an agent can be in at time step zero.
22
+
23
+ Solving an MDP means finding a policy $\pi^*$ that maximises an objective, in our case the expected discounted sum of rewards $J = \mathbb{E}_{\pi} \left[ \sum_{t=0}^{\infty} \gamma^t r_t \right]$ , where $\gamma \in [0,1)$ is a discount factor. Policy Gradients (PG) [52] find an optimal policy $\pi^*$ by doing gradient ascent on the objective: $\theta_{t+1} = \theta_t + \alpha \nabla_{\theta} J|_{\theta=\theta_t}$ with $\theta$ parameterising the policy.
24
+
25
+ Often, to reduce the variance of the gradient estimate, one learns a value function $V(s) = \mathbb{E}_{\pi}\left[\sum_{t=0}^{\infty} \gamma^{t} r_{t} \mid s_{0} = s\right]$ , and uses it as a critic of the policy. In the resulting actor-critic method, the policy gradient takes the form: $\nabla_{\theta} J(\theta) = \mathbb{E}_{\pi_{\theta}}\left[\sum_{t} A_{t}^{\pi_{\theta}} \nabla_{\theta} \log \pi_{\theta}(a_{t} \mid s_{t})\right]$ , where $A_{t}^{\pi_{\theta}}$ is an estimate of the advantage function $A_{t}^{\pi} = \mathbb{E}_{\pi}\left[\sum_{t=0}^{\infty} \gamma^{t} r_{t} \mid a_{t}, s_{t}\right] - \mathbb{E}_{\pi}\left[\sum_{t=0}^{\infty} \gamma^{t} r_{t} \mid s_{t}\right]$ [47].
26
+
27
+ Proximal policy optimisation (PPO) [47] is an actor-critic method that has proved effective for a variety of domains including locomotion control [17]. PPO approximates the natural gradient using a first order method, which has the effect of keeping policy updates within a "trust region". This is done through the introduction of a *surrogate objective* to be optimised:
28
+
29
+ $$J = \mathbb{E}_{\pi_{\theta'}} \left[ \min \left( \frac{\pi_{\theta}(a|s)}{\pi_{\theta'}(a|s)} A^{\pi_{\theta'}}(s, a), \operatorname{clip}\left( \frac{\pi_{\theta}(a|s)}{\pi_{\theta'}(a|s)}, 1 - \epsilon, 1 + \epsilon \right) A^{\pi_{\theta'}}(s, a) \right) \right]$$
30
+ (1)
31
+
32
+ where $\epsilon$ is a clipping hyperparameter that effectively limits how much a state-action pair can cause the overall policy to change at each update. This objective is computed over a number of optimisation epochs, each of which gives an update to the new policy $\pi_{\theta}$ . If during this process a state-action pair with a positive advantage $A^{\pi_{\theta'}}(s,a)$ reaches the upper clipping boundary, the objective no longer provides an incentive for the policy to be improved with respect to that data point. This similarly applies to state-action pairs with a negative advantage if the lower clipping limit is reached.
33
+
34
+ GNNs are a class of neural architecture designed to operate over graph-structured data. We define a graph as a tuple $\mathcal{G}=(V,E)$ comprising a set of nodes V and edges $E=\{(u,v)\mid u,v\in V\}$ . A labelled graph has corresponding feature vectors for each node and edge that form a pair of matrices $\mathcal{L}_{\mathcal{G}}=(V,E)$ , where $V=\{\mathbf{v}_v\in\mathbb{R}^p\mid v\in V\}$ and $E=\{\mathbf{e}_{u,v}\in\mathbb{R}^q\mid (u,v)\in E\}$ . For GNNs we often consider directed graphs, where the order of an edge (u,v) defines u as the sender and v as the receiver.
35
+
36
+ A GNN takes a labelled graph $\mathcal{G}$ and outputs a second graph $\mathcal{G}'$ with new labels. Most GNN architectures retain the same topology for $\mathcal{G}'$ as used in $\mathcal{G}$ , in which case a GNN can be viewed as a mapping from input labels $\mathcal{L}_{\mathcal{G}}$ to output labels $\mathcal{L}_{\mathcal{G}'}$ .
37
+
38
+ A common GNN framework is the message passing neural network (MPNN) [14], which generates this mapping using T steps or 'layers' of computation. At each layer $\tau \in \{0, \dots, T-1\}$ in the network, a *hidden state* $\mathbf{h}_v^{\tau+1}$ and *message* $\mathbf{m}_v^{\tau+1}$ is computed for every node $v \in V$ in the graph.
39
+
40
+ An MPNN implementation calculates these through its choice of *message functions* and *update functions*, denoted $M^{\tau}$ and $U^{\tau}$ respectively. A message function computes representations from hidden states and edge features, which are then aggregated and passed into an update function to compute new hidden states:
41
+
42
+ $$\mathbf{m}_{v}^{\tau+1} = \sum_{u \in N(v)} M^{\tau} \left( \mathbf{h}_{u}^{\tau}, \mathbf{h}_{v}^{\tau}, \mathbf{e}_{u,v} \right), \qquad \mathbf{h}_{v}^{\tau+1} = U^{\tau} \left( \mathbf{h}_{v}^{\tau}, \mathbf{m}_{v}^{\tau+1} \right), \tag{2}$$
43
+
44
+ for all nodes $v \in V$ , where $N(v) = \{u \mid (u,v) \in E\}$ is the neighbourhood of all sender nodes connected to receiver v by a directed edge. The node input labels $\mathbf{v}_v$ are used as the initial hidden states $\mathbf{h}_v^0$ . MPNN assumes only *node* output labels are required, using each final hidden state $\mathbf{h}_v^{\mathrm{T}}$ as the output label $\mathbf{v}_v'$ .
45
+
46
+ NERVENET is an MPNN designed for locomotion control, based on the gated GNN architecture [32]. NERVENET uses the morphology (physical structure) of the agent as the basis for the GNN's input graph $\mathcal{G}$ , with edges representing body parts and nodes representing the joints that connect them.
47
+
48
+ NERVENET assumes an MDP where the state s can be factored into input labels V, which are fed to the GNN to generate output labels: $\mathbf{V}' = \text{NERVENET}(\mathcal{G}, \mathbf{V})$ . These are then used to parameterise a normal distribution defining the stochastic policy: $\pi(a|s) = \mathcal{N}(\mathbf{V}', \text{diag}(\boldsymbol{\sigma}^2))$ , where the standard deviation is a separate vector of parameters learned during training. Actions a are vectors, where each element represents the force to be applied at a given joint for the subsequent timestep. The policy is trained using PPO, with parameter updates computed via the Adam optimisation algorithm [25].
49
+
50
+ Internally, NERVENET uses an encoder $F_{\rm in}$ to generate initial hidden states from input labels: $\mathbf{h}_v^0 = F_{\rm in}(\mathbf{v}_v)$ . This is followed by a message function $M^{\tau}$ consisting of a single MLP for all layers $\tau$ that takes as input only the state of the sender node: $\mathbf{m}_v^{\tau+1} = \sum_{u \in N(v)} \text{MLP}(\mathbf{h}_u^{\tau})$ . The update function $U^{\tau}$ is a single gated recurrent unit (GRU) [9] that maintains an internal hidden state: $\mathbf{h}_v^{\tau+1} = \text{GRU}(\mathbf{m}_v^{\tau+1} \mid \mathbf{h}_v^{\tau})$ . Nervenet propagates through T layers of message-passing and node-updating, before applying a decoder $F_{\rm out}$ to turn final hidden states into scalar node output labels: $\mathbf{v}_v' = F_{\rm out}(\mathbf{h}_v^T)$ . A diagram of the Nervenet architecture can be seen in Appendix A.4, Figure 10.
51
+
52
+ <span id="page-2-0"></span>![](_page_2_Picture_11.jpeg)
53
+
54
+ ![](_page_2_Figure_12.jpeg)
55
+
56
+ Figure 1: A MuJoCo rendering of Centipede-20 and its corresponding morphological graph.
57
+
58
+ In this section, we use NERVENET to analyse the challenges that limit GNNs' ability to scale. We focus on NERVENET as its architecture is more closely aligned with the GNN framework than alternative approaches to structured locomotion control (see Section 4). We use mostly the same experimental setup as Wang et al. [55], with details of any differences and our choice of hyperparameters outlined in Appendix A.2.
59
+
60
+ We focus on environments derived from the Gym [8] suite, using the MuJoCo [53] physics engine. The main set of tasks we use to assess scaling is the selection of Centipede-n agents [55], chosen because of their relatively complex structure and ability to be scaled up to high-dimensional inputaction spaces.
61
+
62
+ The morphology of a Centipede-n agent consists of a line of n/2 body segments, each with a left and right leg attached (see Figure 1). The graph used as the basis for the GNN corresponds to the physical structure of the agent's body. At each timestep in the environment, the MuJoCo engine sends a feature vector containing the positions of the agent's body parts and the forces acting on them, expecting a vector to be returned specifying forces to be applied at each joint (full details of the state representation are given in Appendix A.2). The agent is rewarded for forward movement along the y-axis as well as a small 'survival' bonus for keeping its body within certain bounds, and given negative rewards proportional to the size of its actions and the magnitude of force it exerts on the ground.
63
+
64
+ Existing work applying GNNs to locomotion control tasks avoids training directly on larger agents, i.e., those with many nodes in the underlying graph representation. For example, Wang et al. [55] state that for NERVENET, "training a CentipedeEight from scratch is already very difficult". Huang et al. [20] also limit training their SMP architecture to small agent types.
65
+
66
+ To demonstrate the poor scaling of NERVENET to larger agents, we compare its performance on a selection of Centipede-n tasks to that of an MLP policy. Figure 2 shows that for the smaller Centipede-n agents both policies are similarly effective, but as the size of the agent increases, the performance of NERVENET drops relative to the MLP. A visual inspection of the behaviour of these agents shows that for Centipede-20, NERVENET barely makes forward progress at all, whereas the MLP moves effectively.
67
+
68
+ As in previous literature [e.g., 55, 20], we are ultimately not concerned with outperforming MLPs on the specific training task, but rather matching their training task performance so that the *additional* benefits of GNNs can be realised. In our setting we particularly wish to leverage the strong transfer benefits of GNNs—as demonstrated by Wang et al. [55]—resulting from their capacity to process inputs of arbitrary size and structure.
69
+
70
+ <span id="page-3-0"></span>![](_page_3_Figure_8.jpeg)
71
+
72
+ Figure 2: Comparison of the scaling of NERVENET relative to an MLP-based policy. Performance is similar for the smaller agent sizes, but NERVENET scales poorly to the larger agents.
73
+
74
+ <span id="page-4-0"></span>Table 1: KL-divergence from the policy before each update to the policy after, calculated over each batch. We train on $10^7$ timesteps, recording in the table the mean taken over the last 10% of steps.
75
+
76
+ | | Policy KL-divergence | | | |
77
+ |-------------|----------------------|-------------|--------------|--------------|
78
+ | Policy type | Centipede-6 | Centipede-8 | Centipede-12 | Centipede-20 |
79
+ | MLP | 0.021 | 0.024 | 0.031 | 0.044 |
80
+ | NERVENET | 0.115 | 0.137 | 0.118 | 0.123 |
81
+
82
+ In other words, the focus of this paper is on deriving a method that can close the gap in Figure 2, as doing so makes GNNs a better choice overall given that the trained policy transfers better than the MLP equivalent (see Section 5 for experimental results).
83
+
84
+ As outlined in Section 2.2, one of the key challenges for on-policy RL is preventing individual updates from causing excessive changes in policy space (i.e., keeping it within the trust region). Table 1 shows the extent to which this problem contributes to NERVENET's poor scaling, calculating the average KL-divergence from the pre-update policy to the post-update policy for both policy types. NERVENET has a consistently higher KL-divergence than the MLP policy, indicating that PPO finds it harder to ensure stable policy updates for the GNN.
85
+
86
+ We emphasise that this discrepancy persists even with carefully-tuned hyperparameter values for limiting policy divergence. Figure 3 shows the performance of NERVENET across a range of PPO $\epsilon$ -clipping values (see Section 2.2), and in all cases NERVENET is still substantially inferior to an MLP (note that our experiments on NERVENET always use the best value of $\epsilon = 0.1$ found here). As we demonstrate later (in Figure 8), controlling policy divergence effectively is a key component in making GNNs scale, but we see here that PPO alone does not control the divergence sufficiently to achieve this.
87
+
88
+ Excessive policy divergence resulting from updates can be understood as a form of overfitting. Whereas the supervised interpretation of overfitting implies poor generalisation from training to test set, in this case we are concerned with poor generalisation across state-action distributions induced
89
+
90
+ <span id="page-4-1"></span>![](_page_4_Figure_8.jpeg)
91
+
92
+ Figure 3: Final performance of NERVENET on Centipede-20 after ten million timesteps, across a range of $\epsilon$ clipping hyperparameter values. As $\epsilon$ increases (i.e., clipping is reduced) the KL divergence from the old to new policy (blue) increases. This improves performance (orange) up to a point, after which it begins to deteriorate.
93
+
94
+ ![](_page_4_Figure_10.jpeg)
95
+
96
+ Figure 4: L2 regularisation for NERVENET's message function across a range of values for the L2 penalty $\lambda$ , trained on Centipede-20. Increasing this penalty reduces the L2 norm of the weights learned (left). Improved performance for higher values of $\lambda$ (right) indicates the presence of overfitting for the message function.
97
+
98
+ by different iterations of the policy during training. Specifically, each update involves an optimisation step aiming to increase the expected reward over a batch of trajectories generated using the *pre-update* policy. The challenge for RL algorithms is that the agent is then evaluated and trained on trajectories generated using the *post-update* policy, i.e., a different distribution to the one optimised on.
99
+
100
+ For MPNN architectures like NERVENET, it is a known deficiency that in the supervised setting, message functions implemented as MLPs are prone to overfitting [16, p.55]. Here, we demonstrate that they also overfit (using the above interpretation) in our on-policy RL setting. Figure 4 shows the effect of applying L2 regularisation (a standard approach to reducing overfitting) to the NERVENET architecture. We regularise the parameters $\theta$ of NERVENET's message function MLP $M_{\theta}$ , adding a $\lambda ||\theta||_2^2$ term to our objective function. At the optimal value of $\lambda$ we see an improvement in performance (although still substantially inferior to using an MLP), indicating that the unregularised message-passing MLPs overfit.
101
+
102
+ We also investigate lowering the learning rate in different parts of the GNN, with the aim of identifying where overfitting is localised. If parts of the network are particularly prone to damaging overfitting, training them more slowly may reduce their contribution to policy instability across updates. Results for this experiment can be seen in Figure 5.
103
+
104
+ <span id="page-5-0"></span>![](_page_5_Figure_3.jpeg)
105
+
106
+ Figure 5: Colour-coded final NERVENET performance after 5M training steps on Centipede-20 when changing learning rates for *individual* GNN components, compared to the base learning rate of $3 \times 10^{-4}$ .
107
+
108
+ Not only does lowering the learning rate in parts of the model improve performance, but surprisingly the best performance is obtained when the encoder $F_{\rm in}$ , message function M and decoder $F_{\rm out}$ each have their learning rate set to zero. The encoder and decoder play a similar role to the message function, all of which are implemented as MLPs, whereas the update function U is a GRU (we experimented with using an MLP update function, but found that this significantly reduced performance).
109
+
110
+ Training with a learning rate of zero is equivalent to parameter freezing (e.g., Brock et al. [7]), where parameters are fixed to their initialised values throughout training. NERVENET can learn a policy with some of its functions frozen, as learning still takes place in the un-frozen functions. For instance, if we consider freezing the encoder, this results in an arbitrary mapping of input features to the initial hidden states. As we still train the update function that processes this representation, so long as key information from the input features is not lost via the arbitrary encoding, the update function can still learn useful representations. The same logic applies to using a frozen decoder or message function.
111
+
112
+ Based on the effectiveness of parameter freezing within parts of the network, we propose a simple technique for improving the training of GNNs via gradient-based optimisation, which we name SNOWFLAKE (a naturally-occurring frozen graph structure). SNOWFLAKE assumes a GNN architecture made up internally of functions $F_{\theta}^1,\ldots,F_{\theta}^n$ , where $\theta$ denotes the parameters of a given function. Prior to training we select a fixed subset $\mathcal{Z}\subseteq\{F_{\theta}^1,\ldots,F_{\theta}^n\}$ of these functions. Their parameters are then placed in SNOWFLAKE's frozen set $\zeta=\{\theta\mid F_{\theta}\in \mathcal{Z}\}$ . During training, SNOWFLAKE excludes parameters in $\zeta$ from being updated by the optimiser, instead fixing them to whatever values the GNN architecture uses as an initialisation. Gradients still flow through these operations during backpropagation, but their parameters are not updated. In practice, we found optimal performance for $\mathcal{Z}=\{F_{\rm in},F_{\rm out},M^{\tau}\}$ , i.e. when freezing the encoder, decoder and message function of the GNN. If not stated otherwise, this is the architecture we refer to as SNOWFLAKE in subsequent sections. A visual representation of SNOWFLAKE applied to the NERVENET model can be seen in Figure 11, Appendix A.4.
113
+
114
+ For our experiments, we initialise the values in the GNN using the orthogonal initialisation [44]. We found this to be slightly more effective for frozen and unfrozen training than uniform and Xavier
115
+
116
+ initialisations [\[15\]](#page-10-9). For our message function, which has input and output dimensions of the same size, we find that performance with the frozen orthogonal initialisation is similar to that of simply using the identity function instead of an MLP. However, in the general case where the input and output dimensions of functions in the network differ (such as in the encoder and decoder, or in GNN architectures where layers use representations of different dimensionality), this simplification is not possible and freezing is required.
2104.07586/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2104.07586/paper_text/intro_method.md ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Sharing weight updates or gradients during training is the central idea behind collaborative, distributed, and federated learning of deep networks [1, 22, 24, 25, 28]. In the basic setting of federated stochastic gradient descent, each device learns on local data, and shares gradients to update a global model. Alleviating the need to transmit training data offers several key advantages. This keeps user data private, allaying concerns related to user privacy, security, and other proprietary concerns. Further, this eliminates the need to store, transfer, and manage possibly large datasets. With this framework, one can train a model on medical data without access to any individual's data [3, 32], or perception model
4
+
5
+ ![](_page_0_Figure_8.jpeg)
6
+
7
+ (a) Inverting averaged gradients to recover original image batches
8
+
9
+ ![](_page_0_Figure_10.jpeg)
10
+
11
+ (b) Overview of our proposed GradInversion method
12
+
13
+ Figure 1: We propose (a) GradInversion to recover hidden training image batches with high fidelity via inverting *averaged* gradients. GradInversion formulates (b) an optimization process that transforms noise to input images (Sec. 3.1). It starts with label restoration from the gradient of the fully connected layer (Sec. 3.2), then optimizes inputs to match target gradients under fidelity regularization (Sec. 3.3) and registration-based group consistency regularization (Sec. 3.4) to improve reconstruction quality. This enables recovery of 224 × 224 pixel ImageNet samples from ResNet-50 batch gradients, which was previously impossible (please zoom into examples above. More in Sec. 4).
14
+
15
+ for autonomous driving without invasive data collection [41].
16
+
17
+ While this setting might seem safe at first glance, a few recent works have begun to question the central premise of federated learning - is it possible for gradients to leak private information of the training data? Effectively serving as a "proxy" of the training data, the link between gradients to the data in fact offers potential for retrieving information: from revealing the positional distribution of original data [33, 44], to even enabling pixel-level detailed image reconstruction from gradients [13, 53, 55]. Despite remarkable progress, in-
18
+
19
+ verting an original image through gradient matching remains a very challenging task – successful reconstruction of images of high resolution for complex datasets such as ImageNet [\[9\]](#page-8-9) has remained elusive for batch sizes larger than one.
20
+
21
+ Emerging research on network inversion techniques offers insights into this task. Network inversion enables noise-to-image conversion via back-propagating gradients on appropriate loss functions to the learnable inputs. Initial solutions were limited to shallow networks and low-resolution synthesis [\[11,](#page-8-10) [39\]](#page-9-4), or creating an artistic effect [\[37\]](#page-8-11). However, the field has rapidly evolved, enabling high-fidelity, high-resolution image synthesis on ImageNet from commonly trained classifiers, making downstream tasks data-free for pruning, quantization, continual learning, knowledge transfer, *etc*. [\[5,](#page-8-12) [17,](#page-8-13) [42,](#page-9-5) [48\]](#page-9-6). Among these, DeepInversion [\[48\]](#page-9-6) yields state-of-the-art results on image synthesis for ImageNet. It enables the synthesis of realistic data from a vanilla pretrained ResNet-50 [\[19\]](#page-8-14) classifier by regularizing feature distributions through batch normalization (BN) priors.
22
+
23
+ Building upon DeepInversion [\[48\]](#page-9-6), we delve into the problem of batch recovery via gradient inversion. We formulate the task as the optimization of the input data such that the gradients on that data match the ones provided by the client, while ensuring realism of the input data. However, since the gradient is also a function of the ground-truth label, one of the main challenges is to identify the ground-truth label for each data point in the batch. To tackle this, we propose a one-shot batch label restoration algorithm that uses gradients from the last fully connected layer.
24
+
25
+ Our goal is to recover the exact images that the client possesses. By starting from noisy inputs generated by different random seeds, multiple optimization processes are likely to converge to different minima. Due to the inherently spatially-invariant nature of convolutional neural networks (CNNs), these resulting images share spatial information but differ in the exact location and arrangement. To allow for improved convergence towards the ground truth images, we compute a registered mean image from all candidates and introduce a group consistency regularization term on every optimization process to reduce deviation. We find that the proposed approach and group consistency regularization provide superior image recovery compared to prior optimization approaches [\[13,](#page-8-8) [55\]](#page-9-3).
26
+
27
+ Our non-learning based image recovery method recovers more specific details of the hidden input data when compared to the state-of-the-art generative adversarial networks (GAN), such as BigGAN [\[4\]](#page-8-15). More importantly, we demonstrate that a full recovery of individual images of 224 × 224 px resolution with high fidelity and visual details, by inverting gradients of the batch, is now made feasible even up to a batch size of 48 images.
28
+
29
+ Our main contributions are as follows:
30
+
31
+ • We introduce GradInversion to recover hidden original
32
+
33
+ - images from random noise via optimization given batchaveraged gradients.
34
+ - We propose a label restoration method to recover ground truth labels using final fully connected layer gradients.
35
+ - We introduce a group consistency regularization term, based on multi-seed optimization and image registration, to improve reconstruction quality.
36
+ - We demonstrate that a full recovery of detailed individual images from batch-averaged gradients is now feasible for deep networks such as ResNet-50.
37
+ - We introduce a new *Image Identifiability Precision* metric to measure the ease of inversion over varying batch sizes, and identify samples vulnerable to inversion.
2106.03272/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-02-04T07:55:52.332Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15" etag="GIQFWeJeTKT_IvQlg4xj" version="14.2.9"><diagram id="vgU9q_BkmIba6w9yiacf" name="Page-1">7Vpbb+I4FP41SOWBURInITwWSndH2tGs1Idpn1YmcUJ2TJx1TIH99WvHzt1cOhBotZVosY+vOefzd45PGIDZavsbhenyGwkQHlhGsB2Ah4FlAcNx+ZeQ7KTENC0liWgcKFkleIr/RUpoKOk6DlDW6MgIwSxOm0KfJAnyWUMGKSWbZreQ4OaqKYxQR/DkQ9yV/ogDtpRSa1KT/47iaKlWtk0gG1aw6Kt6ZksYkI0U5c8G5gMwo4QwWVptZwgL5RVqkQp43NNark9Rwk4ZwOCUpraHH59TQB+/wq/J9/kI2GpzbFc8MAr486sqoWxJIpJAPK+kU0rWSYDEtCavVX3+ICRVwr8RYztlTLhmhIuWbIVVK9rG7LlWfuFl44ujag8CPEZR2RWVhNHdc9VRVF+KOUSlGpbXinEZo+RnaTqu9WlIEqa2ZoseUgPisfdqtjAhWVMfHVCn0iaDNELskNqtEgD85CCyQnzLfCBFGLL4tbkRqBAclf0qK/OCMvQbjK52+QrxWq00sFzMlGoaaHD/WZOiYZTlSrvnHYCTbqtGXorE9/0rouI0CSDa4uPMFpAOxlM8GD8MnDkv3SVDUeZTO7OQQp+LTCEYT5O8zyxbr/h43OgtJ1Nb5E8sdynX7MB3s4wZekphbqcNJ6Um9JrYDWOMZwQTmo8FfoAW3qIETa0FuGACAi2clDIRZWh7GEBde6sBI9dx5BjFkp6n2G9TUQ4oGHFZo5vi0F8eJJZ3S2ao2OClTgZHmKHOCzWa+BVmMJ3LMoN7IjNY9i2ZARgfzx1c0Ojgwu7gZKODWxrd7ckdPEAG83Y52YJWnC0I/cfA4vs1pvLr+S+j5Hm4EqSdLLJUztYZfponaOLyiF9oeYIAIi/0dZ7A9T20CHvzBDw+NhquwC5cQ80VWJ7OFUysngAy0QBEmOpZ56VbRuCqYC0PjDhw4CLvIM6b4INMka6o4jhKeNnnGkNc51Ohz5jH4/eqYRUHQc47KYkTlj+rMx04Dy0jJiRBHfspYYUMQxMP9mNYx2naFYzNrl2Bxq5OXx6+iCc0Bz9LYaI9+Avo/4xy9Y18qVNx/mm0uLMETg15mFvlYZcb3sowtqFjmGNEoqeJUiyfcg97fCzggv6AO/GawDXBiYTUH3Ctsz3WITw9cVr7dsfvIkvIOL1NObkN33YH+cROgZ2WMytJsH6vuS7pgT3OTHvl/NB27dOZ2c376nis4QTdfbU3uxZZxfdwXS1a3pbIqu4q7+C6aoETry43va5ausN8iatLmb8SWdzFgvPCPE9P8aM5iwmDd43Ql//NVF9+tLn4Uea78oPszOQHpiklW17AMIkw6ia5RPCidT3OjKoh18qDhaHvTyZazO3PjF2CV9xWkGx7btdfGLrLD+iNWJxPYrkksZyaIr9tTqTY5l5m+bVrjJ5tvqcsXvEwgsUkye9N7YzHnfXFGo74fzA8cqt5e5bkHJ4IDB8hLSs4rmOOkRZcffCELl1+XZoA5p64cv9LkM8Y86CNQcvGE3CikXuLMcE7ijEPvxK56kvPs19mqqF/CnhWAHCb5i/rxQzSg6hBlWXvKYW7WjcF+r3LjDxHu04FFDljBZvyEc9A0vkZjCOBqwwgRVS61r58LaPQA/z0jiJS30dOGHboqf83syXDHIhIbV0Go0dXM94Lnr1521ay1ijTs7XSUBSF1owcaiFcxXgnx5QvZYTCbWE3hF+R
8DKdluYkaidijoTQFcTN5o1SmKYdc1JEdMSfyI+TSNtFuMqRcn+iWXnARnPMMZWoFfKcX72RcRhnIZ+0mD5BZYcNoUFz9frwA3lw2ytVatmTquzUFJyvHiCfUBn0dZcP4izFUKk/TnBcawsxgaw+ps/s+mEu+f9l2mve8zxmsVq5UdPr5tCAe834xtZdetoBTxLci1/VCfVjmGWx3zTd8cgjgNmypPlTw5CuGmtacg4o6cwgZORNjoQhMjzqhCHdmVzHvk5AM7b1W+41oikSwp9O6dMp3dApjcxPt3TmK7vWqx1rrLl2Ty7jlni1+v2zZKLqV+Rg/h8=</diagram></mxfile>
2106.03272/main_diagram/main_diagram.pdf ADDED
Binary file (58.9 kB). View file
 
2106.03272/paper_text/intro_method.md ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Stochastic differential games study the strategic interaction of rational decision-makers in an uncertain dynamical system, and have been widely applied to many areas, including social science, system science, and computer science. For
4
+
5
+ *Proceedings of the* 38 th *International Conference on Machine Learning*, PMLR 139, 2021. Copyright 2021 by the author(s).
6
+
7
+ realistic models, the problem usually lacks tractability and needs numerical methods. With a large number of players resulting in high-dimensional problems, conventional algorithms soon lose efficiency and one may resort to recently developed machine learning tools [\(Hu,](#page-10-0) [2021;](#page-10-0) [Han & Hu,](#page-10-0) [2020;](#page-10-0) [Han et al.,](#page-10-0) [2020\)](#page-10-0). On the other hand, one could utilize its limiting mean-field version, mean-field games (MFGs), to approximate the n-player game for large n (*e.g.*, [Han et al.](#page-10-0) [\(2021\)](#page-10-0)). Introduced independently in [Huang et al.](#page-10-0) [\(2006\)](#page-10-0); [Lasry & Lions](#page-10-0) [\(2007\)](#page-10-0), MFGs study the decision making problem of a continuum of agents, aiming to provide asymptotic analysis of the finite player model in which players interact through their empirical distribution. In an MFG, each agent is infinitesimal, whose decision can not affect the population law. Therefore, the problem can be solved by focusing on the optimal decision of a representative agent in response to the average behavior of the entire population and a fixed-point problem (*cf.* equation [\(2.5\)](#page-2-0)). The MFG model has inspired tremendous applications, not only in finance and economics, such as system risk [\(Carmona et al.,](#page-9-0) [2015\)](#page-9-0), high-frequency trading [\(Lachapelle et al.,](#page-10-0) [2016\)](#page-10-0) and crowd trading [\(Cardaliaguet & Lehalle,](#page-9-0) [2018\)](#page-9-0), but also to population dynamics [\(Achdou et al.,](#page-9-0) [2017;](#page-9-0) [Djehiche et al.,](#page-9-0) [2017;](#page-9-0) [Achdou & Lasry,](#page-9-0) [2019\)](#page-9-0) and sanitary vaccination [\(Hu](#page-10-0)[bert & Turinici,](#page-10-0) [2018;](#page-10-0) [Elie et al.,](#page-9-0) [2020a\)](#page-9-0), to list a few. 
For a systematic introduction of MFGs, see [Caines et al.](#page-9-0) [\(2017\)](#page-9-0); [Carmona & Delarue](#page-9-0) [\(2018a;b\)](#page-9-0).
8
+
9
+ In MFGs, the random shocks to the dynamical system can be from two sources: idiosyncratic to the individual players and common to all players, *i.e.*, decision-makers face correlated randomness. While MFGs were initially introduced with only idiosyncratic noise as seen in most of the literature, games with common noise, referred to as *MFGs with common noise*, have attracted significant attention recently [\(Lacker & Webster,](#page-10-0) [2015;](#page-10-0) [Carmona et al.,](#page-9-0) [2016;](#page-9-0) [Ahuja,](#page-9-0) [2016;](#page-9-0) [Graber,](#page-9-0) [2016\)](#page-9-0). The inclusion of common noise is natural in many contexts, such as multi-agent trading in a common stock market, or systemic risk induced through inter-bank lending/borrowing. In reality, players make decisions in a common environment (*e.g.*, trade in the same stock market). Therefore, their states are subject to correlated random shocks, which can be modeled by individual noises and a common noise. In this modeling, observing the state dynamics will be sufficient, and one does not need
10
+
11
+ <sup>\*</sup>Equal contribution <sup>1</sup>Department of Statistics and Applied Probability, University of California, Santa Barbara, CA 93106-3110, USA <sup>2</sup>Department of Mathematics, University of California, Santa Barbara, CA 93106-3080, USA. Correspondence to: Ruimeng Hu <rhu@ucsb.edu>.
12
+
13
+ to observe the noises. These applications make it crucial to develop efficient and accurate algorithms for computing MFGs with common noise.
14
+
15
+ Theoretically, MFGs with common noise can be formulated as an infinite-dimensional master equation, which is a type of second-order nonlinear Hamilton-Jacobi-Bellman equation involving derivatives with respect to a probability measure. Therefore, direct simulation is infeasible due to the difficulty of discretizing the probability space. An alternative way of solving MFGs with common noise is to formulate it into a stochastic Fokker-Planck/Hamilton-Jacobi-Bellman system, which has a complicated form with common noise, forward-backward coupling, and second-order differential operators. The third kind of approach turns it into forward-backward stochastic differential equations (FBSDE) of McKean-Vlasov type (*cf.* [Carmona &](#page-9-0) [Delarue](#page-9-0) [\(2018b,](#page-9-0) Chapter 2)), which in general requires convexity of the Hamiltonian. For all three approaches, the common assumption is the monotonicity condition that ensures uniqueness. Regarding simulation, existing deep learning methods fix the sampled common noise paths and then solve the corresponding MFGs, which leads to a nested-loop structure with millions of simulations of common noise paths to produce accurate predictions for unseen common shock realizations. Then the computational cost becomes prohibitive and limits the applications to a large extent.
16
+
17
+ In this paper, we solve MFGs with common noise by directly parameterizing the optimal control using deep neural networks in spirit of [\(Han & E,](#page-10-0) [2016\)](#page-10-0), and conducting a global optimization. We integrate the signature from rough path theory, and fictitious play from game theory for efficiency and accuracy, and term the algorithm *Signatured Deep Fictitious Play* (Sig-DFP). The proposed algorithm avoids solving the three aforementioned complicated equations (master equation, Stochastic FP/HJB, FBSDE) and does not have uniqueness issues.
18
+
19
+ Contribution. We design a novel efficient single-loop deep learning algorithm, Sig-DFP, for solving MFGs with common noise by integrating fictitious play [\(Brown,](#page-9-0) [1949\)](#page-9-0) and Signature [\(Lyons et al.,](#page-10-0) [2007\)](#page-10-0) from rough path theory. To our best knowledge, this is the first work focusing on the common noise setting, which can address heterogeneous MFGs and heterogeneous extended MFGs, both with common noise.
20
+
21
+ We prove that the Sig-DFP algorithm can reach mean-field equilibria as both the depth M of the truncated signature and the stage n of the fictitious play approaching infinity, subject to the universal approximation of neural networks. We demonstrate its convergence superiority on three benchmark examples, including homogeneous MFGs, heterogeneous MFGs, and heterogeneous extended MFGs, all with common noise, and with assumptions even beyond the technical
22
+
23
+ requirements in the theorems. Moreover, the algorithm has the following advantages:
24
+
25
+ - 1. Temporal and spatial complexity are $O(NLp + Np^2)$ and $O(NLp)$, compared to $O(N^2L)$ (for both time and space) in existing machine learning algorithms, with $N$ as the sample size, $L$ as the time discretization size, $p = O(n_0^M)$, and $n_0$ as the dimension of the common noise.
26
+ - 2. Easy to apply the fictitious play strategy: only need to average over linear functionals with O(1) complexity.
27
+
28
+ Related Literature. After MFGs firstly introduced by [Huang et al.](#page-10-0) [\(2006\)](#page-10-0) and [Lasry & Lions](#page-10-0) [\(2007\)](#page-10-0) under the setting of a continuum of homogeneous players but without common noise, it has been extended to many applicable settings, *e.g.*, heterogeneous players games [\(Lacker & Za](#page-10-0)[riphopoulou,](#page-10-0) [2019;](#page-10-0) [Lacker & Soret,](#page-10-0) [2020\)](#page-10-0) and major-minor players games [\(Huang,](#page-10-0) [2010;](#page-10-0) [Nourian & Caines,](#page-10-0) [2013;](#page-10-0) [Car](#page-9-0)[mona & Zhu,](#page-9-0) [2016\)](#page-9-0). A recent line of work studies MFGs with common noise [\(Carmona et al.,](#page-9-0) [2015;](#page-9-0) [Bensoussan et al.,](#page-9-0) [2015;](#page-9-0) [Ahuja,](#page-9-0) [2016;](#page-9-0) [Cardaliaguet et al.,](#page-9-0) [2019\)](#page-9-0). Despite its theoretical progress and importance for applications, efficient numerical algorithms focusing on common noise settings are still missing. Our work will fill this gap by integrating machine learning tools with learning procedures from game theory and signature from rough path theory.
29
+
30
+ Fictitious play was firstly proposed in [Brown](#page-9-0) [\(1949;](#page-9-0) [1951\)](#page-9-0) for normal-form games, as a learning procedure for finding Nash equilibria. It has been widely used in the Economic literature, and adapted to MFGs [\(Cardaliaguet & Hadikhan](#page-9-0)[loo,](#page-9-0) [2017;](#page-9-0) [Briani & Cardaliaguet,](#page-9-0) [2018\)](#page-9-0) and finite-player stochastic differential games [\(Hu,](#page-10-0) [2021;](#page-10-0) [Han & Hu,](#page-10-0) [2020;](#page-10-0) [Han et al.,](#page-10-0) [2020;](#page-10-0) [Xuan et al.,](#page-11-0) [2021\)](#page-11-0).
31
+
32
+ Using machine learning to solve MFGs has also been considered, for both model-based setting [\(Carmona & Laurière,](#page-9-0) [2019;](#page-9-0) [Ruthotto et al.,](#page-10-0) [2020;](#page-10-0) [Lin et al.,](#page-10-0) [2020\)](#page-10-0) and model-free reinforcement learning setting [\(Guo et al.,](#page-9-0) [2019;](#page-9-0) [Tiwari](#page-11-0) [et al.,](#page-11-0) [2019;](#page-11-0) [Angiuli et al.,](#page-9-0) [2020;](#page-9-0) [Elie et al.,](#page-9-0) [2020b\)](#page-9-0), most of which did not consider common noise. Existing machine learning methods for MFGs with common noise were studied in [Perrin et al.](#page-10-0) [\(2020\)](#page-10-0), which have a nested-loop structure and require millions of simulations of common noise paths to produce accurate predictions for unseen common shock realizations.
33
+
34
+ The signature in rough path theory has been recently applied to machine learning as a feature map for sequential data. For example, [Király & Oberhauser](#page-10-0) [\(2019\)](#page-10-0); [Bonnier et al.](#page-9-0) [\(2019\)](#page-9-0); [Toth & Oberhauser](#page-11-0) [\(2020\)](#page-11-0); [Min & Ichiba](#page-10-0) [\(2020\)](#page-10-0) have used signatures in natural language processing, time series, and handwriting recognition, and [Chevyrev & Ober](#page-9-0)[hauser](#page-9-0) [\(2018\)](#page-9-0); [Ni et al.](#page-10-0) [\(2020\)](#page-10-0) studied the relation between signatures and distributions of sequential data. We refer to [Lyons & Qian](#page-10-0) [\(2002\)](#page-10-0); [Lyons et al.](#page-10-0) [\(2007\)](#page-10-0) for a more
35
+
36
+ <span id="page-2-0"></span>detailed introduction of the signature and rough path theory.
37
+
38
+ We first introduce the following notations to precisely define MFGs with common noise. For a fixed time horizon T, let $(W_t)_{0 \le t \le T}$ and $(B_t)_{0 \le t \le T}$ be independent n- and $n_0$ -dimensional Brownian motions defined on a complete filtered probability space $(\Omega, \mathcal{F}, \mathbb{F} = \{\mathcal{F}_t\}_{0 \le t \le T}, \mathbb{P})$ . We shall refer W as the *idiosyncratic noise* and B as the *common noise* of the system. Let $\mathcal{F}_t^B$ be the filtration generated by $(B_t)_{0 \le t \le T}$ , and $\mathcal{P}^p(\mathbb{R}^d)$ be the collection of probability measures on $\mathbb{R}^d$ with finite $p^{th}$ moment, i.e., $\mu \in \mathcal{P}^p(\mathbb{R}^d)$ if
39
+
40
+ $$\left(\int_{\mathbb{R}^d} \|x\|^p \,\mathrm{d}\mu(x)\right)^{1/p} < \infty. \tag{2.1}$$
41
+
42
+ We denote by $\mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ the space of continuous $\mathcal{F}^B$ -adapted stochastic flow of probability measures with the finite second moment, and by $\mathcal{H}^2([0,T];\mathbb{R}^m)$ the set of all $\mathcal{F}$ -progressively measurable $\mathbb{R}^m$ -valued square-integrable processes.
43
+
44
+ Next, we introduce the concept of MFGs with common noise. Given an initial distribution $\mu_0 \in \mathcal{P}^2(\mathbb{R}^d)$ , and a stochastic flow of probability measures $\mu = (\mu_t)_{0 \le t \le T} \in \mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ , we consider the stochastic control
45
+
46
+ $$\inf_{(\alpha_t)_{0 \le t \le T}} \mathbb{E}[\int_0^T f(t, X_t, \mu_t, \alpha_t) \, \mathrm{d}t + g(X_T, \mu_T)], \quad (2.2)$$
47
+
48
+ where
49
+ $$dX_t = b(t, X_t, \mu_t, \alpha_t) dt + \sigma(t, X_t, \mu_t, \alpha_t) dW_t$$
50
+
51
+ $+ \sigma^0(t, X_t, \mu_t, \alpha_t) dB_t,$ (2.3)
52
+
53
+ with $X_0 \sim \mu_0$ . Here the representative agent controls his dynamics $X_t$ through a $\mathbb{R}^m$ -dimensional control process $\alpha_t$ , and the drift coefficient b, diffusion coefficients $\sigma$ and $\sigma^0$ , running cost f and terminal cost g are all measurable functions, with $(b, \sigma, \sigma^0, f) : [0, T] \times \mathbb{R}^d \times \mathcal{P}^2(\mathbb{R}^d) \times \mathbb{R}^m \to \mathbb{R}^d \times \mathbb{R}^{d \times n} \times \mathbb{R}^{d \times n_0} \times \mathbb{R}$ , and $g : \mathbb{R}^d \times \mathcal{P}^2(\mathbb{R}^d) \to \mathbb{R}$ .
54
+
55
+ Note that since $\mu$ is stochastic, (2.2)–(2.3) is a control problem with random coefficients.
56
+
57
+ **Definition 2.1** (Mean-field equilibrium). The control-distribution flow pair $\alpha^* = (\alpha_t^*)_{0 \le t \le T} \in \mathcal{H}^2([0,T];\mathbb{R}^m)$ , $\mu^* \in \mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ is a mean-field equilibrium to the MFG with common noise, if $\alpha^*$ solves (2.2) given the stochastic measure flow $\mu^*$ , and the conditional marginal distribution of the optimal path $X_t^{\alpha^*}$ given the common noise B coincides with the measure flow $\mu^*$ :
58
+
59
+ $$\mu_t^* = \mathcal{L}(X_t^{\alpha^*} | \mathcal{F}_t^B), \tag{2.4}$$
60
+
61
+ where $\mathcal{L}(\cdot|\mathcal{F})$ is the conditional law given a filtration $\mathcal{F}$ .
62
+
63
+ We remark that, with a continuum of agents, the measure $\mu^*$ is not affected by a single agent's choice, and the
64
+
65
+ MFG is a standard control problem plus an additional fixed-point problem. More precisely, denote by $\hat{\alpha}^{\mu}$ the optimal control of (2.2)–(2.3) given the stochastic measure flow $\mu \in \mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ , then $\mu^*$ is a fixed point of
66
+
67
+ $$\mu_t = \mathcal{L}(X_t^{\hat{\alpha}^{\mu}} | \mathcal{F}_t^B). \tag{2.5}$$
68
+
69
+ MFGs without common noise: Note that with $\sigma^0 \equiv 0$, (2.2)–(2.3) is an MFG without common noise, and the flow of measures $\mu_t$ becomes deterministic.
70
+
71
+ Extended MFGs: In extended mean field games, the interactions between the representative agent and the population happen via both the states and controls, thus the functions $(b, \sigma, \sigma^0, f, g)$ can also depend on $\mathcal{L}(\alpha_t | \mathcal{F}_t^B)$ .
72
+
73
+ The Signatured Deep Fictitious Play (Sig-DFP) algorithm is built on fictitious play, and propagates conditional distributions $\mu=\{\mu_t\}_{0\leq t\leq T}\in \mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ by signatures. This section briefly introduces these two ingredients.
74
+
75
+ In the learning procedure of *fictitious play*, players myopically choose their best responses against the empirical distribution of others' actions at every subsequent stage after arbitrary initial moves. When Cardaliaguet & Hadikhanloo (2017); Cardaliaguet & Lehalle (2018) extended it to mean-field settings, the empirical distribution of actions is naturally replaced by the average of distribution flows. More precisely, let $\bar{\mu}^{(0)} \in \mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ be the initial guess of $\mu^*$ in (2.4), and consider the following iterative algorithm: (1) take $\bar{\mu}^{(n-1)} \in \mathcal{M}([0,T];\mathcal{P}^2(\mathbb{R}^d))$ as the given flow of measures in (2.2)–(2.3) for the n-th iteration, and solve the optimal control in (2.2) denoted by $\alpha^{(n)}$ ; (2) solve the controlled stochastic differential equation (SDE) (2.3) for $X^{\alpha^{(n)}}$ and then infer the conditional distribution flow $\mu^{(n)}_t = \mathcal{L}(X_t^{\alpha^{(n)}}|\mathcal{F}^B_t)$ ; (3) average distributions $\bar{\mu}^{(n)} = \frac{n-1}{n}\bar{\mu}^{(n-1)} + \frac{1}{n}\mu^{(n)}$ and pass $\bar{\mu}^{(n)}$ to the next iteration. If $\mu^{(n)}$ converges and the strategy corresponding to the limiting measure flow is admissible, then by construction, it is a fixed-point of (2.5) and thus a mean-field equilibrium.
76
+
77
+ Signatures of Paths. Let $T((\mathbb{R}^d)) := \bigoplus_{k=0}^{\infty} (\mathbb{R}^d)^{\bigotimes k}$ be the tensor algebra, and denote by $\mathcal{V}^p([0,T],\mathbb{R}^d)$ the space of continuous mappings from [0,T] to $\mathbb{R}^d$ with finite p-variation. For a path $x:[0,T]\to\mathbb{R}^d$ , define the p-variation
78
+
79
+ $$||x||_p := \left(\sup_{D \subset [0,T]} \sum_{i=0}^{r-1} ||x_{t_{i+1}} - x_{t_i}||^p\right)^{1/p}, \quad (3.1)$$
80
+
81
+ where $D \subset [0,T]$ denotes a partition $0 \le t_0 < t_1 < \ldots < t_r \le T$ . We equip the space $\mathcal{V}^p([0,T],\mathbb{R}^d)$ with the norm $\|\cdot\|_{\mathcal{V}^p} := \|\cdot\|_{\infty} + \|\cdot\|_p$ .
82
+
83
+ **Definition 3.1** (Signature). Let $X \in \mathcal{V}^p([0,T], \mathbb{R}^d)$ such that the following integral makes sense. The signature of
84
+
85
+ <span id="page-3-0"></span>X, denoted by S(X), is an element of $T((\mathbb{R}^d))$ defined by $S(X) = (1, X^1, \dots, X^k, \dots)$ with
86
+
87
+ $$X^k = \int_{0 < t_1 < t_2 < \dots < t_k < T} dX_{t_1} \otimes \dots \otimes dX_{t_k}. \quad (3.2)$$
88
+
89
+ We denote by $S^M(X)$ the truncated signature of X of depth M, i.e., $S^M(X)=(1,X^1,\cdots,X^M)$ and has the dimension $\frac{d^{M+1}-1}{d-1}$ .
90
+
91
+ Note that when X is a semi-martingale (the case of our problems), equation (3.2) is understood in the Stratonovich sense. The following properties of the signature make it an ideal choice for our problem, with more details in Appendix A.
92
+
93
+ - 1. Signatures characterize paths uniquely up to the tree-like equivalence, and the equivalence is removed if at least one dimension of the path is strictly increasing (Boedihardjo et al., 2016). Therefore, we shall augment the original path with the time dimension in the algorithm, *i.e.*, working with $\hat{X}_t = (t, X_t)$ since $S(\hat{X})$ characterizes paths $\hat{X}$ uniquely.
94
+ - 2. Terms in the signature present a factorial decay property, which provides the accuracy of using a few terms in the signature (small M) to approximate a path.
95
+ - 3. As a feature map of sequential data, the signature has a universality detailed in the following theorem.
96
+
97
+ **Theorem 3.1** (Universality, Bonnier et al. (2019)). Let $p \ge 1$ and $f: \mathcal{V}^p([0,T],\mathbb{R}^d) \to \mathbb{R}$ be a continuous function in paths. For any compact set $K \subset \mathcal{V}^p([0,T],\mathbb{R}^d)$ , if S(x) is a geometric rough path for any $x \in K$ , then for any $\epsilon > 0$ there exist M > 0 and a linear functional $l \in T((\mathbb{R}^d))^*$ such that
98
+
99
+ $$\sup_{x \in K} |f(x) - \langle l, S(x) \rangle| < \epsilon. \tag{3.3}$$
100
+
101
+ # Method
102
+
103
+ We introduce two shorthand notations: if x is a path indexed by $t \in [0,T]$ , then $x := (x_t)_{0 \le t \le T}$ denotes the whole path and $x_{s:t} := (x_u)_{s \le u \le t}$ denotes the path between s and t.
104
+
105
+ With the presence of common noise, existing algorithms mostly consider a nested-loop structure, with the inner one for idiosyncratic noise W and the outer one for common noise B. More precisely, if one works with N idiosyncratic Brownian paths $\{W^k\}_{k=1}^N$ and N common Brownian paths $\{B^k\}_{k=1}^N$ , then for each $B^j$ , one needs to simulate N paths $\{X^{i,j}\}_{i=1}^N$ defined by (2.3) over all idiosyncratic Brownian paths and solve the problem (2.2) associated to $B^j$ . This requires a total of $N^2$ simulations of (2.3). With a sufficiently large N, $\mu_t = \mathcal{L}(X_t | \mathcal{F}_t^B)$ is approximated well by $\frac{1}{N^2} \sum_{i,j=1}^N \delta_{X_t^{i,j}} \mathbf{1}_{\omega^{0,j}}$ with $\omega^{0,j} \in \Omega$ corresponding to the
106
+
107
+ trajectory $B^j$ . The double summation is of $\mathcal{O}(N^2)$ which is computationally expensive for large N.
108
+
109
+ We shall address the aforementioned numerical difficulties by signatures. The key idea is to approximate $\mu_t$ by
110
+
111
+ $$\mu_t \equiv \mathcal{L}(X_t | \mathcal{F}_t^B) = \mathcal{L}(X_t | S(\hat{B}_t)) \approx \mathcal{L}(X_t | S^M(\hat{B}_t)),$$
112
+ with $\hat{B}_t = (t, B_t),$
113
+
114
+ $$(4.1)$$
115
+
116
+ where the equal sign comes from the unique characterization of signatures $S(\hat{B})$ to the paths $B_{0:t}$ , and the approximation is accurate for large M due to the factorial decay property of the signature. The last term is then computed by machine learning methods, e.g., by Generative Adversarial Networks (GANs). In addition, if the agents interact via some population average subject to common noise: $\mu_t = \mathbb{E}[\iota(X_t)|\mathcal{F}_t^B]$ , the approximation in (4.1) can be arbitrarily close to the true measure flow for sufficiently large M. The following lemma gives a precise statement.
117
+
118
+ **Lemma 4.1.** Suppose $\mu_t = \mathbb{E}[\iota(X_t)|\mathcal{F}_t^B]$ where $\iota : \mathbb{R}^d \to \mathbb{R}$ is a measurable function. View $\mu_t$ as $\mu(t, B_{0:t})$ with $\mu : \mathcal{V}^p([0,T],\mathbb{R}^{n_0+1}) \to \mathbb{R}$ continuous for some $p \in (2,3)$ , and let $K \subset \mathcal{V}^p([0,T],\mathbb{R}^{n_0+1})$ be a compact set, then for any $\epsilon > 0$ , there exist a positive integer M and a linear functional $l \in T((\mathbb{R}^{n_0+1}))^*$ , such that
119
+
120
+ $$\sup_{t \in [0,T]} \sup_{\hat{B} \in K} |\mu_t - \langle l, S^M(\hat{B}_{0:t}) \rangle| < \epsilon.$$
121
+ (4.2)
122
+
123
+ *Proof.* See Appendix A for details due to the page limit. $\Box$
124
+
125
+ With all the above preparations, we now explain how the approximation to $\mu=\{\mu_t\}_{0\leq t\leq T}$ using signatures is implemented. Given N pairs of idiosyncratic and common Brownian paths $(W^i,B^i)$ and assume $\alpha_t$ in (2.3) is already obtained (which will be explained in Section 4.2), we first sample the optimized state processes $(X^i_t)_{0\leq t\leq T}$ , producing N samples $\{X^i\}_{i=1}^N$ . Then the linear functional l in Lemma 4.1 is approximated by implementing linear regressions on $\{S^M(\hat{B}^i_{0:t})\}_{i=1}^N$ with dependent variable $\{\iota(X^i_t)\}_{i=1}^N$ at several time stamps t, i.e.,
126
+
127
+ $$\hat{l} = \underset{\boldsymbol{\beta}}{\arg \min} \| \boldsymbol{y} - \boldsymbol{X}\boldsymbol{\beta} \|^{2},$$
128
+
129
+ $$\boldsymbol{y} = \{ \iota(X_{t}^{i}) \}_{i=1}^{N}, \ \boldsymbol{X} = \{ S^{M}(\hat{B}_{0:t}^{i}) \}_{i=1}^{N}.$$
130
+ (4.3)
131
+
132
+ In all experiments in Section 5, we get decent approximations of $\mu$ on [0,T] by considering only three time stamps $t=0,\frac{T}{2},T$ . Note that such a framework can also deal with multi-dimensional $\iota$ , where the regression coefficients become a matrix.
133
+
134
+ The choice in (4.3) is mainly motivated by Lemma 4.1 stating l is a linear functional, and by the probability model underlying ordinary least squares (OLS) which interprets that the least square minimization (4.3) gives the best
135
+
136
+ <span id="page-4-0"></span>prediction of $E[\boldsymbol{y}|\boldsymbol{X}]$ restricting to linear relations. There are other benefits for choosing OLS: Once $\hat{l}$ is obtained in (4.3), the prediction for unseen common paths is efficient: $\mu_t(\tilde{\omega}) \approx \langle \hat{l}, S^M(\hat{B}_{0:t}(\tilde{\omega})) \rangle$ for any $\tilde{\omega}$ and t. Moreover, it is easy to integrate with fictitious play: averaging $\mu_t^{(n)}$ from different iterations, commonly needed in fictitious play, now means simply averaging $\hat{l}^{(n)}$ over n. Next, we analyze the temporal and spatial complexity of using signatures and linear regression as below.
137
+
138
+ Temporal Complexity: Suppose we discretize [0,T] into L time stamps: $0 = t_0 \le t_1 \le \ldots \le t_L = T$ , and simulate N paths of W, B and $X_t$ . The simulation cost is of $\mathcal{O}(NL)$ . For computing the truncated signature $S^M(\hat{B})$ of depth M, we use the Python package Signatory (Kidger & Lyons, 2020), yielding a complexity of $\mathcal{O}(NLp)$ where p = $\frac{(n_0+1)^{M+1}-1}{n_0}=\mathcal{O}(n_0^M).$ Note that one can choose a large N and reuse all sampled common noise paths B for each iteration of fictitious play, thus the computation of $S^M(B)$ is done only once, and $S^M(\hat{B}_{0:t})$ is accessible in constant time for all t. The linear regression (or Ridge regression) takes time $\mathcal{O}(Np^2)$ . Thus, the total temporal complexity is of $\mathcal{O}(NLp + Np^2)$ , which is linear in N given<sup>2</sup> $p \ll N$ . Comparing to the nested-loop algorithm, where the cost of simulating SDEs is $\mathcal{O}(N^2L)$ and computing conditional distribution flows takes time $\mathcal{O}(N^2L)$ , we claim that our algorithm reduced the temporal complexity by a factor of the sample size N by using signatures.
139
+
140
+ Spatial Complexity: In fictitious play, one may choose to average all past flow of measures $\mu^{(n)}$ as the given measures in (2.2)–(2.3) for the current iteration. Using signatures simplifies it to average $\hat{l}^{(n)}$. To update it between iterations, one needs to store the current average which costs $\mathcal{O}(p)$ of the memory. Combining $\mathcal{O}(NL)$ and $\mathcal{O}(NLp)$ for storing SDEs and truncated signatures, the overall spatial complexity is $\mathcal{O}(NLp)$. The complexity of the nested-loop case is again $\mathcal{O}(N^2L)$, which we reduce by a factor of N.
141
+
142
+ We conclude this section by the following remark: For the general case $\mu_t = \mathcal{L}(X_t | \mathcal{F}^B_t)$ , though the linear regression is no longer available, the one-to-one mapping between $\mu$ and $S(\hat{B})$ persists. Therefore, one can train a Generative Adversarial Network (GAN, Goodfellow et al. (2014)) for generating samples following the distribution $\mu$ by taking truncated signatures as part of the network inputs.
143
+
144
+ Having explained the key idea on how to approximate $\mu$ efficiently, we describe the Sig-DFP algorithm in this sub-
145
+
146
+ ![](_page_4_Figure_9.jpeg)
147
+
148
+ Figure 1. Flowchart of one iteration in the Sig-DFP Algorithm. Input: idiosyncratic noise W, common noise B, initial position $X_0$ and measure flow $\hat{\mu}^{(n-1)}$ from the last iteration. Output: measure flow $\hat{\mu}^{(n)}$ for the next iteration.
149
+
150
+ section. The algorithm consists of repeatedly solving (2.2)–(2.3) for a given measure flow $\mu$ using deep learning in the spirit of Han & E (2016), and passing the yielded $\mu$ to the next iteration by using signatures. The flowchart of the idea is illustrated in Figure 1. Consider a partition $\pi$ of $[0,T]: 0=t_0<\cdots< t_L=T$ , denote by $\hat{\mu}^{(n-1)}$ the given flow of measures at stage n, the stochastic optimal control problem (2.2)–(2.3) is solved by
151
+
152
+ $$\inf_{\{\alpha_k\}_{k=0}^{N-1}} \frac{1}{N} \sum_{i=1}^{N} \left( \sum_{k=0}^{L-1} f(t_k, X_k^i, \hat{\mu}_k^{(n-1)}(\omega^i), \alpha_k^i) \Delta_k + g(X_L^i, \hat{\mu}_L^{(n-1)}(\omega^i)) \right), \tag{4.4}$$
153
+ where $X_{k+1}^i = X_k^i + b(t_k, X_k^i, \hat{\mu}_k^{(n-1)}(\omega^i), \alpha_k^i) \Delta_k + \sigma(t_k, X_k^i, \hat{\mu}_k^{(n-1)}(\omega^i), \alpha_k^i) \Delta W_k^i + \sigma^0(t_k, X_k^i, \hat{\mu}_k^{(n-1)}(\omega^i), \alpha_k^i) \Delta B_k^i, \tag{4.5}$
154
+
155
+ where we replace the subscript $t_k$ by k to simplify notations, and let $\Delta_k = t_{k+1} - t_k$ , $\Delta W_k^i = W_{t_{k+1}}^i - W_{t_k}^i$ , $\Delta B_k^i = B_{t_{k+1}}^i - B_{t_k}^i$ . Here, we use the superscript i to represent the $i^{th}$ sample path and $\hat{\mu}_k^{(n-1)}(\omega^i)$ to emphasize the stochastic measure's dependence on the $i^{th}$ sample path of B up to time $t_k$ . The control $\alpha_k$ is then parameterized by neural networks (NNs) in the feedback form:
156
+
157
+ $$\alpha_k^i := \alpha_{\varphi}(t_k, X_k^i, \hat{\mu}_k^{(n-1)}(\omega^i); \varphi), \tag{4.6}$$
158
+
159
+ where $\alpha_{\varphi}$ denotes the NN map with parameters $\varphi$ , and searching the infimum in (4.4) is translated into minimizing $\varphi$ . The yielded optimizer $\varphi^*$ gives $\alpha_k^{i,*}$ , with which the optimized state process paths $\{X^{i,*}\}_{i=1}^N$ are simulated and
160
+
161
+ <sup>1</sup>We use the Python package scikit-learn (Pedregosa et al., 2011) to do the linear regression.
162
+
163
+ <sup>2</sup>$M$ is usually small due to the factorial decay property of the signature. For $n_0$ not large, we have $p \ll N$.
164
+
165
+ **Algorithm 1** (Sig-DFP). **Input:** $b, \sigma, \sigma^0, f, g, \iota$ and $X_0^i, (W_{t_k}^i)_{k=0}^L, (B_{t_k}^i)_{k=0}^L$ for $i=1,2,\ldots,N$; $N_{\text{round}}$: rounds for FP; $B$: minibatch size; $N_{\text{batch}}$: number of minibatches. Compute the signatures of $B_{0:t_k}^i$ for $i = 1, \ldots, N$, $k = 1,\ldots,L$; initialize $\hat{\mu}^{(0)}, \varphi$. **For** $n=1$ **to** $N_{\text{round}}$: (i) **for** $r=1$ **to** $N_{\text{batch}}$: simulate the $r^{th}$ minibatch of $X^{i,(n)}$ using $\hat{\mu}^{(n-1)}$ and compute $J_B(\varphi, \hat{\mu}^{(n-1)})$; minimize $J_B(\varphi, \hat{\mu}^{(n-1)})$ over $\varphi$, then update $\alpha_{\varphi}$; (ii) simulate $X^{i,(n)}$ with the optimized $\alpha_{\varphi}^*$, for $i = 1, \ldots, N$; (iii) regress $\iota(X_0^{i,(n)}), \iota(X_{L/2}^{i,(n)}), \iota(X_L^{i,(n)})$ on $S^M(\hat{B}_{0:0}^i)$, $S^{M}(\hat{B}_{0:t_{L/2}}^{i}), S^{M}(\hat{B}_{0:t_{L}}^{i})$ to get $l^{(n)}$; (iv) update $\bar{l}^{(n)} = \frac{n-1}{n} \bar{l}^{(n-1)} + \frac{1}{n} l^{(n)}$; (v) compute $\hat{\mu}^{(n)}$ by $\hat{\mu}_k^{(n)}(\omega^i)=\langle \bar{l}^{(n)},S^M(\hat{B}_{0:t_k}^i)\rangle$, for $i = 1, 2, \dots, N$, $k = 1, \dots, L$. **Output:** the optimized $\alpha_{\varphi}^*$ and $\bar{l}^{(N_{\text{round}})}$.
166
+
167
+ its conditional law $\mathcal{L}(X^*|\mathcal{F}^B)$ , denoted by $\mu^{(n)}$ , is approximated using signatures as described in Section 4.1. This finishes one iteration of fictitious play. Denote by $\tilde{\mu}^{(n)}$ the approximation of $\mu^{(n)}$ , we then pass $\tilde{\mu}^{(n)}$ to the next iteration via updating $\hat{\mu}^{(n)} = \frac{1}{n}\tilde{\mu}^{(n)} + \frac{n-1}{n}\hat{\mu}^{(n-1)}$ by averaging the coefficients in (4.3).
168
+
169
+ We summarize it in Algorithm 1, with implementation details deferred to Appendix B. Note that the simulation of $X^{i,(n)}$ and $J_B(\varphi, \hat{\mu}^{(n-1)})$ uses the equations (B.2) and (B.1) in Appendix B, respectively.
170
+
171
+ **Theorem 4.1** (Convergence analysis). Let $(\alpha^*, \mu^*)$ be the mean-field equilibrium in Definition 2.1, $\alpha^{(n)}$ be the optimal control, and $\mu^{(n)}$ be the measure flow of the optimized state process after the $n^{th}$ iteration of fictitious play, and $\tilde{\mu}^{(n)}$ be the approximation by truncated signatures. Under Assumption C.1 and $\sup_{t \in [0,T]} \mathbb{E}[\mathcal{W}_2^2(\tilde{\mu}_t^{(n)}, \mu_t^{(n)})] \leq \epsilon$ , we have
172
+
173
+ $$\sup_{t \in [0,T]} \mathbb{E}[\mathcal{W}_{2}^{2}(\tilde{\mu}_{t}^{(n)}, \mu_{t}^{*})] + \int_{0}^{T} \mathbb{E}|\alpha_{t}^{(n)} - \alpha_{t}^{*}|^{2} dt$$
174
+
175
+ $$\leq C(q^{n} \sup_{t \in [0,T]} \mathbb{E}[\mathcal{W}_{2}^{2}(\mu_{t}^{(0)}, \mu_{t}^{*})] + \epsilon),$$
176
+
177
+ for some constants C>0 and 0< q<1, where $W_2$ denotes the 2-Wasserstein metric.
178
+
179
+ Moreover, if we consider a partition of $[0,T]: 0 = t_0 < \cdots < t_L = T$, and define $\pi(t) = t_k$ for $t \in [t_k, t_{k+1})$ with $\|\pi\| = \max_{1 \le k \le L} |t_k - t_{k-1}|$, then
180
+
181
+ **Theorem 4.2** (Convergence in discrete time). Let $\mu_{t_k}^{(n)}$ be the conditional law of the discretized optimal process $X_{t_k}^{(n)}$ after the $n^{th}$ iteration of fictitious play (cf. (4.5)), and $\tilde{\mu}_{t_k}^{(n)}$ be the approximation by truncated signatures. Under Assumption C.1 and $\sup_{0 \leq k \leq L} \mathbb{E}[\mathcal{W}_2^2(\tilde{\mu}_{t_k}^{(n)}, \mu_{t_k}^{(n)})] \leq \epsilon$ , one has
182
+
183
+ $$\begin{split} \sup_{t \in [0,T]} \mathbb{E}[\mathcal{W}_2^2(\tilde{\mu}_{\pi(t)}^{(n)}, \mu_t^*)] + \int_0^T \mathbb{E}|\alpha_{\pi(t)}^{(n)} - \alpha_t^*|^2 \, \mathrm{d}t \\ & \leq C(q^n \sup_{0 \leq k \leq L} \mathbb{E}[\mathcal{W}_2^2(\mu_{t_k}^{(0)}, \mu_{t_k}^*)] + \epsilon + \|\pi\|), \end{split}$$
184
+
185
+ for some constants C>0 and 0< q<1, where $\alpha_{t_k}^{(n)}=\hat{\alpha}(t_k,X_{t_k},Y_{t_k},\tilde{\mu}_{t_k}^{(n-1)})$ , and $(X_t,Y_t)$ solves (C.3) with $\mu$ replaced by $\tilde{\mu}_{t_k}^{(n-1)}$ .
186
+
187
+ The proofs of Theorems 4.1 and 4.2 are given in Appendix C due to the page limit.
188
+
189
+ Remark that the Sig-DFP framework is flexible. We choose to solve (2.2)-(2.3) by directly parameterizing control policies $\alpha_t$ for the sake of easy implementation and the possible exploration of multiple mean-field equilibria. If the equilibrium is unique, with proper conditions on the coefficients $b, \sigma, \sigma^0, f$ and g, one can reformulate (2.2)-(2.3) into McKean-Vlasov FBSDEs or stochastic FP/HJB equations, and solve them by fictitious play and propagating the common noise using signatures.
2108.02866/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2020-10-08T03:48:06.351Z" agent="5.0 (Macintosh)" etag="nu9rl2H27omwyYagUMYd" version="13.7.7" type="device"><diagram id="uPUdQjiBOamGTPtIVt4N" name="Page-1">5Vtbc9soFP41foxH98tjYrvNtNvdNM5M2qcdLGGLqSRchGJ7f/2ChKwbStLGWM6uZzISBwFH3/k4HA7KxJwl+48EbKMvOITxxNDC/cScTwzD8HyXXbjkUEp0XbdKyYagUMhqwRL9A4VQE9IchTBrPUgxjinatoUBTlMY0JYMEIJ37cfWOG6PugUb2BMsAxD3pY8opFH1GppWV9xCtInE0J4tKhJQPSwEWQRCvGuIzMXEnBGMaXmX7Gcw5uhVuJTtPgzUHhUjMKWvaeB80swFptfZ3e29PX/4+/O15l/pwj5PIM7FGwtt6aGCgOA8DSHvRZ+YN7sIUbjcgoDX7pjVmSyiSSyqQ5BFx2fXOKXCpMzs5k1fZfEWT5BQuG+IxCt8hDiBlBzYI6LWrYghCGVWhtjV1jEdIYsahtErIRCM2Bz7rkFjNwK3X8DQPjGEaxTHMxxjUrQ112voBAGTZ5TgH7BRE7r+ir2+Epw7MBuOO7V7QOu+DGjfVgS0oxboEEBvLQXaCTy4WqsB2ugA7V4A0JXxG0jPc8Cb3UMQQnJ1B0gGyWnRt6EXWjL0PWNlOo4a9B3dquCuDGD1HYpjVw818Td8VR5F13v4Twwnpty/oqcW5s7PnC8gBTZXWQHONXtAt7b7AqGqnt1txDWunv/NjrR1adJaNsMJCljFEqQZu3xZnm3cnCDOQ+1PuOuPybTZsVreXzk4M0Y5fqWTxmiWJysE2O0dCmhOWKwx9PBRXNigkp5wCqw0aEJHNgU06Gmep2hFNbz2imr2J4DuuX3+m8robwzS//9A2+xn/Cxnl4s/FrMHfkPzEGF28+H+ry88MAarGDL0TMt2XM9nksfbxf2CXR8QZXqzpYaroTU0iXHG5Ys0YLylkGQtdeQKnDS2WRsDsY2zcmxFTr/HeKe/4pqGdUbGG8OMj/ST+PvVm3qZskrxdyTFSuIZubIDNGG2oW0utG2e4hR2CCJEIEablBUDyBnKBNzSiG3RrkVFgsKQDyMlX01PRZEy88xTtx1DWLrEhZp9Plmq+GQOBxA9s/2Sa3uLX7uBIOEDQUCC6OVVdnXhVFK+0zXapDIlpPIly7JxAlLl8Sz5eps/aftg/fXw3cy/wccrGacuYfvVg1QC/CDKx1zBoVN+efelCmfZWnAJ+YS34ex2cHZHx9k8V5T5rFf8mcOMIpw+G/KNFf4+Roi7ajagiDTXJe/YxqkfOZofzv4CzyLLriDhc6HSKTtKKrWKlYRdivD4Eg0gC9A1vOY6R1z+ECHCzfEZpWFDzYFV9TLRD9jghRe5HhHo5RbBeAXJhtXzCZkAWvC8hJm5UB667HAeh4XGGS10tYwpW4G1hHlYPoX52QQvgh+8EeKCCBUWI2iD0iKDpvuuyT0nosXMKhpIsxDadDo9Qpausu1ze7Jh3r7/FdrrrBze6CuH9Z9coTuRkC7ZFavLQ0txVnyyMhKfO5GQ7o6Os+xg5cybQ3Vh0PuOgVQvwTxlqC4Ceiv2zVzmKWObM6HajGxGQI+DZt9ExZHdxOaZ3zoPXCSSK0aXxTkiMKCYHAWPmMQh80q8xUeCs6zskOBd2Zs0OJ0Mn6tobSa9Nog5v33Pb6olhU+Qx4t1HFr0ZXheK74sY8IRY8CRYpNODKh7o6+Zsg9nGke0JfqfMCpIdA8JSH9wzik/RHzFOfoJDGJ0ghjpqbnEHoanKib3Xo4VYRpe80/CWCmIQZahoI0ke3dy+DYp07tF4TsvTO2qON83K+eHqrRHtGg29X1blMuWluOKct2UF5ot7yBBDAKejp5rR+vAsP
ddWi+XnDFHJlzNQMZWbFQoIBtIXyJz39gNY9oSY1YyAmNA0VNbX5mBxQh3YlZU5PP8qe9p9c+Q7/eqHsvXFp3UrOn1a3c4anePW0pYeh0V9Dui8PuM9E/ASMEsvcUq+3lOSVmsv8DhE7LOfSXpnDFJZ3e+u7K7yYL3wjLJt1ojOD7PMpoUVeb0RqOLZU2tzlcDv+uX+t+c9bpSzRnZ0ccZXNOlW9kZSjX+qold44WOVBu4n6GcFr+emd9h/qwTeXrn/WBWjnc/g6YA73H2XlYHb18p3qxY/0tEOR3q/ywxF/8C</diagram></mxfile>
2108.02866/main_diagram/main_diagram.pdf ADDED
Binary file (57 kB). View file
 
2108.02866/paper_text/intro_method.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Open-domain question answering (ODQA) is a task to answer factoid questions without a pre-specified domain. Recently, generative models have achieved the state-of-the-art performance on many ODQA tasks. These approaches all share the common pipeline where the first stage is retrieving evidence from the free-form text in Wikipedia. However, a large amount of the world's knowledge is not stored as plain text but in structured databases, and needs to be accessed using query languages such as SQL. Furthermore, query languages can answer questions that require complex reasoning, as well as offering full explainability. In practice, an ideal ODQA model should be able to retrieve evidence from both unstructured textual and structured tabular information sources, as some questions are better answered by tabular evidence from databases. For example, the current state-of-the-art ODQA models struggle on questions that involve aggregation operations such as counting or averaging.
4
+
5
+ One line of research on accessing databases, although not open domain, is translating natural language questions into SQL queries . These methods all rely on knowing the associated table for each question in advance, and hence are not trivially applicable to the open-domain setting, where the relevant evidence might come from millions of tables.
6
+
7
+ In this paper, we provide a solution to the aforementioned problem by empowering the current generative ODQA models with the Text2SQL ability. More specifically, we propose a \textbf{dual reader-parser (\textsc{DuRePa})} framework that can take both textual and tabular data as input, and generate either direct answers or SQL queries based on the context\footnote{Our code is available at https://github.com/AlexanderYogurt/Hybrid-Open-QA}. If the model chooses to generate a SQL query, we can then execute the query on the corresponding database to get the final answer. Overall, our framework consists of three stages: retrieval, joint ranking and dual reading-parsing.
8
+
9
+ First we retrieve supporting candidates of both textual and tabular types, followed by a joint reranker that predicts how relevant each supporting candidate is to the question, and finally we use a fusion-in-decoder model for our reader-parser, which takes all the reranked candidates in addition to the question to generate direct answers or SQL queries.
10
+
11
+ To evaluate the effectiveness of our \textsc{DuRePa}, we construct a hybrid dataset that combines SQuAD and WikiSQL questions. We also conduct experiments on NaturalQuestions (NQ) and OTT-QA to evaluate DuRePa performance. As textual and tabular open-domain knowledge, we used textual and tabular data from Wikipedia via Wikidumps (from Dec. 21, 2016) and Wikitables . We study the model performance on different kinds of questions, where some of them only need one supporting evidence type while others need both textual and tabular evidence. On all question types, \textsc{DuRePa} performs significantly better than baseline models that were trained on a single evidence type. We also demonstrate that \textsc{DuRePa} can generate human-interpretable SQLs that answer questions requiring complex reasoning, such as calculations and superlatives.
12
+
13
+ Our highlighted contributions are as follows:
14
+ [noitemsep,topsep=0pt,leftmargin=*]
15
+
16
+ - We propose a multi-modal framework that incorporates hybrid knowledge sources with the Text2SQL ability for ODQA tasks. To the best of our knowledge, this is the first work that investigates Text2SQL in the ODQA setting.
17
+ - We propose a simple but effective generative approach that takes both textual and tabular evidence and generates either direct answers or SQL queries, automatically determined by the context. With that, we achieve the state-of-the-art performance on OpenSQuAD using a T5-base model.
18
+ - We conduct comprehensive experiments to demonstrate the benefits of Text2SQL for ODQA tasks. We show that interpretable SQL generation can effectively answer questions that require complex reasoning in the ODQA setting.
19
+
20
+ # Method
21
+
22
+ In this section, we describe our method for hybrid open-domain question answering. It mainly consists of three components: (1) a retrieval system; (2) a joint reranker and (3) a dual Seq2Seq model that uses fusion-in-decoder to generate direct answer or SQL query.
23
+ [htb]
24
+
25
+ \centering
26
+ \includegraphics[width=0.80\linewidth]{./figs/main.pdf}
27
+
28
+ \caption{The pipeline of our proposed hybrid model. The candidates are retrieved from knowledge source such as Wikipedia including both paragraphs and tables.
29
+ Then a generative Seq2Seq model reads the question and all the candidates, and produces $k$ outputs using beam search. Each output can be either a final answer or an intermediate SQL query. The types and order of the outputs are automatically determined by the model itself.
30
+
31
+ }
32
+
33
+ For the hybrid open-domain setting, we build two separate search indices -- one for textual input and another for tabular input.
34
+
35
+ For paragraphs, we split them into passages of at most 100 words. For tables, we flattened each table into passages by concatenating cell values along each row. If the flattened table exceeds 100 words, we split it into a separate passage, respecting row boundaries. The column headers are concatenated to each tabular passage. Some examples of flattened tables are given in the Appendix .
36
+
37
+ Given a natural language question, the retrieval system retrieves 100 textual and 100 tabular passages as the support candidates from the textual and tabular indices, respectively, using BM25 ranking function.
38
+
39
+ The purpose of our reranking model is to produce a score $s_i$ of how relevant a candidate (either an unstructured passage or table) is to a question.
40
+
41
+ Specifically, the reranker input is the concatenation of question, a retrieved candidate-content, and its corresponding title if available\footnote{Wikipedia passages have page titles, and tables have table titles.}, separated by special tokens shown in Figure .
42
+
43
+ The candidate content can be either the unstructured text or flattened table. We use $\text{BERT}_{base}$ model in this paper. Following , we finetune the BERT model using the following loss:
44
+
45
+ L = - \sum_{i \in \mathcal{I}_{pos}} \log(s_i) - \sum_{i \in \mathcal{I}_{neg}} \log (1-s_i).
46
+
47
+ The $\mathcal{I}_{pos}$ is sampled from all relevant BM25 candidates, and the set $\mathcal{I}_{neg}$ is sampled from all non-relevant BM25 candidates. Different from , during training, for each question, we sample 64 candidates including one positive candidate and 63 negative candidates, that is, $|\mathcal{I}_{pos}| = 1$ and $|\mathcal{I}_{neg}| = 63$. If none of the 200
48
+ candidates is relevant, we skip the question. During inference, we use the hybrid reranker to assign a score to each of the 200 candidates, and choose the top 50 candidates as the input to the next module -- the reader-parser model. For the top 50 candidates, we choose them from the joint pool of all candidates, according to the scores assigned by the reranker.
49
+
50
+ Our dual reader-parser model is based on the fusion-in-decoder (FID) proposed in , and is initialized using the pretrained T5 model. The overall pipeline of the reader-parser is shown in Figure . Each retrieved candidate is represented by its title and content, in the following formats:
51
+
52
+ \paragraph{Textual Candidate} We represent each textual candidate as the concatenation of the passage title and content, appended by special tokens [text title] and [text content] respectively.
53
+
54
+ \paragraph{Tabular Candidate} In order to represent a structured table as a passage, we first flatten each table into the following format: each flattened table starts with the complete header names and then followed by rows. Figure presents an example for this conversion.
55
+
56
+ Finally, a tabular candidate is the concatenation of the table title and content flattened as a passage, appended by special tokens [table title] and [table content] respectively. We use the table ID as the title so that it can be copied to the generated SQL queries by the model.
57
+
58
+ \paragraph{Prefix of the Targets}
59
+ During training, we also add special tokens answer: or sql: to a targeted sentence depending on whether it is a plain text or a SQL query. For those questions that have both textual answer and SQL query annotations (for example, WikiSQL questions), we create two training examples for each question. During inference, the generated outputs will also contain these two special prefixes, indicating which output type the model has generated.
60
+
61
+ \paragraph{Dual Reader-Parser} Our generative Seq2Seq model has reader-parser duality. During inference, the model reads the question and all the candidates, and produces $k$ outputs using beam search. Each output can be either a final answer or an intermediate SQL query. Depending on the context, the types and order of the outputs are automatically determined by the model itself. All the generated SQL queries will then be executed to produce the final answers. In this paper, we fix $k=3$ and always generate three outputs for each question.
2109.14710/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2109.14710/paper_text/intro_method.md ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Convolutional Neural Networks (CNNs) have achieved state-of-the-art performance on a wide range of computer vision tasks such as image classification [@he2016deep], video recognition [@feichtenhofer2019slowfast] and object detection [@ren2015faster]. Despite achieving remarkably low generalization errors, modern CNN architectures are typically over-parameterized and consist of millions of parameters. As the size of state-of-the-art CNN architectures continues to grow, it becomes more challenging to deploy these models on resource constrained edge devices that are limited in both memory and energy. Motivated by studies demonstrating that there is significant redundancy in CNN parameters [@Denil_Predicting_Parameters_in_Deep_Learning_2013], model compression techniques such as pruning, quantization, tensor decomposition and knowledge distillation have emerged to address this problem.
4
+
5
+ <figure id="fig:resnet18_first_layer_reconstruction" data-latex-placement="t">
6
+ <div class="minipage">
7
+ <p>Original Tensor</p>
8
+ </div>
9
+ <div class="minipage">
10
+ <p>Reconstructed Tensor</p>
11
+ </div>
12
+ <div class="minipage">
13
+ <div class="flushright">
14
+ <p>Reconstruction Error</p>
15
+ </div>
16
+ </div>
17
+ <figure>
18
+ <embed src="figures/reconstructions_tucker.pdf" />
19
+ <figcaption>SVD (Tucker)</figcaption>
20
+ </figure>
21
+ <figure>
22
+ <embed src="figures/reconstructions_kp1.pdf" />
23
+ <figcaption>GKPD - 1</figcaption>
24
+ </figure>
25
+ <figure>
26
+ <embed src="figures/reconstructions_kp16.pdf" />
27
+ <figcaption>GKPD - 8</figcaption>
28
+ </figure>
29
+ <figcaption> A compression rate of <span class="math inline">2×</span> achieved for an arbitrary tensor from the first layer of ResNet18 using SVD (Tucker) in (a), and the proposed GKPD in (b) and (c). A larger summation, GKPD-8 achieves a lower reconstruction error in comparison with both a smaller summation, GKPD-1, as well as SVD (Tucker) decomposition.</figcaption>
30
+ </figure>
31
+
32
+ Decomposition methods have gained more attention in recent years as they can achieve higher compression rates in comparison to other approaches. Namely, Tucker [@Deok_Compression_of_Deep_Convolutional_Neural_Networks_for_Fast_and_Low_Power_Mobile_Applications_2016], CP [@Lebedev_Speeding-up_Convolutional_Neural_Networks_Using_Fine-tuned_CP-Decomposition_2015], Tensor-Train [@Garipov_Ultimate_Tensorization_2016] and Tensor-Ring [@Wang_Tensor_Ring_Nets_2018] decompositions have been widely studied for DNNs. However, these methods still suffer significant accuracy loss for computer vision tasks.
33
+
34
+ Kronecker Product Decomposition (KPD) is another decomposition method that has recently been shown to be very effective when applied to RNNs [@thakker2019compressing]. KPD leads to model compression by replacing a large matrix with two smaller Kronecker factor matrices that best approximate the original matrix. In this work, we generalize KPD to tensors, yielding the *Generalized Kronecker Product Decomposition* (GKPD), and use it to decompose convolution tensors. GKPD involves finding the summation of Kronecker products between factor tensors that best approximates the original tensor. We provide a solution to this problem, which we call the *Multidimensional Nearest Kronecker Product Problem*. By formulating the convolution operation directly in terms of the Kronecker factors, we show that we can avoid reconstruction at runtime and thus obtain a significant reduction in memory footprint and floating-point operations (FLOPs). Once all convolution tensors in a pre-trained CNN have been replaced by their compressed counterparts, we retrain the network. If a pretrained network is not available, we show that we are still able to train our compressed network from a random initialization. Furthermore, we show that these randomly initialized networks retain universal approximation capability by building on [@Hornik_UniversalMLP_1991] and [@Zhou_UniversalDeepCNN_2020].
35
+
36
+ Applying GKPD to an arbitrary tensor leads to multiple possible decompositions, one for each configuration of Kronecker factors. As shown in Figure [1](#fig:resnet18_first_layer_reconstruction){reference-type="ref" reference="fig:resnet18_first_layer_reconstruction"}, we find that for any given compression factor, choosing a decomposition that consists of a larger summation of smaller Kronecker factors (as opposed to a smaller summation of larger Kronecker factors) leads to a lower reconstruction error as well as improved model accuracy.
37
+
38
+ To summarize, the main contributions of this paper are:
39
+
40
+ - Generalizing the Kronecker Product Decomposition to multidimensional tensors
41
+
42
+ - Introducing the Multidimensional Nearest Kronecker Product Problem and providing a solution
43
+
44
+ - Providing experimental results for image classification on CIFAR-10 and ImageNet using compressed ResNet [@he2016deep], MobileNetv2 [@sandler2018mobilenetv2] and SeNet [@hu2018squeeze] architectures.
45
+
46
+ # Method
47
+
48
+ Given matrices $\mat{a} \in {\rm I\!R}^{m_1 \times n_1}$ and $\mat{b} \in {\rm I\!R}^{m_2 \times n_2}$, their Kronecker product is the $m_1 m_2 \times n_1 n_2$ matrix $$\begin{equation}
49
+ \mat{a} \otimes \mat{b} \triangleq
50
+ \begin{bmatrix}
51
+ a_{1,1} \mat{b} & \dots & a_{1,n_1} \mat{b} \\
52
+ \vdots & \ddots & \vdots \\
53
+ a_{m_1,1} \mat{b} & \dots & a_{m_1,n_1} \mat{b}
54
+ \end{bmatrix}.
55
+ \label{eq:kron-matrix}
56
+ \end{equation}$$ As shown in @van2000ubiquitous, any matrix $\mat{w} \in {\rm I\!R}^{m_1 m_2 \times n_1 n_2}$ can be decomposed into a sum of Kronecker products as $$\begin{equation}
57
+ \mat{w} = \sum_{r=1}^R \mat{a}_r \otimes \mat{b}_r,
58
+ \label{eq:kronecker_sum_matrix_reconstruction}
59
+ \end{equation}$$ where $$\begin{equation}
60
+ R = \min(m_1n_1, m_2n_2)
61
+ \end{equation}$$ is the rank of a reshaped version of matrix $\mat{w}$. We call this $R$ the *Kronecker rank* of $\mat{w}$. Note that the Kronecker rank is not unique, and is dependent on the dimensions of factors $\mat{a}$ and $\mat{b}$.
62
+
63
+ To compress a given $\mat{w}$, we can represent it using a small number $\widehat{R} < R$ of Kronecker products that best approximate the original tensor. The factors are found by solving the Nearest Kronecker Product problem $$\begin{equation}
64
+ \underset{\{\mat{a}_r\}, \{\mat{b}_r\}}{\min} \left\| \mat{w} - \sum_{r=1}^{\widehat{R}}\mat{a}_r \otimes \mat{b}_r \right\|_F^2.
65
+ \label{eq:nkp-matrices}
66
+ \end{equation}$$ As this approximation replaces a large matrix with a sequence of two smaller ones, memory consumption is reduced by a factor of $$\begin{equation}
67
+ \frac{ m_1 m_2 n_1 n_2 }{\widehat{R}(m_1 n_1 + m_2 n_2)}.
68
+ \end{equation}$$ Furthermore, if a matrix $\mat{w}$ is decomposed into its Kronecker factors then the projection $\mat{w}\Vec{x}$ can be performed without explicit reconstruction of $\mat{w}$. Instead, the factors can be used directly to perform the computation as a result of the following equivalency relationship: $$\begin{equation}
69
+ \Vec{y} = (\mat{a} \otimes \mat{b})\Vec{x}
70
+ \equiv
71
+ \mat{Y} = \mat{B} \mat{X} \mat{A}^\top,
72
+ \label{eq:kronecker-equivalency}
73
+ \end{equation}$$ where $\text{vec}(\mat{x})=\Vec{x}$, $\text{vec}(\mat{y}) = \Vec{y}$ and $\text{vec}(\cdot)$ vectorizes matrices $\mat{X} \in {\rm I\!R}^{n_2 \times n_1}$ and $\mat{Y} \in {\rm I\!R}^{m_2 \times m_1}$ by stacking their columns.
74
+
75
+ In this section, we extend KPD to tensors yielding GKPD. First, we define the multidimensional Kronecker product, then we introduce the Multidimensional Nearest Kronecker Product problem and its solution. Finally, we describe our *KroneckerConvolution* module that uses GKPD to compress convolution tensors and avoids reconstruction at runtime.
76
+
77
+ We now turn to generalizing the Kronecker product to operate on tensors. Let $\tensor{a} \in {\rm I\!R}^{a_1 \times a_2 \times \dots \times a_N}$ and $\tensor{b} \in {\rm I\!R}^{b_1 \times b_2 \times \dots \times b_N}$ be two given tensors. Intuitively, tensor $(\tensor{a} \otimes \tensor{b}) \in {\rm I\!R}^{a_1 b_1 \times a_2 b_2 \times \dots \times a_N b_N}$ is constructed by *moving around* tensor $\tensor{B}$ in a non-overlapping fashion, and at each position scaling it by a corresponding element of $\tensor{A}$ as shown in Figure [2](#fig:kpd_of_weight_tensor){reference-type="ref" reference="fig:kpd_of_weight_tensor"}. Formally, the Multidimensional Kronecker product is defined as follows $$\begin{equation}
78
+ (\tensor{A} \otimes \tensor{B})_{i_1, i_2, \dots, i_N} \triangleq \tensor{A}_{j_1, j_2, \dots, j_N} \tensor{B}_{k_1, k_2, \dots, k_N},
79
+ \end{equation}$$ where $$\begin{equation}
80
+ j_n = \left \lfloor \frac{i_n}{b_n} \right\rfloor \quad \text{and} \quad
81
+ k_n = i_n\mod{b_n}
82
+ \label{eq:kron-indexes}
83
+ \end{equation}$$ represent the integer quotient and the remainder term of $i_n$ with respect to divisor $b_n$, respectively.
84
+
85
+ <figure id="fig:kpd_of_weight_tensor" data-latex-placement="t">
86
+ <embed src="figures/kpd_of_weight_tensor_with_labels2.pdf" />
87
+ <figcaption>Illustration of Kronecker Decomposition of a single convolution filter (with spatial dimensions equal to one for simplicity). </figcaption>
88
+ </figure>
89
+
90
+ As with matrices, any multidimensional tensor $\tensor{w} \in {\rm I\!R}^{w_1 \times w_2 \times \cdots \times w_N}$ can be decomposed into a sum of Kronecker products $$\begin{equation}
91
+ \tensor{w} = \sum_{r=1}^{R} \tensor{a}_r \otimes \tensor{b}_r,
92
+ \label{eq:kronecker_sum_tensor_reconstruction}
93
+ \end{equation}$$ where $$\begin{equation}
94
+ R = \min(a_1 a_2 \cdots a_N, b_1 b_2 \cdots b_N)
95
+ \end{equation}$$ denotes the Kronecker rank of tensor $\tensor{w}$. Thus, we can approximate $\tensor{w}$ using GKPD by solving the Multidimensional Nearest Kronecker Product problem $$\begin{equation}
96
+ \underset{\{\tensor{a}_r\}, \{\tensor{b}_r\}}{\min} \left\| \tensor{w} - \sum_{r=1}^{\widehat{R}} \tensor{a}_r \otimes \tensor{b}_r \right\|_F^2,
97
+ \label{eq:nkp-tensor-problem}
98
+ \end{equation}$$ where $\widehat{R}$ $<$ $R$. For the case of matrices (2D tensors), @Loan_Approximation_With_Kronecker_Products_1992 solved this problem using SVD. We extend their approach to the multidimensional setting. Our strategy will be to define rearrangement operators $$\begin{align*}
99
+ \mat{R}_w & : {\rm I\!R}^{w_1 \times w_2 \times \dots \times w_N} \to {\rm I\!R}^{a_1 a_2 \dots a_N \times b_1 b_2 \dots b_N} \\
100
+ \vec{r}_a &: {\rm I\!R}^{a_1 \times a_2 \times \dots \times a_N \to a_1 a_2 \dots a_N}\\
101
+ \vec{r}_b &: {\rm I\!R}^{b_1 \times b_2 \times \dots \times b_N \to b_1 b_2 \dots b_N}
102
+ \end{align*}$$ and solve $$\begin{equation}
103
+ \underset{\{\tensor{a}_r\}, \{\tensor{b}_r\}}{\min} \left \| \mat{R}_w(\tensor{w}) - \sum_{r=1}^{\widehat{R}} \vec{r}_a(\tensor{a}_r) \vec{r}_b(\tensor{b}_r) ^\top \right\|_F^2
104
+ \label{eq:nkp-tensor-problem-reshaped}
105
+ \end{equation}$$ instead. By carefully defining the rearrangement operators, the sum of squares in [\[eq:nkp-tensor-problem-reshaped\]](#eq:nkp-tensor-problem-reshaped){reference-type="eqref" reference="eq:nkp-tensor-problem-reshaped"} is kept identical to that in [\[eq:nkp-tensor-problem\]](#eq:nkp-tensor-problem){reference-type="eqref" reference="eq:nkp-tensor-problem"}. The former corresponds to finding the best low-rank approximation which has a well known solution using SVD. We define the rearrangement operators as follows: $$\begin{align*}
106
+ % \label{eq:rearrangment-w}
107
+ R_w(\tensor{w})_{i, :} &= \text{vec}(\text{unfold}(\tensor{w}, \Vec{d}_{\tensor{b}})_i) \\
108
+ % \Vec{d}_\tensor{b}, \Vec{s}_\tensor{b}) \\
109
+ % % \text{Shape}(\tensor{b})) \\
110
+ % \label{eq:rearrangment-a}
111
+ \vec{r}_a(\tensor{a}) &= \text{unfold}(\tensor{a}, \, \Vec{d}_{\tensor{I}_{\tensor{a}}}) \\
112
+ % \label{eq:rearrangment-b}
113
+ \vec{r}_b(\tensor{b}) &= \text{vec}(\tensor{b})
114
+ \end{align*}$$ where $$\begin{equation*}
115
+ \text{unfold}(\tensor{w},\, \Vec{d}): {\rm I\!R}^{w_1 \times w_2 \times \dots \times w_N} \to {\rm I\!R}^{N_p \times d_1 \times d_2 \dots d_N}
116
+ \end{equation*}$$ extracts $N_p$ non-overlapping patches of shape $\Vec{d}$ from tensor $\tensor{w}$, $\text{vec}(\cdot)$ flattens its input into a vector, tensor $\tensor{i}_{\tensor{a}}$ has the same number of dimensions as $\tensor{a}$ with each dimension equal to unity and $\Vec{d}_{\tensor{b}}$ is a vector describing the shape of tensor $\tensor{b}$. While the ordering of patch extraction and flattening is not important, it must remain consistent across the rearrangement operators.
117
+
118
+ <figure id="fig:kronecker-convolution" data-latex-placement="t">
119
+ <figure>
120
+ <embed src="figures/conv_labelled3.pdf" />
121
+ <figcaption> Conv2d</figcaption>
122
+ </figure>
123
+ <figure>
124
+ <embed src="figures/kconv_labelled3.pdf" />
125
+ <figcaption> KroneckerConv2d</figcaption>
126
+ </figure>
127
+ <figcaption>Illustration of the KroneckerConvolution operation. Although (a) and (b) result in identical outputs, the latter is more efficient in terms of memory and FLOPs.</figcaption>
128
+ </figure>
129
+
130
+ The convolution operation in CNNs between a weight tensor $\tensor{W} \in {\rm I\!R}^{F \times C \times K_w \times K_h}$ and an input $\tensor{x} \in {\rm I\!R}^{C \times H \times W}$ is a multilinear map that can be described in scalar form as $$\begin{equation}
131
+ \tensor{y}_{f,x,y} = \sum_{i=1}^{K_h} \sum_{j=1}^{K_w} \sum_{c=1}^{C} \tensor{W}_{f,c,i,j} \tensor{X}_{c,i+x,j+y}.
132
+ \label{eq:scalar-form-conv}
133
+ \end{equation}$$ Assuming $\tensor{w}$ can be decomposed to KPD factors $\tensor{A} \in {\rm I\!R}^{F_1 \times C_1 \times K_{w1} \times K_{h1}}$ and $\tensor{B} \in {\rm I\!R}^{F_2 \times C_2 \times K_{w2} \times K_{h2}}$, we can rewrite [\[eq:scalar-form-conv\]](#eq:scalar-form-conv){reference-type="eqref" reference="eq:scalar-form-conv"} as $$\begin{equation}
134
+ \tensor{y}_{f,x,y} = \sum_{i=1}^{K_h} \sum_{j=1}^{K_w} \sum_{c=1}^{C} (\tensor{A} \otimes \tensor{B})_{f,c,i,j} \tensor{X}_{c,i+x,j+y}.
135
+ \label{eq:scalar-form-conv-kpd1}
136
+ \end{equation}$$ Due to the structure of tensor $\tensor{A} \otimes \tensor{B}$, we do not need to explicitly reconstruct it to carry out the summation in [\[eq:scalar-form-conv-kpd1\]](#eq:scalar-form-conv-kpd1){reference-type="eqref" reference="eq:scalar-form-conv-kpd1"}. Instead, we can carry out the summation by *directly* using elements of tensors $\tensor{a}$ and $\tensor{b}$ as shown in Lemma [\[lem:kpd-equivalence\]](#lem:kpd-equivalence){reference-type="ref" reference="lem:kpd-equivalence"}. This key insight leads to a large reduction in both memory and FLOPs. Effectively, this allows us to replace a large convolutional layer (with a large weight tensor) with two smaller ones, as we demonstrate in the rest of this section.
137
+
138
+ ::: restatable
139
+ lemkpdequiv []{#lem:kpd-equivalence label="lem:kpd-equivalence"} Suppose tensor $\tensor{w} \in {\rm I\!R}^{w_1 \times w_2 \times \dots \times w_N}$ can be decomposed into KPD factors such that $\tensor{w} = \tensor{a} \otimes \tensor{b}$. Then, the multilinear map involving $\tensor{w}$ can be written directly in terms of its factors $\tensor{A} \in {\rm I\!R}^{a_1 \times a_2 \times \dots \times a_N}$ and $\tensor{B} \in {\rm I\!R}^{b_1 \times b_2 \times \dots \times b_N}$ as follows $$\begin{multline*}
140
+ \tensor{w}_{i_1, i_2, \dots, i_N} \tensor{x}_{i_1, i_2,\dots, i_N} = \\
141
+ \tensor{a}_{j_1, j_2, \dots, j_N} \tensor{b}_{k_1, k_2, \dots, k_N} \tensor{x}_{g(j_1, k_1), g(j_2, k_2), \dots, g(j_N, k_N)},
142
+ \end{multline*}$$ where $\tensor{x} \in {\rm I\!R}^{d_1 \times d_2 \times \dots \times d_N}$ is an input tensor, $g(j_n, k_n) \triangleq j_n b_n + k_n$ is a re-indexing function; and $j_n,k_n$ are as defined in [\[eq:kron-indexes\]](#eq:kron-indexes){reference-type="eqref" reference="eq:kron-indexes"}. The equality also holds for any valid offsets to the input's indices $$\begin{multline*}
143
+ \tensor{w}_{i_1, i_2, \dots, i_N} \tensor{x}_{i_1 + o_1,\, i_2 + o_2,\, \dots,\, i_N + o_N} =
144
+ \tensor{a}_{j_1, j_2, \dots, j_N} \\
145
+ \tensor{b}_{k_1, k_2, \dots, k_N}
146
+ \tensor{x}_{g(j_1, k_1) + o_1,\, g(j_2, k_2) + o_2,\, \dots,\, g(j_N, k_N) + o_N},
147
+ \end{multline*}$$ where $o_i \in \mathbb{N}$.
148
+ :::
149
+
150
+ ::: proof
151
+ *Proof.* See Supplementary Material. ◻
152
+ :::
153
+
154
+ Applying Lemma [\[lem:kpd-equivalence\]](#lem:kpd-equivalence){reference-type="ref" reference="lem:kpd-equivalence"} to the summation in [\[eq:scalar-form-conv-kpd1\]](#eq:scalar-form-conv-kpd1){reference-type="eqref" reference="eq:scalar-form-conv-kpd1"} yields $$\begin{multline*}
155
+ \tensor{y}_{f,x,y} = \sum_{i_1, i_2} \sum_{j_1, j_2} \sum_{c_1, c_2}
156
+ \tensor{A}_{f_1, c_1, i_1, j_1} \tensor{B}_{f_2, c_2, i_2, j_2} \\
157
+ \tensor{X}_{g(c_1,c_2), g(i_1, i_2)+x, g(j_1, j_2)+y},
158
+ % \label{eq:scalar-form-conv-kpd2}
159
+ \end{multline*}$$ where indices $i_1, j_1, c_1$ enumerate over elements in tensor $\tensor{a}$ and $i_2, j_2, c_2$ enumerate over elements in tensor $\tensor{b}$. Finally, we can separate the convolution operation into two steps by exchanging the order of summation as follows: $$\begin{multline}
160
+ \tensor{y}_{f,x,y} = \sum_{i_1, j_1, c_1} \tensor{A}_{f_1, c_1, i_1, j_1} \\
161
+ \sum_{i_2, j_2, c_2} \tensor{B}_{f_2, c_2, i_2, j_2} \tensor{X}_{g(c_1,c_2),g(i_1, i_2)+x,g(j_1, j_2)+y}.
162
+ \label{eq:scalar-form-conv-kpd3}
163
+ \end{multline}$$ The inner summation in [\[eq:scalar-form-conv-kpd3\]](#eq:scalar-form-conv-kpd3){reference-type="eqref" reference="eq:scalar-form-conv-kpd3"} corresponds to a 3D convolution and the outer summation corresponds to *multiple* 2D convolutions, as visualized in Fig. [3](#fig:kronecker-convolution){reference-type="ref" reference="fig:kronecker-convolution"} for the special case of $F=1$.
164
+
165
+ ::: algorithm
166
+ $\tensor{x}' \gets \text{Unsqueeze}(\tensor{x}) \in {\rm I\!R}^{1 \times C \times W \times H}$ $\tensor{y}' \gets \text{Conv3d}(\tensor{b}, \tensor{x}') \in {\rm I\!R}^{F_2 \times C_1 \times W \times H}$
167
+
168
+ $\tensor{y}'' \gets \text{BatchConv2d}(\tensor{a}, \tensor{y}') \in {\rm I\!R}^{F_2 \times F_1 \times W \times H}$
169
+
170
+ $\tensor{y} \gets \text{Reshape}(\tensor{y}'') \in {\rm I\!R}^{F_1 F_2 \times W \times H}$
171
+ :::
172
+
173
+ Overall, [\[eq:scalar-form-conv-kpd3\]](#eq:scalar-form-conv-kpd3){reference-type="eqref" reference="eq:scalar-form-conv-kpd3"} can be carried out efficiently in tensor form using Algorithm [\[alg:fw_pass\]](#alg:fw_pass){reference-type="ref" reference="alg:fw_pass"}. Effectively, the input is collapsed in two stages instead of one as in the multidimensional convolution operation. Convolving a multi-channel input with a single filter in $\tensor{w}$ yields a scalar value at a particular output location. This is done by first scaling all elements in the corresponding multidimensional patch, then collapsing it by means of summation. Since tensor $\tensor{w}$ is comprised of multidimensional patches $\tensor{b}$ scaled by elements in $\tensor{a}$, we can equivalently collapse each *sub-patch* in the input using tensor $\tensor{b}$ followed by a subsequent collapsing using tensor $\tensor{a}$ to obtain the same scalar value.
174
+
175
+ The GKPD of a convolution layer is not unique. Different configurations of Kronecker factors will lead to different reductions in memory and number of operations. Namely, for a KroneckerConvolution layer using $\widehat{R}$ Kronecker products with factors $\tensor{a} \in {\rm I\!R}^{F_1 \times C_1 \times K_{w1} \times K_{h1}}$ and $\tensor{b} \in {\rm I\!R}^{F_2 \times C_2 \times K_{w2} \times K_{h2}}$ the memory reduction is
176
+
177
+ $$\begin{equation}
178
+ \frac{ F_1 C_1 K_{w1} K_{h1} F_2 C_2 K_{w2} K_{h2}}{\widehat{R}(F_1 C_1 K_{w1} K_{h1} + F_2 C_2 K_{w2} K_{h2})},
179
+ \end{equation}$$ whereas the reduction in FLOPs is $$\begin{equation}
180
+ \frac{F_1 C_1 K_{w1} K_{h1} F_2 C_2 K_{w2} K_{h2}}{\widehat{R}(F_2 \cdot F_1 C_1 K_{w1} K_{h1} + C_1 \cdot F_2 C_2 K_{w2} K_{h2})}.
181
+ \end{equation}$$ For the special case of using separable $3\times3$ filters, and $\widehat{R} = 1$ the reduction in FLOPs becomes $$\begin{equation}
182
+ \frac{3F_1 C_2}{F_1 + C_2},
183
+ \end{equation}$$ implying that $F_1$ and $C_2$ should be sufficiently large in order to obtain a reduction in FLOPs. In contrast, memory reduction is unconditional in the KroneckerConvolution layer.
184
+
185
+ Studies of universal approximation applied to shallow networks have been around for a long time [@Hornik_UniversalMLP_1991],[@Ripley_NNBook_1996 pp 173--180] whilst such studies for deep networks are more recent [@Zhou_UniversalDeepCNN_2020]. In this section, we build off of these foundations to show that neural networks with weight tensors represented using low Kronecker rank summations of Kronecker products, remain universal approximators. For brevity, we refer to such networks as "Kronecker networks".
186
+
187
+ First, we show that a shallow Kronecker network is a universal approximator. For simplicity, this is shown only for one output. Then, we can generalize the resulting approximator by treating each output dimension separately.
188
+
189
+ Consider a single layer neural network constructed using $n$ hidden units and an $L$-Lipschitz activation function $a(\cdot)$ $$\begin{align*}
190
+ %\setlength{\abovedisplayskip}{0pt}
191
+ %\setlength{\belowdisplayskip}{0pt}
192
+ \hat{f}_{\mat{w}}(x) & \triangleq \vec{w}_2 ^ \top a\left(\mat{W}\vec{x}\right) = \sum_{j=1}^n w_{2j} a\left(\mathbf w_{1j}^\top \mathbf x+ w_{0j}\right),
193
+ \end{align*}$$ that is defined on a compacta $K$ in ${\rm I\!R}^d$. As shown in [@Hornik_UniversalMLP_1991], such a network serves as a universal approximator, i.e., for a given positive number $\epsilon$ there exists an $n$ such that $$\begin{equation}
194
+ % \setlength{\abovedisplayskip}{0pt}
195
+ %\setlength{\belowdisplayskip}{0pt}
196
+ \left\lVert f - \hat{f}_{\mat{W}}\right\rVert^2 _{2, \mu} \triangleq \int_K \left | f(\mathbf x) - \hat{f}_{\mat{w}}(\mathbf x) \right|^2 d\mu \\
197
+ \leq \epsilon.
198
+ \end{equation}$$ Similarly, a shallow Kronecker network consisting of $n$ hidden units $$\begin{align}
199
+ % \setlength{\abovedisplayskip}{0pt}
200
+ % \setlength{\belowdisplayskip}{0pt}
201
+ \hat{f}_{\mat{w}_{\widehat{R}}}(x) & \triangleq \vec{w}_2 ^ \top a\left(\mat{W}_{\widehat{R}}\vec{x}\right), \;
202
+ \mat{w}_{\widehat{R}} &= \sum_{r=1}^{\widehat{R}} \mat{a}_r \otimes \mat{b}_r, \label{eq:shallow-kronecker-network}
203
+ \end{align}$$ is comprised of a weight matrix $\mat{w}_{\widehat{R}}$ made of a summation of Kronecker products between factors $\mat{a}_r \in {\rm I\!R}^{a_1 \times a_2}$ and $\mat{b}_r \in {\rm I\!R}^{b_1 \times b_2}$. From [\[eq:shallow-kronecker-network\]](#eq:shallow-kronecker-network){reference-type="eqref" reference="eq:shallow-kronecker-network"}, we can see that any shallow neural network with $n$ hidden units can be represented exactly using a Kronecker network with a full Kronecker rank $R = \min(a_1 a_2, b_1 b_2)$. Thus, shallow Kronecker networks with full Kronecker rank also serve as universal approximators. In Theorem [\[theo:shallow\]](#theo:shallow){reference-type="ref" reference="theo:shallow"} we show that a similar result holds for shallow Kronecker networks $f_{\mat{w}_{\widehat{R}}}$, with low Kronecker ranks $\widehat{R} < R$, provided that the $R-\widehat{R}$ smallest singular values of the reshaped matrix $R_w(\mat{W})$ of the approximating neural network $\hat{f}_{\mat{W}}$ are small enough.
204
+
205
+ ::: restatable
206
+ theoshallownet []{#theo:shallow label="theo:shallow"} Any shallow Kronecker network with a low Kronecker rank $\widehat{R}$ and $n$ hidden units defined on a compacta $K\subset {\rm I\!R}^d$ with $L$-Lipschitz activation is dense in the class of continuous functions $C(K)$ for a large enough $n$ given $$\sum\limits_{r=\hat R+1}^R \sigma^2_{r} < \epsilon (L \left\lVert K\right\rVert^2 \left\lVert\mathbf w_2\right\rVert^2 )^{-1},$$ where $\sigma_r$ is the $r^\text{th}$ singular value of the reshaped version of the weight matrix $R_w(\mat{W})$, in an approximating neural network $\hat{f}_{\mat{W}}$ with $n$ hidden units satisfying $\|f - \hat{f}_{\mat{W}}\|_{2,\mu}^2 < \epsilon$, for $f \in C(K)$.
207
+ :::
208
+
209
+ ::: proof
210
+ *Proof.* See Supplementary Material. ◻
211
+ :::
212
+
213
+ In Theorem [\[theo:deep\]](#theo:deep){reference-type="ref" reference="theo:deep"}, we extend the preceding result to deep convolutional neural networks, where each convolution tensor is represented using a summation of Kronecker products between factor tensors.
214
+
215
+ ::: restatable
216
+ theodeepconv []{#theo:deep label="theo:deep"} Any deep Kronecker convolution network with Kronecker rank $\hat R_j$ in layer $j$ on compacta $K\subset {\rm I\!R}^d$ with $L$-Lipschitz activation, is dense in the class of continuous functions $C(K)$ for a large enough number of layers $J$, given $$\prod_{j=1}^J
217
+ \left(\sum_{r=\hat R_j+1}^{R_j}\sigma^2_{r, j}\right) < \epsilon(L^J\left\lVert\mathbf w_2\right\rVert^2 \left\lVert K\right\rVert^2 )^{-1},$$ where $\sigma_{r, j}$ is the $r^\text{th}$ singular value of the matrix $R_w(\tensor{w}^{j})$ of the reshaped weight tensor in the $j^\text{th}$ layer of an approximating convolutional neural network.
218
+ :::
219
+
220
+ ::: proof
221
+ *Proof.* See Supplementary Material. ◻
222
+ :::
223
+
224
+ The result is achieved by extending the recent universal approximation bound [@Zhou_UniversalDeepCNN_2020] to GKPD networks. One can derive the convergence rates using [@Zhou_UniversalDeepCNN_2020 Theorem 2] as well. These results assure that the performance degradation of Kronecker networks is small, in comparison to uncompressed networks, for an appropriate choice of Kronecker rank $\widehat{R}$.
225
+
226
+ As GKPD provides us with a set of possible decompositions for each layer in a network, a selection strategy is needed. For a given compression rate, there is a trade-off between using a larger number of terms $\widehat{R}$ in the GKPD summation [\[eq:nkp-tensor-problem\]](#eq:nkp-tensor-problem){reference-type="eqref" reference="eq:nkp-tensor-problem"} together with a more compressed configuration and a smaller $\widehat{R}$ with a less compressed configuration. To guide our search, we select the decomposition that best approximates the original uncompressed tensor obtained from a pretrained network. This means different layers in a network will be approximated by a different number of Kronecker products. Before searching for the best decomposition, we limit our search space to configurations that satisfy a desired reduction in FLOPs. Unless otherwise stated all GKPD experiments use this approach.
2110.13947/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2110.13947/paper_text/intro_method.md ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ A multi-agent trajectory forecasting system aims to predict future trajectories of multiple agents based on their observed trajectories and surroundings [@DBLP:journals/thms/StahlDJ14; @2016Supporting]. Precise trajectory prediction provides essential information for decision making and safety in numerous intelligent systems, including autonomous vehicles [@liang2020learning; @Gao_2020_CVPR; @Ye_2021_CVPR; @gilles2021home], drones [@DBLP:conf/icc/XiaoZHY19], and industrial robotics [@DBLP:conf/icml/JetchevT09; @DBLP:conf/aimech/RosmannO0B17].
4
+
5
+ The rapid development of deep learning has enabled a number of deep-learning-based algorithms to handle multi-agent trajectory forecasting [@liang2020learning; @Gao_2020_CVPR; @Ye_2021_CVPR; @gilles2021home; @Zhao_2019_CVPR; @Choi_2019_ICCV; @{salzmann2020trajectron++}; @zeng2021lanercnn; @li2020evolvegraph; @DBLP:conf/nips/KosarajuSM0RS19]. These methods exhibit state-of-the-art performances, with some having been integrated into real-world systems. However, deep-learning-based forecasting is not always reliable or interpretable [@Gal2016Uncertainty; @NIPS2017_2650d608; @zhao2020uncertainty]. In circumstances when noises from the environment are overwhelmingly distracting, or when the situation has never been encountered before, a deep-learning-based algorithm could provide baffling predictions, which might cause terrible tragedies. A fundamental challenge is to know when we could rely on those deep-learning-based forecasting algorithms. To tackle this problem, one solution is to report the uncertainty of each prediction. Finding ways to best conceptualize and measure the prediction uncertainty of deep-learning-based algorithms becomes an imperative, which motivates this work.
6
+
7
+ <figure id="example_out" data-latex-placement="t">
8
+ <div class="center">
9
+ <embed src="pipeline_full_larger.pdf" />
10
+ </div>
11
+ <figcaption><strong>Uncertainty modeling in multi-agent trajectory forecasting.</strong> (a) a typical pipeline of an encoder in multi-agent trajectory forecasting systems. (b) and (c) illustrate the decoder pipeline of previous methods and our method respectively. Previous methods output the predicted trajectory <span class="math inline"><em>Ŷ</em></span> and individual uncertainty <span class="math inline"><em>σ</em><sub><em>i</em></sub></span>, and our method additionally outputs <em>collaborative uncertainty</em> <span class="math inline"><em>σ</em><sub><em>i</em><em>j</em></sub></span>.</figcaption>
12
+ </figure>
13
+
14
+ There are two main types of uncertainty to model in deep-learning-based algorithms [@der2009aleatory]: 1) aleatoric uncertainty, regarding information aside from statistical models, which data cannot explain; 2) epistemic uncertainty, the uncertainty inside a model, when the model lacks knowledge of the system/process being modeled (e.g., due to limited training data). As it is most effective to model aleatoric uncertainty in big data regimes such as those common to deep learning with image data [@NIPS2017_2650d608], this work focuses on aleatoric uncertainty. In the following passage, we use the term "uncertainty" to represent aleatoric uncertainty. [@Gal2016Uncertainty] uses the predictive variance to approximate uncertainty in the Bayesian deep learning model, which has been widely adopted in many works [@DBLP:conf/cvpr/BhattacharyyaFS18; @DBLP:conf/corl/Jain0LXFSU19; @DBLP:conf/cvpr/HongSP19; @Choi_2019_ICCV] for uncertainty modeling in multi-agent trajectory forecasting. However, the predictive variance of a single agent alone may not suffice to reflect the complete landscape of uncertainty, especially when agent-wise interaction is present. Recent works that attempt to exploit the interaction among agents have impressively boosted the prediction precision, which further highlights the need to better measure uncertainty in multi-agent trajectory forecasting. We seek to build a more sophisticated and robust measurement for capturing the previously neglected uncertainty brought by correlated predictions.
15
+
16
+ In this paper, we coin a concept *individual uncertainty* (IU) to describe the uncertainty that can be approximated by the predictive variance of a single agent. Relatively, we propose a new concept, *collaborative uncertainty* (CU) to estimate the uncertainty resulting from the usage of interaction modules in prediction models. We further introduce an original probabilistic CU-based framework to measure both individual and collaborative uncertainty in the multi-agent trajectory forecasting task. We apply this framework to two special cases: multivariate Gaussian distribution and multivariate Laplace distribution. In each case, our CU-based framework allows our model to simultaneously learn the mappings that are from input data to 1) accurate prediction, 2) individual uncertainty, and 3) collaborative uncertainty; see Figure [1](#example_out){reference-type="ref" reference="example_out"} for model illustration. Extensive experiments demonstrate that CU modeling yields significantly larger performance gains in prediction models equipped with interaction modules (See Figure [3](#compare_a2a){reference-type="ref" reference="compare_a2a"}), confirming that CU is highly related to the existence of the interaction modeling procedure, and adding CU modeling benefits accurate predictions.
17
+
18
+ The contributions of this work are summarized as follows:
19
+
20
+ $\bullet$ We propose, analyze, and visualize a novel concept, *collaborative uncertainty* (CU), to model the uncertainty brought by the interaction modules in multi-agent trajectory forecasting.
21
+
22
+ $\bullet$ We design a general CU-based framework to empower a prediction model to generate a probabilistic output, where the mean is the future trajectory and the covariance reflects the corresponding uncertainty. Under this framework, we show two special cases based on multivariate Gaussian and Laplace distributions respectively.
23
+
24
+ $\bullet$ We conduct extensive experiments to validate the CU-empowered prediction model on both synthetic datasets and two large-scale real-world datasets. On self-generated synthetic datasets, we validate that the proposed method is able to closely reconstruct the ground-truth distribution. On the public benchmarks, the CU-empowered prediction model consistently outperforms the corresponding one without CU. Specifically, by leveraging the proposed CU, VectorNet improves by $57$ cm regarding Final Displacement Error (FDE) on the nuScenes dataset!
25
+
26
+ # Method
27
+
28
+ Consider $m$ agents in a data sample, and let $\mathop{\mathrm{X}}\!=\!\{\!\mathbf{x}_{1}\!,\!\mathbf{x}_{2}\!,...,\!\mathbf{x}_{m}\!\}$, $\mathop{\mathrm{Y}}\!=\!\{\!\mathbf{y}_{1}\!,\!\mathbf{y}_{2}\!,\!...\!,\!\mathbf{y}_{m}\!\}$ be the past observed and the future trajectories of all agents, where $\mathbf{x}_{i}\!\in\!\mathbb{R}^{2T_{-}}$ and $\mathbf{y}_{i}\!\in\!\mathbb{R}^{2T_{+}}$ are the past observed and the future trajectories of the $i$-th agent. Each $\mathbf{x}_i/\mathbf{y}_i$ consists of two-dimensional coordinates at different timestamps of $T_{\!-}/T_{\!+}$. We assume that a training dataset $\mathcal{D}$ consists of $N$ independent and identically distributed data samples $\{(\!\mathop{\mathrm{X}}^{i}\!,\mathop{\mathrm{Y}}^{i}\!)\}_{i=1}^{N}$. For predicting future trajectories of multiple agents and modeling the uncertainty over the predictions, we seek to use a probabilistic framework to model the predictive distribution $p(\!\mathop{\mathrm{Y}}|\mathop{\mathrm{X}}\!)$ of multiple agents' future trajectories based on the training dataset $\mathcal{D}$.
29
+
30
+ <figure data-latex-placement="h">
31
+ <div class="center">
32
+
33
+ </div>
34
+ <figcaption><strong>Graphical model for deep learning networks in the three-agent trajectory forecasting setting</strong>: (a) represents the model that predicts the trajectory of each agent independently; (b) shows the model that explicitly captures the interaction among multiple agents. <span class="math inline"><strong>x</strong><sub><em>i</em></sub></span> is the observed trajectory of the <span class="math inline"><em>i</em></span>-th agent; <span class="math inline"><strong>h</strong><sub><em>i</em></sub></span> and <span class="math inline"><strong>y</strong><sub><em>i</em></sub></span> are its corresponding hidden feature and future trajectory respectively.</figcaption>
35
+ </figure>
36
+
37
+ Previous works in uncertainty modeling [@Gal2016Uncertainty; @NIPS2017_2650d608; @pmlr-v48-gal16] use Gaussian distribution to approximate $p(\!\mathop{\mathrm{Y}}|\mathop{\mathrm{X}}\!)$. The assumption behind this approach is that $p(\!\mathbf{y}_{i}|\mathbf{x}_{i}\!)$ is independent for every $i\in\{1,2,3,...,m\}$. Mathematically, they set the covariance matrix of $p(\!\mathop{\mathrm{Y}}|\mathop{\mathrm{X}}\!)$ as a diagonal matrix. This assumption is valid for the regression task that uses the model shown in Figure [\[fig:Individual\]](#fig:Individual){reference-type="ref" reference="fig:Individual"}. We refer to the uncertainty under the independence assumption as *individual uncertainty* in this paper. However, Figure [\[fig:Collaborative\]](#fig:Collaborative){reference-type="ref" reference="fig:Collaborative"} considers a prediction model that includes interaction modeling among multiple agents: $\mathbf{y}_{i}$ is no longer dependent solely on $\mathbf{x}_{i}$, but also on other agents $\mathbf{x}_{j}$ where $j\!\neq\!i$ in the scene. We call the uncertainty brought by this interaction *collaborative uncertainty*. The existence of collaborative uncertainty turns $p(\!\mathop{\mathrm{Y}}|\mathop{\mathrm{X}}\!)$ from the individual distribution into the joint distribution of multiple agents.
38
+
39
+ Contrary to existing methods, we consider collaborative uncertainty and model $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}})$ more accurately by making the covariance matrix a full matrix without imposing any restrictions on its form. In the following subsection, we will introduce an approach to modeling both individual uncertainty and collaborative uncertainty using a unified CU-based framework.
40
+
41
+ <figure id="framework" data-latex-placement="ht">
42
+ <div class="center">
43
+ <embed src="framework_shorter.pdf" />
44
+ </div>
45
+ <figcaption><strong>Proposed uncertainty estimation framework.</strong> The encoder may contain a module that exploits agent-wise interaction. Decoders output the mean <span class="math inline"><em>μ</em><sub><strong>w</strong></sub>(X )</span>, covariance <span class="math inline"><em>Σ</em><sub><strong>w</strong></sub>(X )</span> containing individual and collaborative uncertainty, and auxiliary parameters <span class="math inline"><em>Φ</em><sub><strong>w</strong></sub>(X )</span>. The outputs formulate the training loss with the ground truth <span class="math inline">Y </span>. <span class="math inline"><em>Φ</em><sub><strong>w</strong></sub>(⋅)</span> is only used in Laplace collaborative uncertainty.</figcaption>
46
+ </figure>
47
+
48
+ In this work, to model collaborative uncertainty, we abandon the independence assumption held by previous works [@Gal2016Uncertainty; @NIPS2017_2650d608; @pmlr-v48-gal16], setting $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}})$ as a joint multivariate distribution, whose mean is $\mu \in \mathbb{R}^{m \times 2T_{+}}$ and covariance $\Sigma\in \mathbb{R}^{m\times m\times2T_{+}}$. Element $\mu_{i,t}$ is the expected position of the $i$-th agent at timestamp $t$. As the diagonal elements of $\Sigma$ are considered individual uncertainty [@Gal2016Uncertainty; @NIPS2017_2650d608; @pmlr-v48-gal16], we further let *off-diagonal* elements describe collaborative uncertainty. Diagonal element $\Sigma_{i,i,t}$ models the variance of the $i$-th agent at timestamp $t$; off-diagonal element $\Sigma_{i,j,t}$ models the covariance between the $i$-th and $j$-th agents at timestamp $t$. Therefore, we can simultaneously obtain individual and collaborative uncertainty by estimating the covariance $\Sigma$ of $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}})$. Accordingly, we propose a CU-based comprehensive uncertainty estimation framework (see Figure [2](#framework){reference-type="ref" reference="framework"}) with the following steps:
49
+
50
+ **Step 1:** *Choose a probability density function,* $p(\!\mathop{\mathrm{Y}}|\mathop{\mathrm{X}}; \mu, \Sigma, \Phi\!)$, for the predictive distribution, which includes a mean $\mu\!\in\!\mathbb{R}^{\!m\!\times\!2T_{+}}$ used to approximate the future trajectories, a covariance $\Sigma\!\in\!\mathbb{R}^{\!m\times\!m\times\!2T_{+}}$ used to quantify individual uncertainty and collaborative uncertainty, and some auxiliary parameters $\Phi$ used to describe the predictive distribution. Further, we set covariance matrix $\Sigma_{t}$, which represents the covariance matrix at timestamp $t$, as a full matrix instead of an identity or diagonal matrix.
51
+
52
+ **Step 2:** *Design a prediction model,* $\mathcal{F}[\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}}),\Sigma_{\boldsymbol{w}}(\mathop{\mathrm{X}}),\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}})]$, where $\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}})$, $\Sigma_{\boldsymbol{w}}(\mathop{\mathrm{X}})$ and $\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}})$ are three neural networks, which approximate values of mean $\mu$, covariance $\Sigma$ and auxiliary parameters $\Phi$ respectively. Note that $\boldsymbol{w}$ only indicates the parameters of these neural networks are trainable, and does not mean they share same parameters.
53
+
54
+ **Step 3:** *Derive a loss function* from $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\mu,\Sigma,\Phi)$ via maximum likelihood estimation: $\mathcal{L}(\boldsymbol{w})=-\sum\limits_{i=1}^{N}\log p(\mathop{\mathrm{Y}}^{i}|\mathop{\mathrm{X}}^{i};\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}}^{i}),\Sigma_{\boldsymbol{w}}(\mathop{\mathrm{X}}^{i}),\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}}^{i}))$ minimized to update trainable parameters in $\mu_{\boldsymbol{w}}(\cdot)$, $\Sigma_{\boldsymbol{w}}(\cdot)$ and $\Phi_{\boldsymbol{w}}(\cdot)$.
55
+
56
+ :::::: table*
57
+ ::::: center
58
+ :::: footnotesize
59
+ ::: sc
60
+ +----------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
61
+ | Assumption | Loss Function of Two Special Cases |
62
+ +:======================================:+:==========================================================================================================================================================:+:======================================================================================================================================:+
63
+ | 2-3 | Gaussian Distribution | Laplace Distribution |
64
+ +----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
65
+ | DIA$: | $\!\frac{1}{2}\! \sum\limits_{i=1}^{m}\![\sigma_{ii}^{-2}||\!\mathbf{y}_i\!-\!\mu_{\boldsymbol{w}}(\!\mathbf{x}_i\!)\!||_2^{2}\!+\!\log\!\sigma_{ii}^{2}]$ | $\!\sum\limits_{i=1}^{m}[\sigma_{ii}^{-2}||\!\mathbf{y}_i\!-\!\mu_{\boldsymbol{w}}(\!\mathbf{x}_i\!)\!||_1\!+\!\log\!\sigma_{ii}^{2}]$ |
66
+ | \begin{pmatrix} | | |
67
+ | \sigma_{11} \!& \!0\!&\!\cdots\!&\!0\\ | | |
68
+ | 0\!&\!\sigma_{22}\!&\!\cdots\!&\!0\\ | | |
69
+ | \vdots\!&\!\vdots\!&\!&\!\vdots \\ | | |
70
+ | 0\!&\!0\!&\!\cdots\!&\!\sigma_{mm} \\ | | |
71
+ | \end{pmatrix}$ | | |
72
+ +----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
73
+ | | | |
74
+ +----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
75
+ :::
76
+ ::::
77
+ :::::
78
+ ::::::
79
+
80
+ In multi-agent trajectory forecasting, based on Laplace and Gaussian distributions, the $\ell_1$- and $\ell_2$-based loss functions are commonly adopted to train prediction models [@liang2020learning; @Gao_2020_CVPR; @DBLP:conf/nips/KosarajuSM0RS19; @mangalam2020journey]. Here we apply the probabilistic framework proposed in Section [3.2](#3.2){reference-type="ref" reference="3.2"} to model the individual and collaborative uncertainty based on multivariate Gaussian distribution and multivariate Laplace distribution respectively, which leads to two novel loss functions. Mathematically, the essential difference between our proposed loss functions and previous loss functions derived from Gaussian distribution and Laplace distribution for modeling individual uncertainty is that they have different assumptions about the covariance matrix; see a summary in Table [\[com-table\]](#com-table){reference-type="ref" reference="com-table"}. We regard the covariance as a full matrix.
81
+
82
+ We start with the multivariate Gaussian distribution, as it has a simpler probability density function than the multivariate Laplace distribution.
83
+
84
+ **Probability density function.** We follow the framework proposed in Section [3.2](#3.2){reference-type="ref" reference="3.2"} and choose the probability density function as the multivariate Gaussian distribution: $$\begin{equation}
85
+ \setlength{\abovedisplayskip}{4pt}
86
+ \setlength{\belowdisplayskip}{4pt}
87
+ \begin{small}
88
+ p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\mu,\Sigma,\Phi)=(2\pi)^{-\frac{m}{2}}\cdot {\rm det} [\Sigma]^{-\frac{1}{2}}\cdot
89
+ e^{ -\frac{1}{2}(\mathop{\mathrm{Y}}- \mu)\Sigma^{-1}(\mathop{\mathrm{Y}}- \mu)^{T}},
90
+ \label{g_pdf}
91
+ \end{small}
92
+ \end{equation}$$ where ${\rm det} [\Sigma]$ represents the determinant of covariance $\Sigma$.
93
+
94
+ **Model design.** Based on ([\[g_pdf\]](#g_pdf){reference-type="ref" reference="g_pdf"}), we can approximate the value of mean $\mu$ via a neural network $\mu_{\boldsymbol{w}}(\cdot)$. When using the same way to approximate the value of covariance $\Sigma$, however, we face two challenges: 1) each covariance matrix $\Sigma_t$ in covariance $\Sigma$ needs to be inverted, which could lead to numerical instability; 2) it is computationally expensive and numerically unstable to compute the determinant of each covariance matrix $\Sigma_t$ in covariance $\Sigma$ directly given a large amount of trainable parameters.
95
+
96
+ For the first challenge, we use a neural network $\Sigma^{-1}_{\!\boldsymbol{w}}(\cdot)$ to directly approximate the inverse of covariance $\Sigma$. For the second challenge, similar to [@8578672] and [@DBLP:conf/cvpr/GundavarapuSMSJ19], we apply the square-root-free Cholesky decomposition to each $\Sigma^{-1}_{t_{\boldsymbol{w}}}$ in $\Sigma^{-1}_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)$: $\Sigma^{-1}_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)\!=\!L_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)D_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!) L^{T}_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)$,
97
+
98
+ where $L_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)$ is a lower unit triangular matrix and $D_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)$ is a diagonal matrix. Then, the determinant of the inverse of covariance $\Sigma^{-1}$ is obtained by $\prod\limits_{j=1}^m\!d_{jj}$, where $d_{jj}$ is the $j$-th diagonal element in $D_{\!\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)$. We can thus get the parameterized form of ($\ref{g_pdf}$) as: $p(\!\mathop{\mathrm{Y}}\!|\!\mathop{\mathrm{X}}\!;\!\boldsymbol{w}\!)\!=\!(\!2\pi\!)^{-\!\frac{m}{2}}(\!\prod\limits_{j=1}^m\!d_{jj})^{\frac{1}{2}}\!e^{-\!\frac{q_{\boldsymbol{w}}(\!\mathop{\mathrm{Y}}\!,\!\mathop{\mathrm{X}}\!)}{2}},$ where $q_{\boldsymbol{w}}(\!\mathop{\mathrm{Y}},\!\mathop{\mathrm{X}}\!)\!=\![\!\mathop{\mathrm{Y}}\!-\!\mu_{\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)\!]\Sigma^{-1}_{\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)[\!\mathop{\mathrm{Y}}\!-\!\mu_{\boldsymbol{w}}(\!\mathop{\mathrm{X}}\!)\!]^{T}$.
99
+
100
+ As there are no auxiliary parameters in the parameterized form of ($\ref{g_pdf}$), we can get the prediction model $\mathcal{F}[\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}}),\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}})]$, whose framework is illustrated in Figure [2](#framework){reference-type="ref" reference="framework"}. Once $\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}})$ is fixed and given, individual and collaborative uncertainty are computed through the inversion.
101
+
102
+ **Loss function.** According to the square-root-free Cholesky decomposition and the parameterized form of ($\ref{g_pdf}$), the Gaussian collaborative uncertainty loss function is then: $$\begin{equation}
103
+ \setlength{\abovedisplayskip}{4pt}
104
+ \setlength{\belowdisplayskip}{4pt}
105
+ \begin{small}
106
+ \mathcal{L}_{\rm Gau-cu}(\boldsymbol{w})=\frac{1}{2} \frac{1}{N} \sum_{i=1}^{N}[q_{\boldsymbol{w}}(\mathop{\mathrm{Y}}^i,\mathop{\mathrm{X}}^i)-\sum_{j=1}^{m} \log (d^{i}_{jj})].
107
+ \label{loss_gau}
108
+ \end{small}
109
+ \end{equation}$$ We update the trainable parameters in $\mu_{\boldsymbol{w}}(\cdot)$ and $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$ through minimizing ([\[loss_gau\]](#loss_gau){reference-type="ref" reference="loss_gau"}). Note that $q_{\boldsymbol{w}}(\cdot,\cdot)$ is related to $\mu_{\boldsymbol{w}}(\cdot)$ and $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$, and $d^{i}_{jj}$ is related to $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$.
110
+
111
+ In multi-agent trajectory forecasting, previous methods [@liang2020learning; @Gao_2020_CVPR; @DBLP:conf/nips/KosarajuSM0RS19] have found that the $\ell_1$-based loss function derived from Laplace distribution usually leads to better prediction performances than the $\ell_2$-based loss function from Gaussian distribution, because the former is more robust to outliers. It is thus important to consider multivariate Laplace distribution.
112
+
113
+ **Probability density function.** We follow the framework proposed in Section [3.2](#3.2){reference-type="ref" reference="3.2"} and choose the probability density function as the multivariate Laplace distribution: $$\begin{equation}
114
+ \setlength{\abovedisplayskip}{4pt}
115
+ \setlength{\belowdisplayskip}{4pt}
116
+ \begin{small}
117
+ p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\mu,\Sigma,\Phi) = \frac{2{\rm det} [\Sigma]^{-\frac{1}{2}} }{(2\pi)^{\frac{m}{2}}\lambda}\cdot\frac{K_{(\frac{m}{2}-1)}(\sqrt{\frac{2}{\lambda}(\mathop{\mathrm{Y}}\!-\!\mu)\Sigma^{-1}(\mathop{\mathrm{Y}}\!-\!\mu)^{T}})}{(\sqrt{\frac{\lambda}{2}(\mathop{\mathrm{Y}}\!-\!\mu)\Sigma^{-1}(\mathop{\mathrm{Y}}\!-\!\mu)^{T}})^{\frac{m}{2}}},
118
+ \label{la_pdf}
119
+ \end{small}
120
+ \end{equation}$$ where ${\rm det} [\!\Sigma\!]$ denotes the determinant of covariance $\Sigma$, and $K_{\!(\frac{m}{2}-1)\!}(\cdot)$ denotes the modified Bessel function of the second kind with order $(\!\frac{m}{2}-1\!)$.
121
+
122
+ **Model design.** Similar to Section [3.3.1](#g_cu){reference-type="ref" reference="g_cu"}, we employ two neural networks $\mu_{\boldsymbol{w}}(\cdot)$ and $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$ to approximate the values of $\mu$ and $\Sigma^{-1}$ respectively, and represent $\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}})$ via its square-root-free Cholesky decomposition we used in the Gaussian collaborative uncertainty. Since the modified Bessel function is intractable for a neural network to work with, different from Section [3.3.1](#g_cu){reference-type="ref" reference="g_cu"}, we should simplify ([\[la_pdf\]](#la_pdf){reference-type="ref" reference="la_pdf"}).
123
+
124
+ Inspired by [@1618702], we simplify ([\[la_pdf\]](#la_pdf){reference-type="ref" reference="la_pdf"}) by utilizing the multivariate Gaussian distribution to approximate the multivariate Laplace Distribution. We reformulate a multivariate Laplace distribution by introducing auxiliary variables. Let $\mathbf{z}\in\mathbb{R}^{+}$ be a random variable with the probability density function: $p(\mathbf{z}|\mathop{\mathrm{X}};\boldsymbol{w}) = \frac{1}{\lambda}e^{-\frac{\mathbf{z}}{\lambda}}$, then we can get: $p(\!\mathop{\mathrm{Y}}|\mathbf{z}\!,\mathop{\mathrm{X}};\boldsymbol{w}\!)\!=\!\frac{{\rm det}\![\!\Sigma^{-1}_{\boldsymbol{w}}\!(\mathop{\mathrm{X}}\!)\!]^{\frac{1}{2}}}{(2\pi\mathbf{z})^{\frac{m}{2}}}e^{-\!\frac{q_{\boldsymbol{w}}(\mathop{\mathrm{Y}},\mathop{\mathrm{X}})}{2\mathbf{z}}}$, where $q_{\boldsymbol{w}}(\mathop{\mathrm{Y}},\mathop{\mathrm{X}})=[\mathop{\mathrm{Y}}-\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}})]\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}})[\mathop{\mathrm{Y}}-\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}})]^{T}$. Further, if the value of $\mathbf{z}$ is given, $p(\mathop{\mathrm{Y}}|\mathbf{z},\mathop{\mathrm{X}};\boldsymbol{w})$ is a multivariate Gaussian distribution. In this work, instead of drawing a value for $\mathbf{z}$ from the exponential distribution, we use a neural network $\Phi_{\boldsymbol{w}}(\cdot)$ to directly output a value for $\mathbf{z}$. The intuition is that, in the training process of the prediction model, the value of $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\boldsymbol{w})$ is the expectation of $p(\mathop{\mathrm{Y}}|\mathbf{z},\mathop{\mathrm{X}};\boldsymbol{w})$ over $\mathbf{z}$, which makes $p(\mathop{\mathrm{Y}}|\mathbf{z},\mathop{\mathrm{X}};\boldsymbol{w})$ a function of $\mathbf{z}$ whose domain is $\mathbb{R}^{+}$. 
Thus, there should exist an appropriate $\mathbf{z}^{*}\in\mathbb{R}^{+}$ to make: $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\boldsymbol{w})=p(\mathop{\mathrm{Y}}|\mathbf{z}^{*},\mathop{\mathrm{X}};\boldsymbol{w})$ (see proof in the appendix). To find such a $\mathbf{z}^{*}$, we use $\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}})$, which can employ its learning ability. Then, we can get the parameterized form of $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\boldsymbol{w})$ as: $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\boldsymbol{w}) = \frac{{\rm det} [\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}})]^{\frac{1}{2}}}{(2\pi \Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}}))^{\frac{m}{2}}} e^{-\frac{q_{\boldsymbol{w}}(\mathop{\mathrm{Y}},\mathop{\mathrm{X}})}{2\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}})}}$.
125
+
126
+ Finally, we can get the prediction model $\mathcal{F}[\mu_{\boldsymbol{w}}(\mathop{\mathrm{X}}),\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}}),\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}})]$, whose framework is illustrated in Figure [2](#framework){reference-type="ref" reference="framework"}. Individual and collaborative uncertainty are learned indirectly through $\Sigma^{-1}_{\boldsymbol{w}}(\mathop{\mathrm{X}})$.
127
+
128
+ **Loss function.** On the basis of the square-root-free Cholesky decomposition and the parameterized form of $p(\mathop{\mathrm{Y}}|\mathop{\mathrm{X}};\boldsymbol{w})$, the Laplace collaborative uncertainty loss function is then: $$\begin{eqnarray}
129
+ \vspace{-1mm}
130
+ \begin{small}
131
+ \mathcal{L}_{\rm Lap-cu}(\boldsymbol{w}) = \frac{1}{2} \frac{1}{N}\sum\limits_{i=1}^{N} [\frac{q_{\boldsymbol{w}}(\mathop{\mathrm{Y}}^i,\mathop{\mathrm{X}}^i)}{\Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}}^i)}+m\log \Phi_{\boldsymbol{w}}(\mathop{\mathrm{X}}^i) - \sum\limits_{j=1}^{m} \log (d_{jj}^{i})].
132
+ \label{loss_la}
133
+ \end{small}
134
+ \vspace{-1mm}
135
+ \end{eqnarray}$$ where $d^{i}_{jj}$ is the $j$-th diagonal element in $D_{\boldsymbol{w}}(\mathop{\mathrm{X}}^{i})$. The parameters of $\mu_{\boldsymbol{w}}(\cdot)$, $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$ and $\Phi_{\boldsymbol{w}}(\cdot)$ are updated by minimizing ([\[loss_la\]](#loss_la){reference-type="ref" reference="loss_la"}). Note that $q_{\boldsymbol{w}}(\cdot,\cdot)$ is related to $\mu_{\boldsymbol{w}}(\cdot)$ and $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$, and $d^{i}_{jj}$ is derived from $\Sigma^{-1}_{\boldsymbol{w}}(\cdot)$.
2111.04138/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-03-05T16:47:46.521Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36" version="14.4.4" etag="XHLcCql7RE4qjkGQOCzC" type="google"><diagram id="zPa9rAeUeLnJb3nVGZL8">7ZtLc5s6FMc/DZsuMkYCAcvG6WuRmc540XtXdwjIRlOMXCHXdj/9lUAyD1FiNxhTJ/ZkbB09QL+/JJ8jEQvO1/tPLNwkjzTGqQVm8d6CDxYAtuO54kNaDqUlALA0rBiJVaHKsCC/sDLOlHVLYpw3CnJKU042TWNEswxHvGELGaO7ZrElTZtX3YQrbBgWUZia1m8k5klp9YFX2T9jskr0lW0UlDnrUBdWTeRJGNNdaSo6Bz9YcM4o5eW39X6OUwlPcykJfPxN7vHGGM74KRVAWeFnmG5V39R98YPuLKPbLMay/MyC97uEcLzYhJHM3Ql5hS3h61SkbPE1DvOkKCsTOWf0+5GQ6Nv9kqTpnKaUFU3DOMT+MjqWrOWgyMdPS5Gj7g8zjve/7aN9JCeGHKZrzNlBFFEVXFcNGzXagKPSu0o7T5mSmmxA1wvVcFkdm66Iii8Kajdg+AoAA/+KgJ3nAeMsfi9nvUhFaZjnJGoylZ0nYna/T8kqE7Y1iWNZt4Nv2TaOjfXhWVY1Fm4HC21jOA05+dlsvouPusJXSsSFKylctyGF46JmEzndsgirWvWFodWQ64H+hnjIVpgbDRVyHbt9koLum4I9CsJgIAWNhoZTEL0p2KcgHErBdkPDKei9KdijIPCCYRQ0GhpOQd9Q8MtaurEv8jasIbyvJgFk+gZg1uUc2AM4B4EB5ZFcnQgIWu6SayLpG9wvAaKhTpsItEckYv8VRLwRiZhB4QSJOHBEImYU90Uk34m/CZCB6IrriRl+TZDIqOuJGc5MkciY64kZHkyQyKjrSYe7jWaP/xFL31QNjOgib/a+uZmT0Qy3dn6UKVSeeCSQYNbnonfhbgoyyAoOm+uUHRjEnQ7iYAjipnssWMtF3HLnNKbSZ791/hD51+NveuIF/1vmbcNW6KPTI/DWbfQF9HkSbuRXLvqPf1FZ9X6DGREXk/Qq+9fK+NzCvCR7rA9s7AIqF4E4lfADiXVJM17bhS5fXfvTwJFvc0cbPsj3UBPCvrOba5K47h30DZmA49/pQ6u6VNr2IqnMAMQCKOWKliWPsXT30Y+tPBkSnYfL4lU3oZX8vE/D6Luo9ET3upknpjPFfFsWc67MELdWXkLl3uxUdBC6a23KuSNORjOgWlzfATL2phA0iMAOInAIImZANUEiEJgu4cWImIHUFIn4IxIxA6kJEnE6zjovRuS8cxa1FJ+yRS9qfSTyugXF4Y6Vp7vFD+3m74Hb3m4+dYsfuXZ/Q8Nt8Z95SmPqL9Czwz9SZfFrqJL/1vMe9moIlKnD5QdESUdpcqtjxNW7LXrRQJcbIx2Bbs8Y6TzJew3zH4GmJLC9QJ88//1nGhpQ244g+qz5/yqEbT3t9YKFvSVscDFh9ZB5m7TnaWtI8seT9oLamuH9gj7RokgWkwjnhtRn+bRXeeLP4DczveCuJ/7sIcJpaIbTZ8+WqY90rz3S2x7DqSPdax8kX871gGZQ/xmHfC1G7LWjtjaFcZ9CgR3PqKLZ4qY3v/2Wy3vBzW+RrB6YL8dt9W8H8MP/</diagram></mxfile>
2111.04138/main_diagram/main_diagram.pdf ADDED
Binary file (18.8 kB). View file
 
2111.04138/paper_text/intro_method.md ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Deep neural networks are now being deployed in numerous domains including medicine, transportation, security or finances with broad societal implications. Yet, these networks have become nearly inscrutable, and for most real-world applications, these systems are used to make critical decisions -- often without any explanation. In recent years, numerous explainability methods have been proposed [@simonyan2014deep; @zeiler2014visualizing; @springenberg2014striving; @ribeiro2016lime; @selvaraju2017gradcam; @fong2017perturbation; @sundararajan2017axiomatic; @petsiuk2018rise]. In addition to helping improve people's trust in these systems, these methods have helped identify and correct biases in datasets [@ribeiro2016lime; @selvaraju2017gradcam; @dancette2021assessing]. This has, in turn, helped improve these systems' robustness and accelerate their broad deployment. An important limitation of standard explainability methods is that they require access to the system's internal states including hidden layer activations or input gradients [@simonyan2014deep; @selvaraju2017gradcam; @fong2017perturbation; @kapishnikov2019xrai]. As a result, these so-called white-box methods cannot be applied in the most general situations for which the internal states of network are not publicly accessible. For instance, it is common for companies to use neural networks provided by third parties (e.g., through web APIs or specialized hardware). However, only a handful of so-called black-box methods have been proposed to address this challenge with limited successes [@zeiler2014visualizing; @ribeiro2016lime; @petsiuk2018rise]. It is thus critical to develop more general methods that can reliably interpret and characterize the underlying decision processes of a wider array of models.
4
+
5
+ <figure id="fig:sobol" data-latex-placement="t">
6
+ <img src="images/sobol.png" style="width:99.0%" />
7
+ <figcaption><strong>(Left)</strong> <strong>Sobol Attribution Method overview.</strong> Our method aims to explain the prediction of a black-box model for a given image. We first sample a set of real-valued masks <span class="math inline"><strong>M</strong></span> drawn from a Quasi-Monte Carlo (QMC) sequence. We apply these masks to the input image through a perturbation function <span class="math inline"><strong>Φ</strong></span> (here the <em>Inpainting</em> function) to form perturbed inputs <span class="math inline"><strong>X</strong></span> that we forward to the black box <span class="math inline"><strong>f</strong></span> to obtain prediction scores. Using the masks <span class="math inline"><strong>M</strong></span> and the associated prediction scores, we finally produce an explanation <span class="math inline">𝒮<sub><em>T</em><sub><em>i</em></sub></sub></span> which characterizes the importance of each region by estimating the total order Sobol indices. While <span class="math inline">𝒮<sub><em>T</em><sub><em>i</em></sub></sub></span> encompasses the effects of first and all higher-order non-linear interactions between pixel regions, we can also produce the first-order Sobol indices <span class="math inline">𝒮<sub><em>i</em></sub></span> that reflect the importance of a region in isolation (e.g., the eyes of the cats). <strong>(Right)</strong> <strong>Sample explanations for ResNet50V2.</strong> Comparing explanations produced with <span class="math inline">𝒮<sub><em>i</em></sub></span> and <span class="math inline">𝒮<sub><em>T</em><sub><em>i</em></sub></sub></span> helps highlight the importance of individual image regions in isolation vs. jointly (e.g., the lynx tips are important but conditioned on the presence of an eye). </figcaption>
8
+ </figure>
9
+
10
+ Common approaches to explaining a model's prediction consists of attributing a score for each input dimension such as image pixels for computer vision systems or individual words for natural language processing. Shown in Fig. [1](#fig:sobol){reference-type="ref" reference="fig:sobol"} is an example image and associated importance map for an image categorization system, whereby scores for individual pixels are displayed as heatmaps where hotter locations correspond to pixels that contribute the most to the system's final prediction. In the context of black-box models, a core challenge is to derive these heatmaps using only the output predictions available through the network's forward pass. A simple approach consists in applying a given perturbation at a specific location on the input image to then measure how the corresponding prediction is affected. In the case of image models, pixel intensities are simply set to a default value corresponding to a pure black or gray value; in the case of language models, individual words are removed entirely from the text [@arras2017relevant; @arras2017explaining]. However, evaluating the impact of these perturbations one dimension at a time fails to identify all of the non-linear interactions between input variables that are known to prevail in a complex system such as a deep neural network. However, estimating the combined effect of perturbations across multiple locations quickly becomes combinatorially intractable. Methods have been recently proposed to try to address some of these issues by grouping dimensions together, such as by grouping pixels within a neighborhood of the image (superpixel) [@zeiler2014visualizing] or sampling perturbation masks that affect multiple regions of the input [@ribeiro2016lime; @petsiuk2018rise]. A first limitation of these approaches includes the use of Monte Carlo sampling methods which require a high number of forward passes -- making these approaches computationally expensive. 
A second limitation is that they rely on relatively simple perturbations such as flipping pixels on or off [@zeiler2014visualizing; @ribeiro2016lime; @petsiuk2018rise]. This severely constrains the space of perturbations considered and limits the efficiency with which the space of perturbations can be explored.
11
+
12
+ We address these limitations by introducing an attribution method that leverages variance-based sensitivity analysis techniques, and more specifically Sobol indices [@sobol2001]. These methods were initially introduced to help identify the input variables that have the most influence on the variance of the output of a non-linear mathematical system [@saltelli2002]. These were traditionally used in physics and economics to estimate the part of the variance induced by a group of variables on a system's output [@saltelli2010variance; @iooss2015; @wagener2019has]. Our main contribution is a general framework to explain predictions from black-box models by adapting Sobol indices to be used in conjunction with perturbation masks. One of the originalities of our attribution method is that it is designed to support standard perturbations and real-valued intensity perturbations that can generate a continuous range of perturbations. Our second contribution is a tractable method for the calculation of Sobol indices. This is done by first sampling the perturbation masks following Quasi-Monte Carlo sequences, which efficiently covers the space of perturbations. This can be done best by leveraging the most efficient estimator borrowed from the sensitivity analysis literature [@jansen1994; @jansen1999; @janon2014asymptotic; @puy2020comprehensive]. As a result, the proposed method can be efficiently applied to high-dimensional inputs such as images. It produces on par with or better explanations than the state-of-the-art with at least half the number of forward passes. Another benefit of our method is that it allows to characterize not only the main effect of image regions but also higher-order interactions by decomposing the variance of the system's prediction using the Sobol indices. We run extensive experiments to demonstrate the benefits of the proposed method on several recent complementary benchmarks including the "Pointing Game" and "Deletion". 
Our primary focus is image classification, but our method is general and we include results on text classification benchmarks as well.
13
+
14
+ # Method
15
+
16
+ A widely used family of attribution methods uses gradient computations via the backpropagation algorithm. The first such method was introduced in [@baehrens2010explain] and later refined in [@simonyan2014deep] in the context of deep convolutional networks. The idea is to backpropagate the gradient from a classification unit all the way down to the input pixels. The resulting gradient image indicates which pixels affect the decision score the most. Other gradient-based methods including DeConvNet [@zeiler2014visualizing] and Guided Backprop [@springenberg2014striving] were specifically developed to deal with certain activation functions such as ReLU. However, this family of methods are limited by the fact that they focus on the influence of individual pixels in an infinitesimal neighborhood around the input image in the image space. For instance, it has been shown that gradients often vanish when the prediction score to be explained is near the maximum value [@sundararajan2017axiomatic]. Integrated Gradient [@sundararajan2017axiomatic] partially addresses this issue by accumulating gradients along a straight interpolation path from a baseline state to the original image.
17
+
18
+ Yet another class of gradient-based methods [@fong2017perturbation; @fong2019extremal] iteratively optimize a perturbation mask over regions of an image. It leads to a different type of explanation given by the final perturbation mask. Such an approach might leverage more meaningful perturbations but it turns out to be computationally very slow with 20 seconds per explanation -- limiting its broad applicability. In comparison, our proposed approach also works on image regions but it does not require iterative gradient computation -- and can thus be parallelized.
19
+
20
+ Another family of attribution methods relies on the neural network's activations. Popular examples include CAM [@zhou2016cam] which computes an attribution score based on a weighted sum of feature channels activities -- right before the classification layer. GradCAM [@selvaraju2017gradcam] extends CAM via the use of gradients to reweigh each feature channel to take into account their importance for the predicted class. These methods also work on image regions because they are computed on the output of feature maps which are themselves at a coarser resolution than the original image. In comparison, our proposed approach is model-agnostic and hence does not require access to internal computations.
21
+
22
+ Most similar to our approach are attribution methods that can be used to explain the predictions of truly black-box models. These methods probe a neural network's responses to perturbations over image regions and combine the resulting predictions into an influence score for each individual pixel or group of pixels. The simplest method, "Occlusion" [@zeiler2014visualizing], masks individual image regions -- one at a time -- with an occluding mask set to a baseline value and assigns the corresponding prediction scores to all pixels within the occluded region. Then the explanation is given by these prediction scores and can be easily interpreted. However, occlusion fails to account for the joint (higher-order) interactions between multiple image regions. For instance, occluding two image regions -- one at a time -- may only decrease the model's prediction minimally (say a single eye or mouth component on a face) while occluding these two regions together may yield a substantial change in the model's prediction if these two regions interact non-linearly as is expected for a deep neural network.
23
+
24
+ Our work, together with related methods such as LIME [@ribeiro2016lime] and RISE [@petsiuk2018rise], addresses this problem by randomly perturbing the input image in multiple regions at a time. Obviously, perturbing multiple image locations simultaneously leads to a combinatorial explosion in the number of combinations and methods have been proposed to make these approaches more tractable. For instance, a popular method, LIME [@ribeiro2016lime], takes superpixels as regions to perturb instead of individual pixels. An influence score is then computed for a set of connected pixel patches indicating how strongly a patch is correlated to the model predictions.
25
+
26
+ RISE [@petsiuk2018rise] relies on Monte Carlo sampling to generate a set of binary masks, each value in the masks representing a pixel region. By probing the model with randomly masked versions of the input, RISE [@petsiuk2018rise] produces an importance map by considering the average of the masks weighted by their associated prediction scores. Instead of using binary masks, our method considers a continuous range of perturbations which allows for a finer exploration of the model's response. Our method can still use the same perturbations as used in Occlusion [@zeiler2014visualizing], LIME [@ribeiro2016lime] and RISE [@petsiuk2018rise], but it also enables the use of more advanced perturbation functions that take continuous inputs.
27
+
28
+ More importantly, the aforementioned methods lack a rigorous framework. Here, we introduce a theoretical framework that decomposes the influence score of each individual region between multiple orders of influence. The first-order approximates Occlusion [@zeiler2014visualizing] by considering the influence of one region at a time, while the second-order considers two regions at a time, etc. The decomposition also includes higher-orders.
29
+
30
+ Our attribution method builds on the variance-based sensitivity analysis framework. The approach was introduced in the 70s [@cukier1973study] and reached a cornerstone with the Sobol indices [@sobol1993sensitivity]. Sobol indices are currently used in many fields (including those that are said to be safety-critical), especially for the analysis of physical phenomena [@iooss2015]. They are used to identify the input dimensions that have the highest influence on the output of a model or a mathematical system. Several statistical estimators to compute these indices are available [@saltelli2010variance; @marrel2009calculations; @janon2014asymptotic; @owen2013better; @tarantola2006random] and have asymptotic guarantees [@janon2014asymptotic; @da2013efficient; @tissot2012bias]. We build on this literature by adapting these Sobol indices in the context of black-box models to compute the influence of regions of an image on the output predictions using perturbation masks.
31
+
32
+ In this work, we formulate the feature attribution problem as quantifying the contribution of a collection of $d$ real-valued features $\bm{x} = (x_1, ..., x_d)$ with respect to a model decision. Specifically, we consider a black-box decision function $\bm{f} : \mathcal{X} \to \mathbb{R}^k$ whose internal states and analytical form are unknown (for instance, $\bm{f}$ can score the probability for the input to belong to a specific class). Our goal is to quantify the importance of each feature to the decision score $\bm{f}(\bm{x})$, not just individually but also collectively. To capture these higher-order interactions, our method consists in estimating the Sobol indices of the features $\bm{x}$ by randomly perturbating them and evaluating the impact of these perturbations on the prediction of the black-box model (Fig. [1](#fig:sobol){reference-type="ref" reference="fig:sobol"}).
33
+
34
+ Considering variations of $\bm{f}(\bm{x})$ in response to meaningful perturbations of the input $\bm{x}$ is a natural way to interpret the local behavior of the decision function $\bm{f}$ around $\bm{x}$. Several methods build on this idea, e.g., by removing one or a group of input features [@zeiler2014visualizing; @ribeiro2016lime; @fong2017perturbation; @petsiuk2018rise; @fong2019extremal] or by back-propagating the gradient to the input space through the model [@simonyan2014deep; @sundararajan2017axiomatic; @smilkov2017smoothgrad; @selvaraju2017gradcam]. Most of these methods use the model's internal representations and/or require computing the gradient w.r.t. the input, which makes them unusable in a black-box setting. Moreover, these methods focus on estimating the intrinsic contribution of each feature, neglecting the combinatorial components. Our method applies perturbations directly on the input in order to deal with a black-box scenario, and allows us to estimate higher-order interactions between the features $\bm{x}=(x_1, ...,x_d)$ that contribute to $\bm{f}(\bm{x})$.
35
+
36
+ []{#sec:perturbation_masks label="sec:perturbation_masks"}
37
+
38
+ Formally, let us define a probability space $(\Omega,\mathcal{X},P)$ of possible input perturbations and a random vector $\bm{X} = (X_1,...,X_d)$ as a stochastic perturbation of the original input $\bm{x}$ (see Fig. [1](#fig:sobol){reference-type="ref" reference="fig:sobol"}). There are several ways to define random perturbations corresponding to different coverage of the data manifold around $\bm{x}$. For instance, we can consider the perturbation mask operator $\mathbf{\Phi}: \mathcal{X} \times \mathcal{M} \to \mathcal{X}$ which combines a stochastic mask $\bm{M} = (M_1, ..., M_d) \in \mathcal{M}$ (i.e., an i.i.d sequence of real-valued random variables on $[0, 1]^d$) with the original input $\bm{x}$. This formulation encompasses *Inpainting* perturbations: $\mathbf{\Phi}(\bm{x}, \bm{M}) = \bm{x} \odot \bm{M} + (\mathbf{1} - \bm{M}) \mu$ with $\mu \in \mathbb{R}$ a baseline value, and $\odot$ the Hadamard product. This consists in linearly varying the pixel intensities towards a baseline intensity such as a pure black with a value of zero [@fong2017perturbation; @ribeiro2016lime; @zeiler2014visualizing; @petsiuk2018rise]. Similarly, *Blurring* consists of applying a blur operator with various intensities to certain regions of the image [@fong2017perturbation]. Different perturbation domains can be considered for other types of data such as textual or tabular data that we discuss further in the experimental section. In the next section, we explain how we adapt the Sobol-based sensitivity analysis using a class of perturbations to explain the predictions of a black-box model.
39
+
40
+ We first briefly review the classical Sobol-Hoeffding decomposition from [@hoeffding1948] and introduce the Sobol indices. Let $(X_1,...,X_d)$ be independent variables and assume that $\bm{f}$ belongs to $\mathbb{L}^2(\mathcal{X},P)$. Moreover we denote the set $\mathcal{U} =\{1, ..., d\}$, $\bm{u}$ a subset of $\mathcal{U}$, its complementary ${\sim}\rvu$ and $\mathbb{E}(\cdot)$ the expectation over the perturbation space. The Hoeffding decomposition allows us to express the function $\bm{f}$ into summands of increasing dimension, denoting $\bm{f}_{\bm{u}}$ the partial contribution of variables $\bm{X}_u = (X_i)_{i\in \bm{u}}$ to the score $\bm{f}(\bm{X})$: $$\begin{equation}
41
+ \label{eq:anova}
42
+ \begin{aligned}
43
+ \bm{f}(\bm{X}) &= \bm{f}_{\emptyset} + \sum_i^d \bm{f}_i(X_i)
44
+ + \sum_{1 \leqslant i < j \leqslant d} \bm{f}_{i,j}(X_i, X_j)
45
+ + \cdots + \bm{f}_{1,...,d}(X_1, ..., X_d) \\
46
+ &= \sum_{\substack{\rvu \subseteq \mathcal{U}}} \bm{f}_{\bm{u}}(\bm{X}_{\bm{u}})
47
+ \end{aligned}
48
+ \end{equation}$$
49
+
50
+ Eq. [\[eq:anova\]](#eq:anova){reference-type="ref" reference="eq:anova"} consists of $2^d$ terms and is unique under the following orthogonality constraint: $$\begin{equation}
51
+ \label{eq:anova_ortho}
52
+ \begin{aligned}
53
+ \forall (\bm{u},\bm{v}) \subseteq \mathcal{U}^2 \; s.t. \; \bm{u} \neq \bm{v}, \;\; \E\big(\bm{f}_{\bm{u}}(\bm{X}_{\bm{u}}) \bm{f}_{\bm{v}}(\bm{X}_{\bm{v}})\big) = 0
54
+ \end{aligned}
55
+ \end{equation}$$
56
+
57
+ Furthermore, orthogonality yields the characterization $\bm{f}_{\bm{u}}(\bm{X}) = \mathbb{E}(\bm{f}(\bm{X})|\bm{X}_{\bm{u}}) - \sum_{\bm{v}\subset \bm{u}}\bm{f}_{\bm{v}}(\bm{X})$ and allows us to decompose the model variance as: $$\begin{equation}
58
+ \label{eq:var_decomposition}
59
+ \begin{aligned}
60
+ \Var(\bm{f}(\bm{X})) &= \sum_i^d \Var(\bm{f}_i(X_i)) +
61
+ \sum_{1 \leqslant i < j \leqslant d} \Var(\bm{f}_{i,j}(X_i, X_j)) +
62
+ ... + \Var(\bm{f}_{1,...,d}(X_1, ..., X_d)) \\
63
+ &=\sum_{\substack{\rvu \subseteq \mathcal{U}}} \Var(\bm{f}_{\bm{u}}(\bm{X}_{\bm{u}}))
64
+ \end{aligned}
65
+ \end{equation}$$ Building from Eq. [\[eq:var_decomposition\]](#eq:var_decomposition){reference-type="ref" reference="eq:var_decomposition"}, it is natural to characterize the influence of any input subset $\bm{u}$ as its own variance w.r.t. the total variance. This yields, after normalization by $\Var(\bm{f}(\bm{X}))$, the general definition of Sobol indices.
66
+
67
+ ::: {#def:sobol_indice .definition}
68
+ **Definition 1** (Sobol indices [@sobol1993sensitivity]). *The sensitivity index $\mathcal{S}_{\bm{u}}$ which measures the contribution of the variable set $\bm{X}_{\bm{u}}$ to the model response $\bm{f}(\bm{X})$ in terms of fluctuation is given by: $$\begin{equation}
69
+ \label{eq:sobol_indice}
70
+ \mathcal{S}_{\bm{u}} = \frac{ \Var(\bm{f}_{\bm{u}}(\bm{X}_{\bm{u}})) }{ \Var(\bm{f}(\bm{X})) }
71
+ = \frac{ \Var(\mathbb{E}(\bm{f}(\bm{X}) | \bm{X}_{\bm{u}})) - \sum_{\bm{v}\subset \bm{u}}\Var(\mathbb{E}(\bm{f}(\bm{X}) | \bm{X}_{\bm{v}} ))}{ \Var(\bm{f}(\bm{X})) }
72
+ \end{equation}$$*
73
+ :::
74
+
75
+ Sobol indices give a quantification of the importance of any subset of features with respect to the model decision, in the form of a normalized measure of the model output deviation from $\bm{f}(\bm{X})$. Thus, Sobol indices sum to one: $\sum_{\bm{u} \subseteq \mathcal{U}} \mathcal{S}_{\bm{u}} = 1$.
76
+
77
+ For each subset of variables $\bm{X}_{\bm{u}}$, the associated Sobol index $\mathcal{S}_{\bm{u}}$ describes the proportion of the model's output variance explained by this subset. In particular, the first-order Sobol indices $\mathcal{S}_i$ capture the intrinsic share of total variance explained by a particular variable, without taking into account its interactions. Many attribution methods construct such intrinsic importance estimator. However, the framework of Sobol indices enables us to capture higher-order interactions between features. In this view, we define the Total Sobol indices.
78
+
79
+ ::: {#def:total_sobol_indice .definition}
80
+ **Definition 2** (Total Sobol indices [@homma1996importance]). *The total Sobol index $\mathcal{S}_{T_i}$ which measures the contribution of the variable $X_i$ as well as its interactions of any order with any other input variables to the model output variance is given by: $$\begin{equation}
81
+ \label{eq:sobol_total}
82
+ \mathcal{S}_{T_i}
83
+ = \sum_{\substack{\bm{u} \subseteq \mathcal{U} \\ i \in \bm{u} }} \mathcal{S}_{\bm{u} }
84
+ = 1 - \frac{\Var_{ \bm{X}_{\sim i} }(\mathbb{E}_{ X_i }(\bm{f}(\bm{X}) | \bm{X}_{\sim i})) }{ \Var(\bm{f}(\bm{X}))}
85
+ = \frac{ \mathbb{E}_{\bm{X}_{\sim i}}( \Var_{X_i} ( \bm{f}(\bm{X}) | \bm{X}_{\sim i} )) }{ \Var(\bm{f}(\bm{X})) }
86
+ \end{equation}$$*
87
+ :::
88
+
89
+ Where $\mathbb{E}_{\bm{X}_{\sim i}}( \Var_{X_i} ( \bm{f}(\bm{X}) | \bm{X}_{\sim i}))$ is the expected variance that would be left if all variables but $X_i$ were to be fixed. $\mathcal{S}_{T_i}$ is the sum of the Sobol indices for all the possible groups of variables where $i$ appears, i.e. first and higher order interactions of variable $X_i$.
90
+
91
+ Since the total interaction index contains the first order index, it is natural that it is greater than or equal to the first order index. We thus note the property which can easily be deduced: $\forall i, 0 \leq \mathcal{S}_i \leq \mathcal{S}_{T_i} \leq 1$. We will now see why these two indices and the difference between them make them relevant for the explainability of a black-box model.
92
+
93
+ These statistics quantify the intrinsic (first-order indices) and relational (total indices) impact of each variable to the model output. A variable with a low total Sobol index is therefore not important to explain the model decision. Also, a variable has a weak interaction with other variables when $\mathcal{S}_{T_i} \approx \mathcal{S}_i$, while it has a strong interaction when the difference between its two indices is high. A strong interaction means that the effect of one variable on the variation of the model output depends on other variables. Thus, using Sobol indices allows to describe fine grained interactions between inputs which leads to the model decision. We next present an efficient method to estimate these indices.
94
+
95
+ As models are becoming more and more complex, the proposed estimator must take into account the computational cost of model evaluation. Many efficient estimators have been proposed in the literature [@iooss2015]. In this work, we use the Jansen [@jansen1999] estimator which is often considered as one of the most efficient [@puy2020comprehensive]. Jansen is typically used with a Monte Carlo sampling strategy. We improve over Monte Carlo by using a Quasi-Monte Carlo (QMC) sampling strategy which generates low-discrepancy sample sequences allowing a faster and more stable convergence rate [@gerber2015]. For more information, see appendix [9](#ap:efficient){reference-type="ref" reference="ap:efficient"}. We will now describe the procedure to implement these estimators.
96
+
97
+ We start by drawing two independent matrices of size $N \times d$ of perturbation masks from Sobol low-discrepancy $LP_{\tau}$ sequences. $N$ will be our number of designs and we recall that $d$ is our number of dimensions (e.g., $121$ for $11$ by $11$ masks). Once the perturbation operator is applied to $\bm{x}$ with these masks, we obtain two matrices $\mathbf{A}$ and $\mathbf{B}$ of the same size as the perturbed inputs (i.e., partially masked images). We note $\mathbf{A}_{ji}$ and $\mathbf{B}_{ji}$ the elements of the matrices such that $i = 1, ..., d$ the number of variables studied and $j = 1, ..., N$ the number of samples in each matrix. We form the new matrix $\mathbf{C}^{(i)}$ in the same way as $\mathbf{A}$ except for the fact that the column corresponding to the variable $i$ is now replaced by the column of $\mathbf{B}$. We denote $f_{\emptyset} = \frac{1}{N} \sum_{j=1}^N \bm{f}(\mathbf{A}_j)$ and the empirical variance $\hat{\text{V}}= \frac{1}{N-1} \sum_{j=1}^N (\bm{f}(\mathbf{A}_j) - f_{\emptyset})^2$. The empirical estimators for first ($\hat{\mathcal{S}}_i$) and total order ($\hat{\mathcal{S}}_{T_i}$) can be formulated as:
98
+
99
+ $$\begin{equation}
100
+ \label{eq:jansen_estimator}
101
+ \hat{\mathcal{S}}_i = \frac
102
+ { \hat{\text{V}}- \frac{1}{2N} \sum_{j=1}^N (\bm{f}(\mathbf{B}_j) - \bm{f}(\mathbf{C}_j^{(i)}))^2 }
103
+ { \hat{\text{V}}}
104
+ \text{ }\text{ }\text{ }\text{ }\text{ }\text{ }\text{ }\text{ }
105
+ \hat{\mathcal{S}}_{T_i} = \frac
106
+ { \frac{1}{2N} \sum_{j=1}^N ( \bm{f}(\mathbf{A}_j) - \bm{f}(\mathbf{C}_j^{(i)}) )^2 }
107
+ { \hat{\text{V}}} \\
108
+ \end{equation}$$ Hence, to compute the set of first order and total indices, it is necessary to perform $N(d+2)$ forwards of the model. We study in section [3.3](#sec:efficient_estimator){reference-type="ref" reference="sec:efficient_estimator"} how to choose a sufficient number of forwards ($N$). To ease understanding and demonstrate that these estimators can be easily implemented, we show in Algorithm [\[algo:total_order_indices\]](#algo:total_order_indices){reference-type="ref" reference="algo:total_order_indices"} a minimal pythonic implementation of the total order estimator that outputs $\hat{\mathcal{S}}_{T_i}$ indices. The input `Y` contains the prediction scores of the $N*(d+2)$ forwards. The scores are ordered following the same QMC sampling ordering of their associated mask. The output `STis` contains $d$ importance scores, one for each dimension of the mask. In the case of images, we obtain our final explanation map by applying a bilinear upsampling to match the dimensions of the input image.
109
+
110
+ ``` {#algo:total_order_indices .python language="Python" caption="Pythonic implementation of the Total Order indices ($\\hat{\\mathcal{S}}_{T_i}$) calculation." label="algo:total_order_indices"}
111
+ def total_order_estimator(Y, N=32, d=11*11):
112
+ fA, fB = Y[:N], Y[N:N*2]
113
+ fC = [Y[N*2+N*i:N*2+N*(i+1)] for i in range(d)]
114
+ f0 = mean(fA)
115
+ V = sum([(val - f0)**2 for val in fA]) / (len(fA) - 1)
116
+ STis = [sum((fA - fC[i])**2) / (2 * N) / V for i in range(d)]
117
+ return STis
118
+ ```
119
+
120
+ Although the proposed Sobol-based attribution method allows us to determine the impact of any variables for a given prediction and thus to identify diagnostic ones, it lacks the ability to highlight the type of contributions made, whether positive or negative. Simple methods such as "Occlusion" typically include this information. Hence, we propose a variant that combines the importance scores of the total Sobol indices with the sign of the occlusion. We compute the difference in score between the prediction on the original input $\bm{x}$ and a partial version $\bm{x}_{\setminus i}$ with the variable $x_i$ occluded. Intuitively, this provides an estimate of the direction of the variations generated by the variables studied with respect to a reference state. $$\begin{equation}
121
+ \label{eq:sobol_signed}
122
+ \hat{\mathcal{S}}_{T_i}^{\Delta} = \hat{\mathcal{S}}_{T_i} \times \text{sign}( \bm{f}(\bm{x}) - \bm{f}(\bm{x}_{\setminus i}) )
123
+ \end{equation}$$
2111.12892/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2111.12892/paper_text/intro_method.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Multi-person pose estimation approaches usually can be classified into two schemes: top-down or bottom-up. Unlike the top-down scheme that converts the pipeline into two independent tasks -- detection and single-pose estimation, the bottom-up scheme is confronted with more challenging problems. An unknown number of persons with any scale, posture, or occlusion condition may appear at any location of the input image. The bottom-up approaches need to detect all body joints first and group them into instances second. In the typical systems such as DeeperCut [@deepercut:insafutdinov2016deepercut], OpenPose [@openpose:cao2017realtime], Associative Embedding [@ae:newell2016associative], PersonLab [@personlab:papandreou2018personlab], PifPaf [@pifpaf:kreiss2019pifpaf] and CenterNet [@centernet:zhou2019objects], keypoint detection and grouping are usually regarded as two heterogeneous learning targets. This requires the model to learn the keypoint heatmaps encoding position information and the human knowledge guided signals encoding association information such as part hypotheses, part affinity fields, associative embeddings or offset vector fields.
4
+
5
+ In this paper we explore whether we can exploit the instance semantic clues implicitly used by the model to group the detected keypoints into individual instances. Our key intuition is that, when the model predicts a location of a specific keypoint, it may *know* the human instance region this keypoint belongs to, which means that the model has *implicitly* associated related joints together. For example, when an elbow is recognized, the model may learn its strong spatial dependencies in its adjacent wrist or shoulder but weak dependencies in the joints of other persons. Therefore, if we can read out such information learned and encoded in the model, the detected keypoints can be correctly grouped into instances, without the help of the human pre-defined associative signals.
6
+
7
+ We argue that the self-attention based Transformer [@vaswani2017attention] meets this requirement because it can provide image-specific pairwise similarities between any pair of positions without distance limitation, and the resulting attention patterns show object-related semantics. Hence, we attempt to use self-attention mechanism to perform multi-person pose estimation. But instead of following the top-down strategy with the single person region as the input, we feed Transformer *high-resolution* input images with the presence of multiple persons, and expect it to output the heatmaps encoding multi-person keypoint locations. Our initial results show that 1) the heatmaps outputted by Transformer can also accurately respond to multiple persons' keypoints at multiple candidate locations; 2) the attention scores between the detected keypoint locations tend to be higher within the same person but lower across different persons. Based on these findings, we introduce an attention-based parsing algorithm to group the detected keypoints into different human instances.
8
+
9
+ Unfortunately, the naive self-attention does not always show desirable properties. In many cases, a detected keypoint also probably has relatively higher attention scores with those belonging to different person instances. This will definitely lead to wrong associations and implausible human poses. To address this issue, we propose a novel method that leverages a loss function to explicitly supervise the attention area of each person instance by the mask of the instance. The results show that supervising self-attentions in such a way can achieve the expected *instance-discriminative* characteristics without affecting the standard forward propagation of Transformer. Such characteristics guarantee the effectiveness and accuracy of the attention-based grouping algorithm. The results on the COCO keypoint detection challenge show that our models with limited refinement can achieve comparable performances compared with the highly optimized bottom-up pose estimation systems [@openpose:cao2017realtime; @ae:newell2016associative; @personlab:papandreou2018personlab]. Meanwhile, we also can easily obtain the person instance masks by sampling the corresponding attention areas, thereby avoiding an extra pixel assignment or grouping algorithm.[^2]
10
+
11
+ ![We choose two examples to show differences between the naive and supervised self-attention patterns. The association reference for the naive self-attention is averaged from all attention layers in Transformer. The association reference for the supervised self-attention is directly taken from the fourth supervised attention layer.](naive-vs-supervised.pdf){#fig:navie-vs-supervised width="0.98\\columnwidth"}
12
+
13
+ **Using self-attention to unify keypoint detection, grouping and human mask prediction**. We use Transformer to solve the challenging multi-person keypoint detection, grouping and mask prediction in a unified way. We realize that the self-attention shows instance-related semantics, which can be served as the association information in a bottom-up fashion. We further use instance masks to supervise the self-attention. It ensures that each keypoint is assigned to the correct human instance according to the attention scores, making it easy to obtain the instance masks as well.
14
+
15
+ **Supervising self-attention "for your need\"**. A common practice of using Transformer models is to use task-specific signals to supervise the final output of transformer-based models, such as class labels, object box coordinates, keypoint positions or semantic masks. In this method, a key novelty is to use some type of constraint terms to control the behaviors of self-attention. The results show that under supervision the self-attention can achieve instance-aware characteristics for multi-person pose estimation and mask prediction, without destroying the standard forward of Transformer. This demonstrates that using appropriate guidance signals makes self-attention controllable and helps the model learn, which is also applicable to other vision tasks such as instance segmentation [@vistr:wang2021end] and object detection [@detr:carion2020detr].
16
+
17
+ # Method
18
+
19
+ Given a RGB image $I$ of size $3\times H \times W$, the goal of 2D multi-person pose estimation is to estimate all persons' keypoints locations: $\sS=\left\{(x_i^k,y_i^k)|i=1,2,...,N;k=1,2,...,K\right\}$, where $N$ is the number of persons in this image and $K$ is the number of defined keypoint types.
20
+
21
+ We follow the bottom-up strategy. First, the model detects all the candidate locations for each type of keypoints in an image: $\sC=\sC_1\bigcup \sC_2\bigcup...\bigcup \sC_K$, where $\sC_k=\left\{(\hat{x}_i,\hat{y}_i)|i=1,2,...,N_k\right\}$ represents the $k$-th type of keypoint set with $N_k$ detected candidates. Second, a heuristic decoding algorithm $g$ groups all candidates into $M$ skeletons based on the association information $\mathcal{A}$, which determines a unique person ID for each keypoint location. We formulate this process as: $g((\hat{x}_i,\hat{y}_i),\sC,\mathcal{A})\rightarrow m\in\{1,2,...,M\}$.
22
+
23
+ Next, we present the model architecture and show how to use self-attention as the association information $\mathcal{A}$. We analyze the problems when using the naive self-attention as the grouping reference. We propose to supervise self-attention via instance masks for keypoints grouping. We present two types of grouping algorithm from the *body-first* and *part-first* views. Finally, we describe how we obtain the person instance masks and how we use the obtained masks to refine the results.
24
+
25
+ **Architecture.** We use a simple architecture combination that includes ResNet [@resnet:he2016deep] and Transformer encoder [@vaswani2017attention], like the design of TransPose [@transpose:yang2021transpose]. The downsampled feature maps of ResNet with $r$ stride are flattened to a sequence of $L\times d$ size and sent to Transformer where $L=\frac{H}{r}\times\frac{W}{r}$. Several transposed convolutions and a $1\times 1$ convolution are used to upsample the Transformer output into the target keypoint heatmap size $K\times \frac{H}{4}\times \frac{W}{4}$.
26
+
27
+ **Heatmap loss.** To observe what patterns the self-attention layers spontaneously learn, we first only leverage the mean square error (MSE) loss between the predicted heatmap $\mathbf{\hat{H}}_k$ and the groundtruth heatmap $\mathbf{H}_k$ to train the model: $$\begin{equation}
28
+ \mathcal{L}_{heatmap}=\frac{1}{K}\sum_{k=1}^{K}\mathbf{M} \cdot\left\| \mathbf{\hat{H}}_k-\mathbf{H}_k\right\|,
29
+ \end{equation}$$ where $\mathbf{M}$ is a mask that masks out the crowd areas and small size person segments in the whole image. After the model is trained only by heatmap loss, the keypoint detection results show the trained model can accurately localize keypoints of multiple persons.
30
+
31
+ **Issues in naive self-attention.** We obtain the keypoint locations from heatmaps and further visualize the attention areas of these locations. As revealed by the examples shown in Figure [1](#fig:navie-vs-supervised){reference-type="ref" reference="fig:navie-vs-supervised"}, using the naive self-attention matrices as the association reference poses several challenges: 1) There are multiple attention layers in Transformer, each of which shows distinct characteristics. Selecting which attention layers as the association reference and how to process the raw attention require a very thoughtful fusion and post-processing strategy. 2) Although most of the sampled keypoint locations show local attention areas, especially for the people they belong to, some keypoints still spontaneously produce relatively high attention scores to the parts of other people at a longer distance. It is almost impossible to determine a perfect attention threshold for all situations, which makes keypoint grouping highly dependent on specific experimental observations. As a consequence, the attention-based grouping cannot ensure the correctness of the keypoint assignment, leading to inferior performance.
32
+
33
+ ![Model overview. The model architecture consists of three parts: a regular ResNet, a regular Transformer encoder, and several transposed convolutional layers. Two types of loss function are leveraged to supervise the model training. The final output of the model is supervised by the groundtruth keypoint heatmaps. One of the immediate self-attention layers is sparsely supervised by the instance masks. In particular, we sample the rows of the attention matrix of the chosen attention layer according to the visible keypoint locations of each human instance, reshape them into 2D-like maps, and then use the mask of each instance to supervise the average map. In this figure, we only show a few keypoints of each instance for simplicity.](model4.pdf){#fig:model width="0.98\\columnwidth"}
34
+
35
+ To address the aforementioned challenges of using the naive self-attention for keypoints grouping, we **S**upervise **S**elf-**A**ttention (**SSA**) to be what we expect. Ideally, the expected attention pattern should be that each keypoint location only attends to the person instance it belongs to. The value distribution (0 or 1) in a person instance mask provides an ideal guidance signal to supervise the pairwise keypoints's locations to have lower or higher attention scores. Then we propose a sparse sampling method based on the instance keypoint locations to supervise the specific attention matrix generated by the self-attention computation in Transformer, as illustrated in Figure [2](#fig:model){reference-type="ref" reference="fig:model"}.
36
+
37
+ **Instance mask loss.** We suppose that the $p$-th person's groundtruth keypoint locations are $\left\{(x_p^k,y_p^k,v_p^k)\right\}_{k=1}^K$, where $v_p^k\in\left\{0,1\right\}$ is a visibility flag, i.e., $v_p^k=0$: not labeled, $v_p^k=1$: labeled. We take out the immediate attention matrix $\mathbf{A}=\operatorname{Softmax}(\frac{\mathbf{Q}\mathbf{K}^\top}{\sqrt{d}})\in \mathbb{R}^{L\times L}$ of the specific layer in Transformer[^3] to leverage the supervision. We first reshape the attention matrix $\mathbf{A}$ into a tensor $\tA$ of $(h\times w) \times (h\times w)$ size, where $h=H/r,w=W/r$. Then we transform the keypoint coordinates into the coordinate system of the downsampled feature maps. And then we take out the corresponding rows of the attention matrix specified by these locations. So we can obtain the reshaped attention map at each keypoint location: $\tA[int(y_p^k/r),int(x_p^k/r),:,:]$. For a person instance, we sample and average the attention maps based on its *visible* keypoint locations to estimate the mean attention map. We name it as *person attention map* $\mathbf{A}_p$: $$\begin{equation}
38
+ \mathbf{A}_p=\frac{1}{\sum_{i=1}^Kv_p^i}\sum_{k=1}^K v_p^k\cdot\tA[int(y_p^k/r),int(x_p^k/r),:,:].
39
+ \end{equation}$$ Assuming the groundtruth instance mask of the $p$-th person in the image is $\mathbf{M}_p\in \mathbb{R}^{\frac{H}{4}\times \frac{W}{4}}$, we also use the MSE loss function to supervise the attention matrix sparsely. Since the self-attention scores have been normalized by the softmax function, we need to rescale the $\mathbf{A}_p$ by dividing by its maximum value so that the rescaled $\mathbf{A}_p$ is closer to the value range (0 or 1) of the annotated mask. Note that the size of $\mathbf{A}_p$ is $\frac{H}{r}\times \frac{W}{r}$ while the groundtruth instance mask is constructed to be $\frac{H}{4}\times \frac{W}{4}$ size. So we use $r/4$ times bilinear interpolation to resize the $\mathbf{A}_p$ to have the same size as the instance mask. We formulate the instance mask loss as: $$\begin{equation}
40
+ \mathcal{L}_{mask}=\operatorname{MSE}(\operatorname{bilinear}(\mathbf{A}_p/\operatorname{max}(\mathbf{A}_p)),\mathbf{M}_p)=\frac{1}{N}\sum_{p=1}^N\left\|\operatorname{bilinear}(\mathbf{A}_p/\operatorname{max}({\mathbf{A}_p}))-\mathbf{M}_p\right\|.
41
+ \end{equation}$$ **Objective.** So the overall objective for training the model is: $$\begin{equation}
42
+ \mathcal{L}_{train}=\alpha\cdot\mathcal{L}_{heatmap}+\beta\cdot\mathcal{L}_{mask},
43
+ \end{equation}$$ where $\alpha$ and $\beta$ are two coefficients to balance two types of learning. In the standard self-attention computation of Transformer, the attention matrix is computed by the inner products of *queries* and *keys*. Its gradient back-propagation information is entirely derived from the subsequent attention weighted sum of *values*. By introducing the instance mask loss to supervise the self-attention, the gradient learning direction for the supervised attention matrix has two sources: the implicit gradient signal from keypoint heatmaps learning and the explicit similarity constraint from instance mask learning. Choosing approximate values of $\alpha$ and $\beta$ is critical for training the model well. We set $\alpha=1,\beta=0.01$ to balance both heatmap learning and mask learning.
44
+
45
+ ::: wrapfigure
46
+ r8.5cm ![image](grouping.pdf){width="55%"}
47
+ :::
48
+
49
+ When the well-trained model makes a single forward pass for a given image, we can decode the multi-person human poses and masks from the outputted keypoint heatmaps and the supervised attention matrix in the immediate attention layer. We first conduct non-maximum suppression in a 7$\times$`<!-- -->`{=html}7 local window on the keypoint heatmaps and obtain all local maximum locations whose scores exceed the threshold $t$. We put all these candidates into a queue and decode them into skeletons using the attention-based algorithm. Using the self-attention similarity matrix with quadratic complexity inevitably brings redundant computation. However, in part, this also makes minimal assumptions about where the keypoints of the instances may appear and the number of persons in the image. Next we present the self-attention based algorithms from the *body-first* and *part-first* views.
50
+
51
+ **Body-first view**. This view aims to decode each person skeleton one-by-one from the queue. Assuming we have sorted all types of candidate keypoints in descending order of score in a single queue, we pop out the first keypoint (maybe any keypoint type) to seed a new skeleton $\mathcal{S}$, and then greedily find the best matched adjacent candidate keypoint from the queue.
52
+
53
+ For the seeded $\mathcal{S}$ with the initial keypoint, we find the other keypoints along the search path according to a defined human skeleton kinematic tree. When looking for a certain type of joint, the already-found joints (denoted as the set $\mathcal{S}_f$) of this skeleton $\mathcal{S}$ induce a basin of attraction to "*attract*\" the joint that most likely belongs to it, as illustrated in Figure [\[fig:group\]](#fig:group){reference-type="ref" reference="fig:group"}. For a certain unmatched point $p_{c}={(x,y,s)}$ in the candidate set $\sC_k$ of the keypoint type $k$, we use the mean attention scores between the currently found keypoints and $p_c$ as the metric to measure the attraction from this skeleton[^4]: $$\begin{equation}
54
+ \operatorname{Attraction}(p_{c},\mathcal{S}_f)=\frac{1}{|\mathcal{S}_f|}\sum_{(x',y', s')\in \mathcal{S}_f}s'\cdot\tA[y,x,y',x'].
55
+ \end{equation}$$ Thus the candidate point with the highest $\operatorname{score} \times \operatorname{Attraction}$ is considered to belong to the current skeleton $\mathcal{S}$: $p_c^*= \operatorname{argmax}_{p_c\in \sC_k}s\cdot\operatorname{Attraction}(p_c,\mathcal{S}_f).$ We repeat the process above and record all the matched keypoints until all keypoints of this skeleton have been found. Then we need to decode the next skeleton. We pop the first unmatched keypoint to seed a new skeleton $\mathcal{S}'$ again. We follow the previous steps to find keypoints belonging to this instance. Note that if the $\operatorname{Attraction}(p_c^*, \mathcal{S}_f)$ is smaller than a threshold $\lambda$ (empirically set to 0.0025), this type of keypoint in this skeleton is set to be empty (zero-filling). It is also worth noting that we also consider the keypoints that have already been claimed by a previous skeleton $\mathcal{S}$, but only when $\operatorname{Attraction}(p_c,\mathcal{S}'_f)>\operatorname{Attraction}(p_c,\mathcal{S})$ do we assign the matched $p_c$ to the current skeleton $\mathcal{S}'$.
56
+
57
+ **Part-first view**. This view aims to decode all human skeletons part-by-part. Given all candidates for each keypoint type, we initialize multiple skeleton seeds $\left\{\mathcal{S}^1, \mathcal{S}^2, ..., \mathcal{S}^m\right\}$ with the most easily detected keypoints such as nose. Then we follow a fixed order to connect the candidate parts to the current skeletons. These skeletons can be seen as multiple clusters consisting of found keypoints. Like the *body-first* view, we also use the mean attention attraction $\operatorname{Attraction}(p_c, \mathcal{S}^t_f)$ from the found keypoints in the skeletons as the metric to assign the candidate parts (Figure [\[fig:group\]](#fig:group){reference-type="ref" reference="fig:group"}). But in the *part-first* view, we compute the pairwise distance matrix between the candidate parts and existing skeletons, and then we use the Hungarian algorithm [@hungarian:kuhn1955hungarian] to solve this bipartite graph matching problem. Note, if an $\operatorname{Attraction}(p_c, \mathcal{S}^t)$ that represents a matching in the solution is lower than a threshold $\lambda$, we use this corresponding candidate part to start a new skeleton seed. We repeat the process above until all types of candidate parts have been assigned. This part-first grouping algorithm can achieve the optimal solution for assigning local parts to the skeletons although it cannot guarantee the global optimal assignment. We choose the part-first grouping as the default. And we compare both algorithms on the performance, complexity and runtime in Appendix [6.5](#runtime){reference-type="ref" reference="runtime"}.
58
+
59
+ The instance masks are easy to obtain after the detected keypoints have been grouped into skeletons. To produce the instance segmentation results, we sample the visible keypoint locations $\left\{(\hat{x}_m^k,\hat{y}_m^k,\hat{v}_m^k)\right\}_{k=1}^K$ of the $m$-th instance from the supervised self-attention matrix: $\hat{\mathbf{A}}_m=\frac{\sum_{k}\delta(\hat{v}_m^k>0) \cdot \tA[\hat{y}_m^k,\hat{x}_m^k,:,:]}{\sum_{k}\delta(\hat{v}_m^k>0)}$. Then we obtain the estimated instance mask: $\hat{\mathbf{M}}_m=\frac{\hat{\mathbf{A}}_m}{\operatorname{max}(\hat{\mathbf{A}}_m)}>\sigma$, where $\sigma$ is a threshold (0.4 by default) to determine the mask region. When we obtain the initial skeletons and masks for all person instances, the joints of a person may fall in multiple incomplete skeletons, but their corresponding segments (sampled attention areas) may overlap. Thus we further perform non-maximum suppression to merge instances if the Intersection-over-Max (IoM) of two masks exceeds 0.3, where Max denotes the maximum area between two masks.
2201.08265/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-12-15T18:49:38.079Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36" etag="VQH3GN5_QuvPgJlegLAb" version="15.9.1" type="device"><diagram id="vkb3iL8UUjYdL3cQFyYd" name="Page-1">7V1bk9pIlv41FTHzQEXeJT36Nt7d6O7xjnt3e54cKhAUYwrRoLLL8+tXAqVARwlKCeVFuOzpcSEgoT5955rnnLyj755ePm7jzeOv6SxZ3RE0e7mj7+8IIZiF+T/FlR+HK0EQHC4stsvZ4RI+Xvi8/HdSXkTl1eflLNnVXpil6SpbbuoXp+l6nUyz2rV4u02/1182T1f1T93Ei/IT0fHC52m8Shov+7/lLHs8XA35yav/I1kuHuUnY1Q+8xTLF5dL7B7jWfr95LPohzv6bpum2eGnp5d3yaoAT+JyWOhvZ56tvtg2WWc6b/gX/vPNmyT7++b7LKJff5n8EX/814QcVvkWr57LX7j8stkPicA2fV7PkmIRdEfffn9cZsnnTTwtnv2e3/P82mP2tMof4fzH5pcqv+e3ZJslLyeXyi/5MUmfkmz7I3+J5AyPDm8pGYPDconvR/wZL689nmDPQl7e9/KeL6q1j7DkP5TIdECJeoTSBVQICm2iwgZGZb5crd6lq3S7fy+dThM+n+fXd9k2/ZqcPEMFjehsGLZNOKZ1ukWkASyNUBNYyoQhYHkXYLEVoZywqA5TrgObMHEFTJghQzCJEcBEQ+QapkABk1hlhcCl+S+a4yWNrvjzuTBGb/Hxx/ynRfnv/i0PNXzlq4qFJodl3uQvIHjzolqCFf/j77L4+ctT+ahcNv/FHuBH5dcOX1BevubebtMszpbputREs+U2dxYOj5N4V6xf3ORlbvffrJaL4vLTcjYrPqqmltbpOmlopPLiIMoIAdsXsaaW5032EGaIPOFN6HgW1GElUue70/GRj8oLeF6ERa6VlwwDvMaJoaYvZhsn3I5TvkweNSXtGMW7zSGUmi9fClzfLrbxbJkDdiKfQRKLBDUlehYn4XyqkmgxDZOH+TC3ACMCmNr0RojiBhBj+GsEUreEPwH4I9f4a4Rot4Q/B/gL1/hrBIM3hD+EP2jCjxVuGjYVimONkPGG4McBwB+7pr9GLJqsZ2+KhORdFTGcYJ5jsf3xR+lD7x/8s3hwz+XD9y+nT77/IR+9LLM/5Br5z/88+fn4luKBfMdZ+Hfp83aaaPgYWbxdJJmGMk5mMrd65nae3C6VuySvbZNVHrx9q2dkVfew/IRP6XIf2J4xVoyH9SUOv3n5LnKSQYULAdoxBBY6INNYaE+p6te+gmWqUL4Ty0q2IG22VLzENV62sNIGx5hfHBOAGrgnxwQIwULLFNMI+PUodlBdfpKMj1SRRXWS0aAnyQgDCxHLLNPIfmix7NTsHRnXwrKaicUeqDLuF8vCOjlw1JdlFCzE7LKMaOSObsgp1vCJrcYkRCcl1d0nbhNYtQFCF1XDFULONIVcktETIc+1U1g3AX2lfCLAQralnHhjS9rDtSuIRsZpTYj84ldbE+BhW7cmtjOM83nMkcKazOfTWe6vKazJLIge0EWadcmwsxrcXKjqTZq0MZZiIbYzjI7xJwB/4hp/2ylGx/hzgL+y3som/jopxtvBH8IfuYZfJ/d2O/DjAOBPXeN/dWLKWoZ9Fu8eq3KEs/ei1eOTDkd7aOGXxwctVxCAJfTT7WAh+F1Me3xDZalMp9utE074RTgBeEJ7Ek4StVxHILt8ozr5qqFz7zXu2E7ES6M6OhUX1Rknor4qTkAvExRymqbc1Tk6a4l460ou8ItyIWAK7kk5QusLMWFZyxENy
t2OI+2bH0110lh+ZOWHknihKfGSmZ5I/ATLqiLp1/QV+YmoLyRsi7xO6s6XFP1QrNPeGPLLzhA8lJ0Rju2MTr6yK+k0o7c65XwjnGe+NMz0sb6+NPSQuGXCXV0D2j9dcKLWvKScZ3VUMOqCTNGmHIgDuW0dN1RBaM2w+mxWpY/WTjnP6kNhHACZom1W4b4szK6aptxQBaIjMav6hPNMx0Fr2DcL33AIQ8uEM1EramcDx7M0OTRW8EZqW73G3qywygg2VJ58NFZPO4XhmdVrGKu+eXKYtLS9NSNjEodbM/kd/Vx+WLrNHtNFuo5XH45X306ft9+aOzkgMjBX7q5vKP2KPyN8jwKMRBBiHApeH98iSKh69tqdbIEta0wyFH37JW8HIq/BjciRKti2+LM/QfX0a86Y+MfJyzbFC3bnvy8P6t+3/JizXwuoffD6/IfDFxhWWK7eIXGl6+u79gaL0fXFxa/k9tjEBeSpBGkRF1iWQGyIi8bmTqcJPd41VmFQF0Tk8JnTPVzRJK3sfBh+8KLRUuxBEGMAMdZATDGqMjSFl9HS6SHwkjM6PcFLI608cpHmQKQVQz/tirROWtUpRUHShygmE9qkqE5W0ClegvuEF9fImY1bpCMwOCNwLNFSw/jLUDhDL3DLUJ0chVO8wKBo13h1Gko+RolmdYmmrt1urhHpOGUomEpH3XqR3PcoRVCv4Bp6nLl38oxxXaBVk/btCrTRns4hEJNfqELMLUW9D1IoPMvBLV5DD/n2TqThtDrXJloYHfk0AGBwSJNbmyN8j1FC6hVcRkMUhXA/7P82hXvOi79K4d7/MRJRMxQ0hdtm65KwPYHHLfzQXaLCNf62J/A4xh/En4y4xt/2BB63+EP4qWv4bQ/gcUx/4NvRwDX+Vzc7jGDGunQxWisrpDL2pLICGishZ5V3n4hQX4gLsJDhUjkxVH+D6aEvNjjmV6UmBscu8aAnx0A7NGOWKTbYXCGPZ6wL3RIx3xRZCEhGe5KMUOC/hHZZFhjpkvBsxrq+KvOsFwccSEh5X5YRsBCyzDKjGSTvnGLffGKZThzBNJcrhFz3uA5JRk+EfILle6o+855SPgEVqNy2lA9VEu/3jHWhOxvMM2tC0FDWhDu2JhZqyV1nVcBpCaqx6jZ3zALfqzRg9T0VzbObLe4BBf7XkhOv8LJQS+56n6Au0oy7FmnvyzRA9T3jbinqfy058Qmv0EItueOdb5AgVA3ttCnRodEoewjA4F6dW4LqRMVuS8mRV3hZKCV3vJcMBNq11x0aLaUYAjAg0G6dyND3IEUgr+CyUEruujaqLtDUtc8del9KDqvJHPuQ3scoFHmFl4VScsdbK0CiXZvoyPdScrgX5dbmRL6HKCHyCi5ViCJWWSF96T4lf8RN/Pmcyicmu+W/86tv8hfkeLwcn8wvzONp/Q2/L5+SXf7Eb8n3/P//kT7F69M3iEX57/5jH7bnr3z87be7YvIVW3y54++yxySL/3L/18Ol8+/PgTn8LvLyNfpqm2ZxtkzXd+W8pOU2Z8jhcRLvivULKiyn8erNarkoLj8tZ7P9sKQByENAVYiq0UUoNngwHPQ2HIFUMdt1BGre/XNrzOOn5erHYZX8qfhps38jpSz/NytZt96zbluyrv4a7z52tylebvGD6yK1eVw2BCqnxeFb/TRSRYVzqVLF5VaIiUodXhdRHDkU0T5fybgcdflSv/7y6SBS872M7TRkbLzCRIO6g8NlwHdag0CsCpMqa+PEx/mUpqsb1J9Uo/1WVXZi8JarMk+OLOww7tBF6tyeEmkElc4ZpZFY2z3Gm+LH+Sp5KWvQ3p6Uo01X8W63nNZxBjmOB5TQpMhkNHIcKAlRGF6C15sqMFBaxPB9SFD1B94i/aKwbusarhGLNDKHr4QoXlFN4KmqRtE9Q1H1R+6qdK5G7basaT5066ZR3vtxlKfKnFFreWrkV98WA80OWPYsdCUeB80OAVjHMNMw6tZSM2aq6VZCR
35VQlOwyxWKnjoOOkIhtqvVMNLIp497dhCjdb+iOmjqhCoS9VOqSDU4uKtZDcT0dg+D5a4XGCsrD0Nys42BkdGZLGYgU9QKWIXM+9qfJmSKMNAqZL7X/yggC5zuL2Lke5+CArKwaQCsQuZ7DY8CMunqu4JMIxg/eimoHTbgiUynCZ/PVZ4IFTSiA+E6YQzAKnv3T2ClkcJtpcxU3gsjjbi2kwM4CFAEarkm/6gyQSj9lOGBwkYLdaBrzJNwxlSEDMkDHaoWaoKDOs6q6jOrjeYYa8Qjo0MZJMgodo6y0QjESZg3wUBnUIVnZBllo0ELrJ4Mp8lUifJDyBm/mKfpgDKBKIfOUTYa5zjiMgIoE+co64RGY5+KV9n41nxkpUE9SUg2bAyXdaedd10g9zgGK5lOSWKdkHLco/G6EI16RjQM6EH6Eq2xkRzZJtrVkz5HMCCvso/j02ngtEIW9mUaBQtR20QbatinzzPyuug0v2YxVhvZ1TiivkSD+R14kLtxohkdMeHGFybe+cKk2/78OKfhVY6HhjhHfokzjWDlUG/DAQSa2RZoopMOG/9EvCrtNz7bIYayHdix7ZDMulBGv69hfn8sXsYXekZqHD1XCo3VPR771gn+Loufv3xtNFI8+NqpdGoeS0GsW8by4hBGkVFYn6FoY1YUtRBmzCi2Nwo6oQ//cBe83T1vNuk2uwvev9JJ6WMJ0tgkD/UIZc7LspnXtZQ9p43u8Oa+pmVf1mjJiyOUwZgcSpyjbLRKxk1cRuHkktA5ykYLaxyhDEa4Ueoc5auTWWPYCSLaWVPiV2l6Q/txWePRPfoF3OMErGQ8JOnWc6O3E3Q5YaJOzxvMmuoTTU4H9YZoBNCD9iUaw+Q+ImFECQ5DTOU5SdW+UHD6LEcghjHNQnp1us///UhpOjXUnWf5FxLVWciiviysehzkSsyyuqM6yb6R1b4x7/wXqspz9RFnz1Oq0lhriHTgmUiDESMUNsXpizQwUQT26RkXaQdZMeV8ifv8mfK/19RXLfUVRPewClpVlh/cW02n0qGPqBlEmYPiT9VR1bbL8qnNBJajsnyOnBtNmwksS2lCWDDOnacJqdUElh0uw4Jx7t4BNDoT2o+CcaZomLKM8tW5mzGkCam2k009SxPCgvGgt5MNuSdsO9nMmwSNyXiuA9U8i+dgybjonZGGJePcdoqG6aRoRl8yLi3k+LQaKBkXvVPSoL6GW542hplOlmpsriCuSy+Tw/ydOSns6gOHKyflqPo9rOWl2ul9yTtfBBrW8greW6JBLa9AtkX6/NBv9xVyfz4n+W/3Wh+nWx/HFAOx7NbHsRtMfMH6OI6bCUbLFuIGE1+wcotT5yhf3Yk5hmQB047gmGduNawpCmTi/+qaIhGAlYxb4aF6MT2uKepANN9at8CGrQj7Eu1yTRFnUa2myDIHb7BNExZ6sNC1UeGqtODr3rtPbnXA4N67Kidge++dq5J8zgO19WtoptQ7gEDVLGdnnXDy9KObChk41O6um/D5YKm7Tl6Z5cSdDATaPTnJOl88OQISdwFMt3Woy21JAZp217hOJ2J3qulHp7VQo6XS9AqySQqNL2xgcIex95kzVLTsIBkn2w2m9TCc5MSdxwZXz5jrufFzFk4NDe/XcTsTDESF99bwYKHAdp8ZN5N+1PUlsBVPQlu5e8czUEEY9C4fCVzzTJV77Hh6JAgUrz2NXV74Pd593V0TW+a2IKsLhTLkU4SGcRk+TnNSJ9tLcaXK2tXH/g9hrWAOEiu2+phda6XKF/rBmzfPi6cc60Om4JU+hMijvb0hj2hPg7oiz4f1NJ0t14tX4hSsEAzUGHjAnfZMqKWjtd8v5/PnXV3J/By8iOpFc0LRP2GZFKr8phtSJIttkuTPS0Wy++nYQevmhgXNyBoLxVFNBunR3vJqiR6/pbOCHH9L4ux5m/z03KC4OdLNNjc0Mqmvh2rvoxB4ZhCj9yE+nn7dtyyi06qm42Ghket8pcNBJ
EHjS3EYOkbVn55DZwnvtKxxPqiysjXT0dUQ9C3ZkB+72+zXq4cv2yr4zbKcL3untNian3+54+/i1eYx/sv9Xxu79aqlfNmwH8DYMOCn4khhbeSY59qhkcY24oUqrXtd5AvqMj4vF09x405fvKX9924GuEkCA3cRKSJM2rxHjBq7R93KMdVn3V9b/HuPELk73WItnNiLFZ35g0/JNtc9ez/vIG/x7rES1bP3qjVRrzt1Xx5P7ouBkofHVJN2gBPZ1yRFYB3jNqhb3/qghNSjUDs1/DoyiqCornRgfKFLDQqcoBCecWaaG4Eq8+GTflec3CxnaDo6uTnQKdPyDDJF05FVyLqVG6k10Chm1MkSsfY9aeRXOMZwXaURHt1H/ZQao61LGVdrGmF4p0hksY1ny5wNpzH4/m8zNp/z4q8qNhf7PwMJOa1bDhpUJeWn4YmiTFi6WQbE3GhnoRHNyBBVwGZVNxqdkGUGNMxcg2Z04JUZ0Ah3DZrRHi0zoFHhGLRQVWvgOWhMZQqsgmZ07LMZ0HjoGrTxhWI0Iq5BG18wRkPkGrRbCcfao6zAr8pfGGUxUrmf10ZZxVKWj5EILURZ83nMEVJEWfPpjISqKGsWRA9oqAo2OR2vmivS7OW1G2OF/sdYsOiPK3pjrKo7/yOsBmSiudVoFTL/46sGZDK8cQWZ/9FVA7KouVdqE7LI/9gKQiZQ0wBYhcz/yKoBmdxHdQWZ/3FVQzBlk7wryPyPqhqQydJQV5DdSkzVvsVV/qbtwVfoWfAlb0nlMaD76PRPfUHtQIwplq0vZToQizQCsdeq0+IVAYpq9xz2e6O+4XgAzyhF2DIHDFSaDj4TilysPXyKs8dpvLoL3v5yF7z/kv/7tZjfO/h59812CED06TTh82KM3JDtDw2hoYJGtDCItSKp4sGnOMs/br2/QvaFfAPYTMhQShSurELypHozYDNbq1lHTdnhBlC/EnZPREUnmGXCDj/tAI7FO/B7NOXXXI6Ar0oDsGJzgygKsImxAuxIIxNjxxs6jlZjJDpx6ovybPl8t9N4AhG0OPb7R7CSu/C+Ppe/fbJ6SL9/OF5Qln73Dw8qrd4aHxCvPMMwYJc8w4DcC3F1d1JExD2PSPUhoFdJBPf4pHdNL4rIORv/OHnZpnjB7oILDOY58PL44KPMHVYc1D0lSJXrc9JDW8yFyZ/9n80szpKfroM2ABXnRDGRAcuMnZUWWoJUOU0n1Pg1yeJXahx1nqKg1DI1dHK3HmXVanZ2nS2zH//YW6V0fWpwrzCuUlbajSvyq2WGRnVnHu6Q61pQDkdZYbCQ4TxLhetrDFDdEtCCSaWQuooACPppkvFVgVN7Nl46m54oBA7mb3PSUyMIBNMEtjWCKvv+c2sEeQuq2erNchbLGqHbdFwzTdldNIlpRyLU1RsVvT3RGxPgAIS0p94AR4YFoW210W1E70Uj1evsXTDnGZkzUkJ75r9nRioE6c1qoHxXsoVwiCayzbYhplJUbAtug26+6TYhgCcNXRnt3ehzLrk1ug0xc+IqD7zj6cUn9jbdZo/pIl3Hq1NbO33efhtgGEpFuXZdKIf9eEpO0dfwQnJy25YXq3LRtxkeSns6erZxMRTbbCeMsCq93Tv06GZEh1Jro6MPh+MRZHloZ8cN0Me6riLD6Sp0T27Fb/MruS34UH4bcuy3YY3K72tb+qJghoLgrlFQYefoJiGnlFT3SjWXwWpTH5E6xtt6exFEjcEpqkIfixX31S0aE2jYcb88wb63jypAI45H9FTT08YEGnU8oodgnYSTZ6AxxyN6CNZJm3gGGnc8oqcqPxkRaDRyPAPk+FuMCLTA8YgeIjEafeKoPcLCvkVYQV3rkMr97FwvEDSXsjs4pdrndl+hfpYhntz3y/16FLZ69O3WyxeCdDLOAY2Yz1KXQtVrIKfGHzVMEZqa6DdoblqjixrwLE81UuDaWSWvmB+COgcK89b6W891fUdCrsn1rp0FIajEouUm9
1CdBf+5o6vljO7efP/v//r68X8f0ce//y7vmvvi8cZZkO/yj0lesud4Nf7TIE9OYplER4FsuHMKGT3r4U1A4zzhtOHdYawqExug3lzJJW/OeWtw6fd0k9/cRXH3XsmkPuMYjMOLVM0LNsnUrQa5hLnWWVs7CekUwHPdgcUuVx9rLQ1yLSQpF+t0YozSgpo9NYa6NNITDIuP+54EMgHl0DQwdkqMkq0D1DAD1ac6duzcKvP4abn6cVgnfyp+2uzfeqBgWf2M1nuNuS01Zv01zQ/+sFx8qylL/08ru0oBkhB0b8lc4okkCIUkYLklMLgCdFhyrTh2SEcpnSqb81LiStk0TkwMeh6ZSVE9IsAIai3DymbAymd7abSuVu2Smm0/8Iq5ZBrlwJmiPa0aDVoW6k20/OE2LRT+8eW5Qn78NZ0lxSv+Hw==</diagram></mxfile>
2201.08265/paper_text/intro_method.md ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ In Few-shot learning a model learns to adapt to novel categories from a few labeled samples. Common practices such as using augmentation, regularization, and pretraining may help in such a data-scarce regime, but cannot circumvent the problem. Inspired by human learning [@lake2015human], meta-learning [@hospedales2020meta] leverages a distribution of similar tasks [@garcia2018fewshot] to accumulate transferable knowledge from prior experience which then can serve as a strong inductive bias for fast adaptation to down-stream tasks [@sung2018learning]. In meta-learning, rapid learning occurs within a task whereas the knowledge about changes in task structure is gradually learned across tasks [@huang2020graph]. Examples of such learned knowledge are embedding functions [@vinyals2016matching; @snell2017prototypical; @garcia2018fewshot; @sung2018learning], initial parameters [@finn2017model; @Raghu2020Rapid], optimization strategies [@li2017meta], or models that can directly map training samples to network weights [@garnelo2018conditional; @mishra2018a].
4
+
5
+ A fundamental assumption in meta-learning is that tasks in meta-training and meta-testing phases are sampled from the same distribution, i.e., tasks are i.i.d. However, in many real-world applications, collecting tasks from the same distribution is infeasible. Instead, there are datasets available from the same modality but different domains. In transfer learning, this is referred to as heterogeneous transfer learning where the feature/label spaces between the source and target domains are nonequivalent and are generally non-overlapping [@day2017survey]. It is observed that when there is a large shift between source and target domains, meta-learning algorithms are outperformed by pre-training/fine-tuning methods [@chen2018a].
6
+
7
+ A few work in computer vision addresses cross-domain few-shot learning by meta-learning the statistics of normalization layers [@Tseng2020cross; @du2021metanorm]. These methods are limited to natural images that still contain a high degree of visual similarity [@guo2020broader]. Cross-domain learning is more crucial on variable-size order-invariant graph-structured data. Labeling graphs is more challenging compared to other common modalities because they usually represent concepts in specialized domains such as biology where labeling through wet-lab experiments is resource-intensive [@Hu2020Strategies] and labeling them procedurally using domain knowledge is costly [@Sun2020InfoGraph]. Furthermore, nonequivalent and non-overlapping feature spaces is common across graph datasets in addition to shifts on marginal/conditional probability distributions. As an example, one may have access to small molecule datasets where each dataset uses a different set of features to represent the molecules [@day2017survey].
8
+
9
+ To the best of our knowledge, this is the first work pertaining cross-domain few-shot learning on graphs. To address this problem, we design a task-conditioned encoder that learns to attend to different representations of a task. Our contributions are as follows:
10
+
11
+ - We introduce three benchmarks for cross-domain few-shot graph classification and perform exhaustive experiments to evaluate the performance of supervised, contrastive, and meta-learning strategies.
12
+
13
+ - We propose a graph encoder that learns to attend to three congruent views of graphs, one contextual and two topological views, to learn representations of task-specific information for fast adaptation, and task-agnostic information for knowledge transfer.
14
+
15
+ - We show that when coupled with metric-based meta-learning frameworks, the proposed encoder achieves the best average meta-testing classification accuracy across all three benchmarks.
16
+
17
+ # Method
18
+
19
+ A *domain* $\mathcal{D}=\{\mathcal{X}, \mathcal{Y}, P_{\mathcal{X}, \mathcal{Y}}\}$ is defined as a joint distribution $P_{\mathcal{X}, \mathcal{Y}}$ over the feature space $\mathcal{X}$ and label space $\mathcal{Y}$. We denote the marginal distribution over feature space as $P_{\mathcal{X}}$ and a parametric model over joint distribution as $f_\theta: \mathcal{X}\longmapsto \mathcal{Y}$ where $f_\theta(x)=\{P(y_k|x, \theta)\ |\ y_k \in \mathcal{Y}\}$. The model parameters are learned by minimizing the expected error over loss function $\mathcal{L}$: $\mathbb{E}_{(x,y) \sim P_{\mathcal{X}, \mathcal{Y}}}\left[\mathcal{L}(f_\theta(x), y)\right]$. In cross-domain few-shot learning, it is assumed that two domains exist: source domain $\mathcal{D}_S$ and target domain $\mathcal{D}_T$ such that their marginal distributions are different $P_{\mathcal{X}_S} \neq P_{\mathcal{X}_T}$, and $\mathcal{Y}_S$ and $\mathcal{Y}_T$ are disjoint. The source domain is available during meta-training phase whereas the target domain is only seen in meta-testing phase. During meta-training, tasks $\{\mathcal{T}_i | i=1...N \}$ are drawn from a distribution of tasks defined over $\mathcal{D}_S$, i.e., $\mathcal{T}_i \sim P_S(\mathcal{T})$, where each task consists of two non-overlapping small datasets:$D_i^{support}=\{(x_j, y_j)\}_{j=1}^{k\times n}, D_i^{query}=\{(x_j, y_j)\}_{j=1}^{k\times m}$. $k$ denotes the number of sampled classes and $n$ and $m$ are number of examples per category, i.e., *k-way n-shot learning*.
20
+
21
+ In the meta-training phase, the model's error on the support set provides *task-level* update signals, while error on the query set after the model adapts to the support set, provides *meta-level* update signals. During the meta-testing stage, the model is expected to quickly adapt to task $\mathcal{T}_j \sim P_T(\mathcal{T})$ by only accessing the support set for that task. The tasks in the meta-testing phase are sampled from $\mathcal{D}_T$ and $P_S(\mathcal{T}) \neq P_T(\mathcal{T})$. It is noteworthy that learning in cross-domain few-shot setting is more difficult than learning in the transductive setting of traditional meta-learning where $\mathcal{D}_S =\mathcal{D}_T$ and as a result $P_{\mathcal{X}_S} = P_{\mathcal{X}_T}$ and $P_S(\mathcal{T}) = P_T(\mathcal{T})$.
22
+
23
+ In cross-domain few-shot learning, it is assumed that: (1) there is a domain shift between the source and target domains ($P_{\mathcal{X}_S} \neq P_{\mathcal{X}_T}$), and (2) the feature spaces are equivalent between domains ($\mathcal{X}_S = \mathcal{X}_T$). This makes sense in computer vision where various image acquisition methods such as satellite images, dermatology images, and radiology images share a similar feature space with natural images [@guo2020broader]. However, in domains where data is represented as graphs, this assumption does not hold. For example, two molecular property prediction datasets may have different node/edge feature spaces due to the method used to generate the datasets (e.g., one may contain additional atom features such as formal charge and whether the atom is in the ring) [@hu2020open].
24
+
25
+ As such, we go beyond cross-domain few-shot learning and investigate its heterogeneous variant for graph classification. In this setting, we assume that: (1) Each task is possibly sampled from a dedicated domain different from all other tasks, either in meta-training or meta-testing phase, i.e. if there are $N$ tasks in meta-training and $M$ tasks in meta-testing phases, there exist $|\mathcal{D}|\le N+M$ domains. (2) Tasks are heterogeneous, which means they may have nonequivalent non-overlapping feature spaces, i.e., different dimensions in addition to distribution differences and disjoint label spaces: $\forall i \neq j: \mathcal{X}_{\mathcal{T}_i} \neq \mathcal{X}_{\mathcal{T}_j}, P_{\mathcal{X}_{\mathcal{T}_i}} \neq P_{\mathcal{X}_{\mathcal{T}_j}}, \mathcal{Y}_{\mathcal{T}_i} \neq \mathcal{Y}_{\mathcal{T}_j}$. (3) Tasks can be grouped based on their *meta-domains* which essentially defines the conceptual domain of a task. For example, all datasets that represent social networks can be grouped under a *social network* meta-domain despite the fact that they may have different feature and label spaces. We investigate whether there exists underlying knowledge that can be transferred across these meta-domains.
26
+
27
+ <figure id="figarch" data-latex-placement="ht">
28
+ <div class="center">
29
+ <img src="arch" style="width:175mm" />
30
+ </div>
31
+ <figcaption>The proposed model for cross-domain few-shot graph classification. Graphs from sampled tasks are augmented to one contextual view and two geometric views and fed to three dedicated encoders resulting in three representations of the same graph. An attention mechanism is then used to aggregate the representations into a single graph representation. The parameters of the encoders and the attention mechanism are learned end-to-end using an arbitrary metric-based meta-learning approach.</figcaption>
32
+ </figure>
33
+
34
+ Graph-structured data can be analyzed from two congruent views: a contextual view and a topological view. The contextual view is based on initial node or edge features (for simplicity and without loss of generality, we only consider node features) and carries task-specific information. The topological view, on the other hand, represents topological properties of a graph which are task-agnostic and hence can be used as an anchor to align graphs from various domains in the feature space. We exploit this dual representation and explicitly disentangle them by designing dedicated encoders for each view which in return imposes the needed inductive bias to learn task-specific domain-invariant features. In a heterogeneous few-shot setting, the topological features can help with knowledge transfer across tasks whereas the contextual features can help with fast adaptation. We also use an attention mechanism that is implicitly conditioned on the tasks and learns to aggregate the learned features from the two views. We use a meta-learning strategy that simulates the generalization process by jointly learning the parameters of the encoders and the attention mechanism. As shown in Figure 1, our method consists of the following components:
35
+
36
+ - An augmentation mechanism that transforms a sampled graph into one contextual view and two topological views. The augmentations are applied to the initial node feature and graph structure.
37
+
38
+ - An encoder consisting of two dedicated GNNs, i.e., graph encoders, and an MLP for the contextual and topological views, respectively, and an attention mechanism to aggregate the learned features.
39
+
40
+ - A meta-learning mechanism to jointly learn the parameters of the dedicated encoders and attention model based on error signals from the query set.
41
+
42
+ Recent works on self-supervised learning on graphs suggest that contrasting graph augmentations allows encoders to learn rich node/graph representations [@pmlr-v119-hassani20a]. In this work, we are specifically interested in task-specific and domain-agnostic views of graphs to help the meta-learner to gradually accumulate domain-agnostic knowledge while utilizing task-specific information for fast adaptation. We use both feature-space and structure-space augmentations as follows. For the contextual view, we considered three feature-space augmentations on the initial node features including: (1) Heterogeneous feature augmentation [@duan2012learning] where the initial feature and its projection by a linear layer are concatenated and padded to a predefined dimension, (2) Deep set [@zaheer2017deep] approach in which we considered the initial node feature space as a set, projecting each dimension independently to a new space using a linear layer, and aggregating them by a permutation-invariant function. This augmentation can capture the shared information across tasks with overlapping features when the alignment among the features is not available. (3) Simple padding of the features to a predetermined dimension. Surprisingly, we observed that the simplest augmentation achieves better results. We speculate this is because the tasks are not sharing overlapping features.
43
+
44
+ For the topological view, we apply one feature-space and one structure-space augmentation. In the feature-space augmentation, we replace the task-dependent node features with sinusoidal node degree encodings which allow the model to extrapolate to node degrees greater than the ones encountered during the meta-training stage [@vaswani2017attention]. Because node degrees are universal properties of graph nodes, encoding a graph with such initial features will capture task-agnostic geometric structure of the graph. We also use graph sub-sampling to keep the degree distribution in a similar order of magnitude across domains. For the structure-space augmentation, we compute graph diffusion to provide a global view of the graph's structure. We used Personalized PageRank (PPR) [@page_1999_stanford], a specific instantiation of the generalized graph diffusion. We compute the eigenvalues of the diffusion matrix, sort them in a descending order, and select the top-k eigenvalues as the structural representation. We also experimented with heat kernel diffusion, eigenvalues of the normalized graph Laplacian, and the shortest path matrix, and found that diffusion produced better results.
45
+
46
+ Assume a support set $D_i^{sup}=[g_1, g_2,...,g_N]$ of $N$ graphs belonging to a randomly sampled task $i$. Augmenting each graph $g$ produces three views: a contextual view represented as graph $g_{c}=(\textbf{A}, \textbf{X})$ where $\textbf{A}\in\lbrace 0, 1 \rbrace^ {n\times n}$ and $\textbf{X}\in \mathbb{R}^{n \times d_{x}}$ denote the adjacency matrix and the task-specific node features, a topological view represented as graph $g_{g}=(\textbf{A}, \textbf{U})$ where $\textbf{U}\in \mathbb{R}^{n \times d_{u}}$ denotes sinusoidal node degree encodings, and another topological view represented as vector $\textbf{z}\in \mathbb{R}^{d_{z}}$ denoting the sorted eigenvalues of the corresponding diffusion matrix $\textbf{S}\in \mathbb{R}^ {n\times n}$. Our framework allows various choices of network architecture without any constraints. For encoding graph-structured views, we opted for expressive power and adopted graph isomorphism network (GIN) [@xu_2019_iclr]. The $k^{th}$ layer of our graph encoder consists of a GIN layer followed by a feature-wise transformation layer (FWT) [@Tseng2020cross] and a swish activation. FWT layer simulates various feature distributions under different domains: $h_v^{(k)}=\gamma^{(k)} \times h_v^{(k)} + \beta^{(k)}$ where $\gamma \sim \mathcal{N}\left(1, \text{softplus} (\theta_\gamma)\right)$ and $\beta \sim \mathcal{N}\left(0, \text{softplus} (\theta_\beta)\right)$. $\theta_\gamma, \theta_\beta$ are the standard deviations of the Gaussian distributions for sampling the affine transformation parameters.
47
+
48
+ We use a dedicated graph encoder for each view: $g_\theta(.): \mathbb{R}^{n \times d_x} \times \mathbb{R}^{n \times n} \longmapsto \mathbb{R}^{n \times d_h}$ and $g_\phi(.): \mathbb{R}^{n \times d_u} \times \mathbb{R}^{n \times n} \longmapsto \mathbb{R}^{n \times d_h}$ resulting in two sets of node representations $\mathbf{H}^x$, $\mathbf{H}^u \in \mathbb{R}^{n \times d_h}$ corresponding to the contextual and the topological views of the sampled graph. For each view, we aggregate the node representations into a graph representation using a pooling (readout) function $\mathcal{R}(.) : \mathbb{R}^{n \times d_h} \longmapsto \mathbb{R}^{d_h}$. We experimented with global soft attention pooling [@li2015gated], jumping knowledge network [@Xu_2018_icml], and summation and mean pooling layers, and found that they produce similar results. Therefore, we opted for simplicity and used a simple mean pooling layer. This results in two graph representations: $\textbf{h}^x, \textbf{h}^u \in \mathbb{R}^{d_h}$. We also feed the topological view from the eigenvalues of the graph diffusion into a projection head $f_\psi(.): \mathbb{R}^{d_z}\longmapsto \mathbb{R}^{d_h}$, modeled as an MLP resulting in the third representation: $\textbf{h}^z \in \mathbb{R}^{d_h}$.
49
+
50
+ To aggregate the learned representations, we feed the concatenation of the learned representations into an attention module $f_\omega(.): \mathbb{R}^{3 \times d_h}\longmapsto \mathbb{R}^{3}$ that generates attention scores for each representation. The attention module is modeled as a single-layer MLP followed by a softmax function:
51
+
52
+ $$\begin{equation}
53
+ \label{eq:att}
54
+ \alpha= \text{Softmax}\bigg(\text{ReLU}\bigg(\bigg[ \textbf{h}^x \parallel \textbf{h}^u \parallel \textbf{h}^z \bigg]\mathbf{W_1}\bigg)\mathbf{W_2}\bigg)
55
+ \end{equation}$$
56
+
57
+ where $\mathbf{W_1}\in \mathbb{R}^{(3 \times d_h) \times d_h}$ and $\mathbf{W_2}\in \mathbb{R}^{d_h \times 3}$ are network parameters. The attention scores are then used to aggregate the learned features into a final graph representation.
58
+
59
+ The attention mechanism gates the representations and decides if the model should rely more on contextual or topological representations. If the samples are from a task that is similar to seen tasks, the model will pay more attention to contextual representation whereas if there is a drastic shift in feature space, the model will rely more on geometric representations. We assume that the target domain is not available during training and hence there is no information in advance about whether there are shared features among tasks. If there is, the attention module will pass the shared contextual information through, otherwise it will not attend to the contextual features and will let the learner learn them from scratch during the meta-test adaptation phase. Hence, rather than naively throwing the information away and learning from scratch, we let the model decide if it can use the information. It is noteworthy that we are not introducing a new meta-learning framework. Instead we are introducing an encoder with an attention module that can seamlessly be integrated into any meta-learning framework. As an example, we show the training procedure of the encoder within a mini-batch of tasks using the prototypical approach [@snell2017prototypical] in Algorithm [\[algo\]](#algo){reference-type="ref" reference="algo"}. Depending on the meta-learner, the aggregated representation can then be fed into a linear classifier or a non-parametric classifier such as a prototypical classifier.
60
+
61
+ ::: algorithm
62
+ $[\textbf{H}^s, \textbf{H}^q, \mathcal{Y}^s, \mathcal{Y}^q] \gets \emptyset$\
63
+ $\textbf{h}_x \gets \mathcal{R}\left(g_\theta\left(\textbf{A}_g, \textbf{X}_g \right) \right)$\
64
+ $\textbf{h}_u \gets \mathcal{R}\left(g_\phi\left(\textbf{A}_g, \textbf{U}_g \right) \right)$\
65
+ $\textbf{h}_z \gets f_\psi\left( \mathcal{E}\left(\textbf{S}_g \right) \right)$\
66
+ $\alpha \gets f_\omega \left( \left[ \textbf{h}_x \parallel \textbf{h}_u \parallel \textbf{h}_z \right] \right)$\
67
+ $\textbf{h} \gets \alpha_0 \textbf{h}_x + \alpha_1 \textbf{h}_u + \alpha_2 \textbf{h}_z$\
68
+ $\textbf{H}^q \gets \textbf{H}^q \parallel\textbf{h}$ , $\mathcal{Y}^q \gets \mathcal{Y}^q \parallel y$ $\textbf{C}_\mathcal{T} \gets \mathcal{P}\left( \mathbf{H}^s, \mathcal{Y}^s \right)$ $\textbf{L}_\mathcal{T} \gets \textbf{L}_\mathcal{T} + \mathcal{L}\left(\textbf{C}_\mathcal{T}, \mathbf{H}^q, \mathcal{Y}^q \right)$ $\left[\theta, \omega, \phi, \psi \right] \gets \left[\theta, \omega, \phi, \psi \right] - \gamma\nabla_{\left[\theta, \omega, \phi, \psi \right]} \frac{1}{N \times |D^q|} \sum\limits_{j=1}^N{ \left[ \textbf{L}_{\mathcal{T}_j}\right]}$
69
+ :::
70
+
71
+ :::::: table*
72
+ ::::: center
73
+ :::: small
74
+ ::: sc
75
+ +------------------------------+--------------------------+----------------------------------------------------------------------+----------------+-----------------+-----------------+
76
+ | **Meta-Domain** | **$|$Task$|$** | **Avg. on target** | **$|$shot$|$** | **$|$class$|$** | **$|$query$|$** |
77
+ +:==========:+:===============:+:======:+:======:+:======:+:===============:+:================================:+:===============:+:==============:+:===============:+:===============:+
78
+ | 1-8 Source | Target | Train | Dev | Test | Node | Edge | Feature | | | |
79
+ +------------+-----------------+--------+--------+--------+-----------------+----------------------------------+-----------------+----------------+-----------------+-----------------+
80
+ | Molecules | Molecules | 169 | 5 | 18 | 26.6 $\pm$ 15.7 | 28.6$\pm$`<!-- -->`{=html}16.6 | 18.1 $\pm$ 18.7 | 1,5,10,20 | 2 | 50 |
81
+ +------------+-----------------+--------+--------+--------+-----------------+----------------------------------+-----------------+----------------+-----------------+-----------------+
82
+ | Molecules | Bioinformatics | 187 | 5 | 24 | 79.2 $\pm$ 58.5 | 406.6$\pm$`<!-- -->`{=html}300.3 | 19.8 $\pm$ 15.1 | 1,5,10,20 | 2 | 50 |
83
+ +------------+-----------------+--------+--------+--------+-----------------+----------------------------------+-----------------+----------------+-----------------+-----------------+
84
+ | Molecules | Social Networks | 187 | 5 | 12 | 54.1 $\pm$ 58.8 | 98.1$\pm$`<!-- -->`{=html}117.9 | 0 | 1,5,10,20 | 2 | 50 |
85
+ +------------+-----------------+--------+--------+--------+-----------------+----------------------------------+-----------------+----------------+-----------------+-----------------+
86
+ :::
87
+ ::::
88
+ :::::
89
+ ::::::
90
+
91
+ :::::: table*
92
+ ::::: center
93
+ :::: footnotesize
94
+ ::: sc
95
+ +--------------------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
96
+ | **method** | **1-shot** | **5-shot** | **10-shot** | **20-shot** | |
97
+ +:========:+:=========================:+:====================:+:====================:+:====================:+:===============================:+:=:+
98
+ | | Empirical Upper Bound | **66.78 $\pm$ 10.30** | |
99
+ +----------+---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
100
+ | ::: turn | GCN | 54.88 $\pm$ 7.55 | 55.05 $\pm$ 8.85 | 55.03 $\pm$ 8.91 | 54.99 $\pm$ 8.82 | |
101
+ | 90sup | | | | | | |
102
+ | ::: | | | | | | |
103
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
104
+ | | GAT | 54.75 $\pm$ 8.85 | 54.69 $\pm$ 8.90 | 54.76 $\pm$ 8.97 | 54.63 $\pm$ 8.94 | |
105
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
106
+ | | GIN | 55.37 $\pm$ 9.83 | 55.52 $\pm$ 9.79 | 55.47 $\pm$ 9.89 | 55.52 $\pm$ 9.65 | |
107
+ +----------+---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
108
+ | ::: turn | InfoGraph | 54.00 $\pm$ 6.65 | 53.67 $\pm$ 7.35 | 54.42 $\pm$ 6.41 | 54.96 $\pm$ 7.63 | |
109
+ | 90uns | | | | | | |
110
+ | ::: | | | | | | |
111
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
112
+ | | MVGRL | 57.12 $\pm$ 7.75 | 57.25 $\pm$ 9.04 | 57.17 $\pm$ 8.01 | 57.54 $\pm$ 8.06 | |
113
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
114
+ | | GSFE | 52.84 $\pm$ 6.71 | 52.96 $\pm$ 7.82 | 53.06 $\pm$ 7.64 | 53.16 $\pm$ 7.87 | |
115
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
116
+ | | GCC | 53.11 $\pm$ 6.51 | 53.17 $\pm$ 6.43 | 53.18 $\pm$ 7.18 | 53.35 $\pm$ 7.64 | |
117
+ +----------+---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
118
+ | ::: turn | MatchNet | 54.83 $\pm$ 7.66 | 55.62 $\pm$ 7.60 | 55.92 $\pm$ 6.67 | 56.04 $\pm$ 7.78 | |
119
+ | 90meta | | | | | | |
120
+ | ::: | | | | | | |
121
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
122
+ | | ProtoNet | 54.71 $\pm$ 8.86 | 55.75 $\pm$ 7.84 | 55.96 $\pm$ 6.73 | 55.50 $\pm$ 9.65 | |
123
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
124
+ | | RelationNet | 54.93 $\pm$ 8.55 | 55.92 $\pm$ 8.69 | 56.02 $\pm$ 7.69 | 56.15 $\pm$ 7.81 | |
125
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
126
+ | | MAML | 53.83 $\pm$ 9.62 | 54.46 $\pm$ 6.77 | 54.50 $\pm$ 8.77 | 54.79 $\pm$ 8.90 | |
127
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
128
+ | | MetaSGD | 53.83 $\pm$ 8.79 | 54.21 $\pm$ 7.70 | 54.67 $\pm$ 9.72 | 54.71 $\pm$ 7.90 | |
129
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
130
+ | | MetaSpecGraph | $-$ | 55.47 $\pm$ 7.79 | 55.82 $\pm$ 8.91 | 55.97 $\pm$ 8.89 | |
131
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
132
+ | | MatchNet + Our Encoder | **59.14 $\pm$ 7.00** | **59.19 $\pm$ 9.77** | 59.22 $\pm$ 8.72 | 59.56 $\pm$ 6.97 | |
133
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
134
+ | | ProtoNet + Our Encoder | 57.17 $\pm$ 7.76 | 57.58 $\pm$ 8.89 | 57.79 $\pm$ 8.76 | 58.17 $\pm$ 7.88 | |
135
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
136
+ | | RelationNet + Our Encoder | 58.83 $\pm$ 8.03 | 58.83 $\pm$ 9.68 | **59.29 $\pm$ 7.87** | **59.82 $\pm$ 7.93** | |
137
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
138
+ | | MAML + Our Encoder | 56.00 $\pm$ 8.74 | 56.21 $\pm$ 7.76 | 56.37 $\pm$ 8.81 | 57.04 $\pm$ 7.85 | |
139
+ | +---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
140
+ | | MetaSGD + Our Encoder | 55.08 $\pm$ 8.67 | 56.12 $\pm$ 8.23 | 57.08 $\pm$ 8.86 | 57.33$\pm$`<!-- -->`{=html}8.86 | |
141
+ +----------+---------------------------+----------------------+----------------------+----------------------+---------------------------------+---+
142
+ :::
143
+ ::::
144
+ :::::
145
+ ::::::
146
+
147
+ :::::: table*
148
+ ::::: center
149
+ :::: footnotesize
150
+ ::: sc
151
+ +--------------------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
152
+ | **method** | **1-shot** | **5-shot** | **10-shot** | **20-shot** | |
153
+ +:========:+:=========================:+:=====================:+:====================:+:====================:+:=====================:+:=:+
154
+ | | Empirical Upper Bound | **72.35 $\pm$ 12.38** | |
155
+ +----------+---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
156
+ | ::: turn | GCN | 60.51 $\pm$ 10.54 | 60.54 $\pm$ 10.18 | 60.33 $\pm$ 10.19 | 60.35 $\pm$ 10.52 | |
157
+ | 90sup | | | | | | |
158
+ | ::: | | | | | | |
159
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
160
+ | | GAT | 61.32 $\pm$ 10.31 | 61.37 $\pm$ 10.20 | 61.17 $\pm$ 10.17 | 61.51 $\pm$ 10.13 | |
161
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
162
+ | | GIN | 62.11 $\pm$ 10.11 | 62.98 $\pm$ 10.04 | 63.27 $\pm$ 10.05 | 63.24 $\pm$ 9.28 | |
163
+ +----------+---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
164
+ | ::: turn | InfoGraph | 61.92 $\pm$ 9.84 | 62.25 $\pm$ 7.12 | 62.58 $\pm$ 8.57 | 62.58 $\pm$ 7.32 | |
165
+ | 90uns | | | | | | |
166
+ | ::: | | | | | | |
167
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
168
+ | | MVGRL | 63.00 $\pm$ 10.70 | 63.75 $\pm$ 11.17 | 63.25 $\pm$ 11.69 | 63.75 $\pm$ 11.99 | |
169
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
170
+ | | GSFE | 60.38 $\pm$ 9.74 | 60.45 $\pm$ 9.62 | 60.46 $\pm$ 9.95 | 60.55 $\pm$ 9.11 | |
171
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
172
+ | | GCC | 60.61 $\pm$ 9.55 | 60.73 $\pm$ 9.74 | 60.81 $\pm$ 9.61 | 60.98 $\pm$ 9.97 | |
173
+ +----------+---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
174
+ | ::: turn | MatchNet | 62.25 $\pm$ 9.14 | 62.42 $\pm$ 9.98 | 62.92 $\pm$ 9.22 | 63.33 $\pm$ 9.92 | |
175
+ | 90meta | | | | | | |
176
+ | ::: | | | | | | |
177
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
178
+ | | ProtoNet | 60.50 $\pm$ 9.05 | 61.25 $\pm$ 10.01 | 61.75 $\pm$ 9.06 | 63.50 $\pm$ 9.97 | |
179
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
180
+ | | RelationNet | 61.25 $\pm$ 10.04 | 61.08 $\pm$ 10.25 | 61.83 $\pm$ 10.17 | 62.00 $\pm$ 10.17 | |
181
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
182
+ | | MAML | 58.33 $\pm$ 9.05 | 58.75 $\pm$ 10.84 | 59.00 $\pm$ 9.87 | 60.17 $\pm$ 10.99 | |
183
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
184
+ | | MetaSGD | 59.25 $\pm$ 10.15 | 59.33 $\pm$ 9.50 | 59.83 $\pm$ 9.89 | 60.25 $\pm$ 9.88 | |
185
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
186
+ | | MetaSpecGraph | $-$ | 62.55 $\pm$ 9.79 | 62.74 $\pm$ 9.91 | 63.73 $\pm$ 9.89 | |
187
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
188
+ | | MatchNet + Our Encoder | **67.40 $\pm$ 10.37** | **67.71 $\pm$ 9.24** | **68.34 $\pm$ 9.09** | 68.90 $\pm$ 9.18 | |
189
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
190
+ | | ProtoNet + Our Encoder | 66.42 $\pm$ 10.21 | 66.92 $\pm$ 10.55 | 67.00 $\pm$ 9.13 | 67.50 $\pm$ 10.46 | |
191
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
192
+ | | RelationNet + Our Encoder | 66.75 $\pm$ 9.89 | 67.08 $\pm$ 10.80 | 67.33 $\pm$ 10.33 | **69.67 $\pm$ 10.05** | |
193
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
194
+ | | MAML + Our Encoder | 62.25 $\pm$ 10.12 | 64.75 $\pm$ 10.79 | 66.58 $\pm$ 10.93 | 66.83 $\pm$ 10.21 | |
195
+ | +---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
196
+ | | MetaSGD + Our Encoder | 61.92$\pm$ 9.56 | 63.17 $\pm$ 10.51 | 64.42 $\pm$ 10.16 | 64.83 $\pm$ 10.06 | |
197
+ +----------+---------------------------+-----------------------+----------------------+----------------------+-----------------------+---+
198
+ :::
199
+ ::::
200
+ :::::
201
+ ::::::
202
+
203
+ We also introduce three new few-shot graph classification benchmarks with fixed meta train/val/test splits constructed from publicly available graph datasets. In all benchmarks, the source meta-domain consists of molecule classification tasks. We made this decision because most of the available graph classification datasets are molecule datasets and hence using them as the source meta-domain can provide sufficient tasks during meta-training. The target meta-domains are molecule, bioinformatics, and social networks. Note that although both source and target meta-domains in the first benchmark pertain to molecules, the tasks differ in both feature and class spaces. The process of creating these benchmarks was as follows.
204
+
205
+ We collected all the datasets from TUDataset [@morris2020tudata] and OGB [@hu2020open]. We kept graphs with a maximum node degree of 50, a maximum node feature dimension of 100, and graphs with a minimum of two nodes and one edge and ignored the rest. We also filtered out graphs with disconnected components. For graphs with more than 500 nodes, we sorted the nodes by their harmonic centrality [@boldi2014axioms] in a descending order and used the sub-graphs containing the top 500 nodes. For multi-task datasets, we drew samples from each task without replacement and split them into several single-task datasets without sharing any data samples. Because the majority of datasets in TUDataset and OGB are binary classification tasks, we opted for a "k-shot 2-way" setting and split the few remaining multi-class datasets into binary datasets by sampling without replacement. We then randomly selected 20 and 50 samples per class as support and query sets, respectively.
206
+
207
+ For the second and third benchmarks, we used the processed datasets from bioinformatics and social network categories as the meta-testing tasks. For the first benchmark, we split the tasks such that if a task originated from a multi-task or a multi-class dataset, it is allocated to a split with all of the tasks that also originated from the same dataset. The statistics of the proposed benchmarks are shown in Table [\[table:stat\]](#table:stat){reference-type="ref" reference="table:stat"}. We believe these benchmarks will help the community to drive further advances in heterogeneous meta-learning.
2202.12823/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-10-05T04:55:34.202Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36" etag="H68fLi2iZogxZ1WuGHtK" version="15.3.7" type="device" pages="2"><diagram id="0v0yH8gOQTXzF0Ik4jON" name="Page-1">3Zhbb9owGIZ/TS47EedAuGxpu7YrVTVW7XAzmdhJrDo2cpwS+uvnNM4JB8EQVN1AQvHr82O/9kcsZ5oWnwVcJjOOMLXACBWWc2kBMAGu+i2FdSW4k3ElxIKgSrJbYU5esRZHWs0JwlmvoOScSrLsiyFnDIeyp0Eh+KpfLOK03+sSxtgQ5iGkpvqdIJlUagDGrX6DSZzUPdv+pMpJYV1YzyRLIOKrjuRcWc5UcC6rp7SYYlqyq7lU9a635DYDE5jJfSo83hfj1Bvhpxv+8PvOy1+TRXqmW3mBNNcT1oOV65qA4DlDuGxkZDkXq4RIPF/CsMxdqSVXWiJTqlK2esyk4M94yikXSmGcqWIXug8sJC62Dt5ukKithHmKpVirIrqCoyGu2jVotkjS4Q8mWoR63eOmqRaNetB0hkkFZ1+yu2f71ypwn9gTuWb+7de/I2XvJhURSmtOFnAir/waBFWO//Ypa3AmO3r1afAaLAeIb8VbU9MmDUzaNhigbTunog0M2lPOXjjNJeHsqOARxEEUDoIPA7yIjgPYtvuEbc9EHAwQdk8F2DEAz2DxqE5WwuLjbuwoAuEgX+QvfM8/Ed/RfnzBqfi6Bt+HwuCqJif3OD+7RLUEKYmZSoYKGVb6RYmKqGvrXGekBKGym8HV6h/pR8DvbtAfOK7d96Tv7T6sMUPnZXxQQqQwy0jYXwkFQ6x/lIg+eXXyZzfvstD8qtR6F00JRYzlbkti1ItITOYdqN4A1FoTmEJJXvpxzBBp3cMjJ2rEraO8LSdW3UTGcxFiXasbd2w0BEZbrFk3VIExGnpb+Gbah+8F39gLt2yZy+z/dWNjP00cDFw272rH4F+0I/jQdgSH2tFoaHPVT2zHibEX7uffZkcNOZCHA+QOhRwBWDj+kUKOjZgZ7Bkzn8xk9h7/UD6eyyYf2mXGXXXopbfp1oNNppLtK4OqePvexbn6Aw==</diagram><diagram id="5Dkw892MHKhOnYwywbi5" 
name="Page-2">7Zxtc6I6FIB/jR/tQMLrR2u7287d7vZe7451v+wgRKUXwWKoL7/+hgVUkqhoIVDb7syOHCHKc87JeUmwBbvT5dfQmk0eAgd5LSA5yxa8aQFgAoX8HwtWiUAx9UQwDl0nEclbQc9do1QopdLIddA8dyIOAg+7s7zQDnwf2Tgns8IwWORPGwVe/lNn1hgxgp5teay07zp4kkgNoG/ld8gdT7JPljUzeWdqZSendzKfWE6w2BHB2xbshkGAk1fTZRd5MbuMiymvbl9BR1c7Ydto9zw3/P61nQz25ZRLNrcQIh+fPXT7ZfC8nmC1/zxs/xqYz+sITNNLpFfLi1Je6b3iVQYwDCLfQfEgUgteLyYuRr2ZZcfvLojFENkETz1yJJOXcxwG/6Fu4AUhkfiBT067Tj8DhRgtKaUcuSN5g5mYJwqmCIcrcl06ipJ++8VWr6qeyiY7OoVGKrRSWxpvhtryIi9SZHx8UbT+0VfuI0WSpvNFx5P1p1VbPwWffBzfyPW8DF4LwJEa/2Owkne0P3/xFYGPd+TJ34Z5McAc3exlLkspy3Q6kHVWCTLgKEGGVSnBYJTQDfzXwIuwG/il6sOxkDGyufqwDTQcVckd5LnzjN/gYFeqom4y1B+s5SOZ2V1/XK4TjEbA5kJ3tKGmagKhy2Yx6KAq6DI7X39fMrTJ3eEC0/Iu51Rkee7YJ4c2AYmI/Dpm5ZJg2knfmLqOE38MV4f5SFGVUlTKEQCrE0WoTmRGJ4xGkO904lwmRutZ87lr5/VDEIWrpxjclZodDnbfu1mmVJOj1VmMsRWOET7u08jJpVSsJnYDLgd1JguRZ2H3NZ+I8finn/AYuOQ2tt6n7ZnysiHmQRTaKL1qN/OhBtrkorQbZwMlYJiB/pjD5rbfYCFshLr0NAHojUsTZDZiXWCeAMyG5QmADVmXlyjQ1GtPFECBoHRhU46iNG7KyaBf9pSj7IvTtU058ANMOTT1+qcchaH+4WoTAClXqLs4AeqnUqAJr9SGqUVj1HLvzyI8/2iqoSozqNatmAId3aqKebR08c5l5Giw8872ovjgvAZAUjAXsMujnQK5Wa0CSOXfkE7pirYK6CQS0EsHFbcKQIFWwTtpJmXfvCkmQoVmSM8hZ5tIwW4S0Zm12jltFp8w3/+FVYn/hbcWl4xYqv1lkHbs7+esZ01n5afOho34qfPQUBW10iyNbk5JbNThVoqVhR14OT1k49Pr3+L1dNNbjNezfQph5teArCdLZo6HNKlRxi1T89hm3FONmxmI9pKKsx7INmwuL+ow/cn6ow7bsXmvUSdr+TXEM+nc7eylS5Va7Ra9dAnZ/lE38G0Ll+qVSHZUpPO80tR0aFW6xYPSFK8BoYl0SoVtDX2kWJzla0c9PrPMhng83YEAWlmJpizW45UyOmANsKNmBYTSzIOOLLpY69DZTI2xjjEJBbM3zteb/eDWMBtWOjiPM8th2fxQ13JYto28tK3VLNHDGjoeAutqsmc222A0+3eYCyVlFCBVi7sxG15qdzc2UX2bTREg2HL9eOVsT3w6rLHi3UjWxoRWgVqBKrBh5GpDVSA3bxiq/ROZWHInPSnTJHKaOHKrX+GN83PZV00pcL8MvRfYeSq7NysDI1cRXkmbCnFfURgfPaLQJfezwd2ADL9o07ZhhaKsmlTkLKtQpAcqrxTgG2aBNatachNZogmzucmm7SPEh097AKCU2e+wyk4KsbJQVGWXTdWjWuYx1UWOs9H+nZDjZMBiyZ20Wb5J5GDd5AokJ80kx0mIxZIr0EhrJjnOColYcuLr1ZLIcUoJseTE90hKIsd7lEcoOfGFf0nkjLrJiS/8SyLHeZ6mKnLh4J9v3b9+GNFT7+Zu/eLfWV3eT7N86/37cADeGQ+NqchwFN66uwGGUKv0iUlmDyZLu6o+Sz9cONHf7sP09+++swpXg+getwvkgM1bdqd6GmU+CVB4HX5PMS6mm8I87kzv1y/aTY
Hiuilc8yuQSH8o84NFzQ9+mt9p5scNNu9y9itvs4jKWtuhmNyQVvI72HNEDrc/kJecvv2VQXj7Pw==</diagram></mxfile>
2202.12823/main_diagram/main_diagram.pdf ADDED
Binary file (10.6 kB). View file
 
2202.12823/paper_text/intro_method.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ The success of deep generative models is rapidly spreading over the entire fields of industry and academia. In today's game developments, deep generative models are starting to help us create various assets including graphics, sounds, character motions, conversations, landscapes, and level designs. For instance, the Game Developers Conference 2021[^1] held a special session named "Machine Learning Summit" to present various deep generative models used in game products, such as for generating character motions that match the content of conversations [@YuDing2021] and for generating 3D facial expression models of characters from human face pictures [@PeiLi2021].
4
+
5
+ <figure id="fig:game_screen" data-latex-placement="tb">
6
+ <img src="figure_1.png" />
7
+ <figcaption>Love Live! School Idol Festival All Stars. </figcaption>
8
+ </figure>
9
+
10
+ Although deep generative models for rhythm actions have been studied for a while -- notably by @Donahue:2017, they have been focusing on proof of concept or personal hobby use, not yet being used in cutting-edge commercial products. There thus remain questions: what is the blocker to leverage chart generation models in the game business, and how should we overcome it?
11
+
12
+ The present article reveals a key remaining problem, which is musical structure recognition. Indeed, we considered features such as beats and temporal scales with our model (see [2.1](#sec:musicalStructure){reference-type="ref+label" reference="sec:musicalStructure"} for more on musical structures). While existing models have not had components dedicated to these concepts, our present model has them, which process beats and multiple temporal scales. Those correspond to our *beat guide* and *multi-scale conv-stack*, respectively.
13
+
14
+ The beat guide is an extraordinary technique in the sense that it can be computed for any input audio and straightforwardly. Somewhat surprisingly, it had been overlooked by existing works. The multi-scale conv-stack is incorporated in order to capture musical features of different time scales, like repeats and notes of various lengths.
15
+
16
+ We have included a thorough evaluation of this improved model, employing a supercomputer and user feedback from our application in the gaming industry operation with a business company. The benchmarks show that our improved model outperformed the state-of-the-art model known as the Dance Dance Convolution; DDC [@Donahue:2017]. From the business feedback ([6](#sec:feedback){reference-type="ref+label" reference="sec:feedback"}), we learned that the model is capable of replacing the manual labor of generating the first drafts (this is practical, since game artists have relied on manager-level decisions to fine-tune their first drafts anyway). Our model, named *GenéLive!*, reducing the business cost by half, will stay in business operation for the foreseeable future.
17
+
18
+ <figure id="fig:chart_example" data-latex-placement="tb">
19
+ <img src="figure_2.png" />
20
+ <img src="figure_3.png" />
21
+ <figcaption>Data processing in the generative model. The present article focuses on the onset model.</figcaption>
22
+ </figure>
23
+
24
+ To conduct the application study, we worked with the game artists team in KLab Inc., a Japan-based video game developer (which employs also some of the authors). The company has by today operated three rhythm action game titles online. A particularly successful title, "Love Live! School Idol Festival All Stars," or simply "Love Live! All Stars" ([1](#fig:game_screen){reference-type="ref+label" reference="fig:game_screen"}), has been released in 6 languages and been played worldwide while under KLab's operation, acquiring more than 10 million users. Today, we see a wide range of competitive games with comparable impacts, which makes this work relevant to a larger audience.
25
+
26
+ Specifically, our target task is the generation of *charts*, which instruct the player to tap or flick buttons at specified moments, the defining challenge of rhythm action games. These buttons are known as *notes* as they fly through the screen forming a spatial pattern resembling a musical score. The audio record playing in the background is commonly referred to as a *song*.
27
+
28
+ A chart generation model consists of two submodels: the *onset*, which generates the timing of a note, and the *sym*, which decides the user action type (like a tap or flick). As deciding the timing is the bottleneck of KLab's workflow (detailed in [2.3](#sec:workflow){reference-type="ref+label" reference="sec:workflow"}), this article focuses on presenting our onset model.
29
+
30
+ The present work has the following contributions:
31
+
32
+ - We propose a deep generative model for chart creation, which achieved its business-quality performance by improving the state-of-the-art model DDC [@Donahue:2017] by incorporating two novel techniques: the beat guide and multi-scale conv-stack.
33
+
34
+ - Each of our improvements enhances the performance for all difficulty modes in multiple game titles. The improvements were effective particularly for easier difficulty modes, overcoming a commonly known weak point of the DDC.
35
+
36
+ - Incorporated into the workflow of KLab's rhythm action titles, our model halved the chart delivery time. The workflow is applicable to rhythm action games in general -- the results verified its versatility also on open third-party datasets from *Stepmania*.
37
+
38
+ - Our PyTorch-based source code and the trained models, found after extensive hyperparameter tuning (over 80,000 GPU hours of Tesla P100 on a supercomputer), are publicly available.[^2]
39
+
40
+ # Method
41
+
42
+ Most songs used in rhythm action games have a typical musical structure that is composed of temporally hierarchical performance patterns. The percussion keeps a steady beat, creating a rhythmic pattern in a bar. A series of such bars, together with phrases of melody instruments or vocals, forms musical sections such as an intro, verse, bridge, chorus, and outro. For example, the song *Sweet Eyes*[^3] in "Love Live! All Stars" has 60 bars that organize the following musical structure: an 8-bar intro, 16-bar verse, 10-bar bridge, 18-bar chorus, and 8-bar outro, each of which consists of repetitions of 1-bar to 4-bar phrases.
43
+
44
+ Artists at KLab confirm that this sort of musical structure is common in almost all songs in their rhythm action games and also a key feature of their chart creation. More specifically, the artists tend to put identical note patterns on the above-mentioned repetitions of a phrase. See [16](#sec:chart_full){reference-type="ref+label" reference="sec:chart_full"} for more details on the musical structure of Sweet Eyes and how the artists put notes on such a structure.
45
+
46
+ To learn the temporal patterns with our generative model, we would thus be required to consider multiple time scales. This was, in fact, absent in the network design of the DDC. To see how we designed our model to capture these features, see the explanations of the beat guide ([4.3](#sec:beat-information-consideration){reference-type="ref+label" reference="sec:beat-information-consideration"}) and multi-scale conv-stack ([4.4](#sec:conv-stack){reference-type="ref+label" reference="sec:conv-stack"}).
47
+
48
+ A song will be assigned charts of various difficulty modes ranging from *Beginner*, *Intermediate*, *Advanced*, and *Expert* to *Challenge*, in increasing order. In our preliminary experiments, the Dance Dance Convolution (DDC) [@Donahue:2017] generated charts for higher difficulty game modes at a human-competitive quality. (Find more related work in [7](#sec:related-work){reference-type="ref+label" reference="sec:related-work"}.) However, the generation of low-difficulty charts had room for improvement (as @Donahue:2017 themselves pointed out). As our primary target was easier modes, this was a significant challenge.
49
+
50
+ Leading rhythm action titles today tend to take the form of one piece of a large entertainment franchise, like KLab's "Love Live! All Stars" does of "Love Live!." The company's role is to operate the mobile app, while songs are delivered by other participants of the franchise. After the first release of the app, KLab continued to contribute by offering new playable songs. This is why a significant cost for the company's business is posed by chart generation.
51
+
52
+ The company's workflow ([\[fig:workflow\]](#fig:workflow){reference-type="ref+label" reference="fig:workflow"}) does *not* demand a *fully*-automated chart generation, since KLab's artists need to experiment with different variations of candidate products, employing their professional skills -- this is a high-level decision critical for the success of the franchise. We thus focus on semi-automation, which was to generate the *first drafts* of the charts (see [2](#fig:chart_example){reference-type="ref+label" reference="fig:chart_example"}) so the artists can be freed from this low-skill labor.
53
+
54
+ To create a chart, artists repeatedly listen to the whole of a song to understand its musical structure as set by business partners. During this process, they ponder how to place hundreds of notes to be tapped by the player, to eventually craft the chart through trial and error. This first draft does not require too much expert skill, although it had been causing as much as half the cost in the workflow before our model was in operation.
55
+
56
+ The charts are then modified so that the actions are connected with emotions, like imitating the dance motion rendered in the background or flicking to a specific direction relating to the lyrics. It may be revised further to enhance the gameplay experience with more focused consideration of the overall game design.
57
+
58
+ In essence, the first draft of the chart generation is crafted only from the input audio, while the enhancements are applied from information harder to compile into numerical data. We thus target the auto-generation of the first drafts.
59
+
60
+ We acquired songs and charts used in "Love Live! School Idol Festival All Stars" (in short "Love Live! All Stars") and "Utano Princesama Shining Live" ("Utapri") operated by KLab. Both the songs and charts are provided by multiple artists. In addition, we use openly accessible songs and charts from "Fraxtile" and "In the groove" in the open source game "Stepmania," which were used also in the prior work [@Donahue:2017]. The number of songs was 163, 140, 90, and 133 for "Love Live! All Stars," "Utapri," "Fraxtile," and "In the groove," respectively. There were typically 4 game difficulties for "Love Live! All Stars" and 5 for the rest, each difficulty contributing to one chart. See [9](#sec:characteristics){reference-type="ref+label" reference="sec:characteristics"} for details on the datasets.
61
+
62
+ We augmented the audio of each song in the datasets. The audio was first converted to a Mel spectrogram, which is a 2D array of time-frequency bins. The spectrogram was then augmented via a series of transformations adopted from [@Park_2019], resulting in an augmented Mel spectrogram, which is an input to generative models.
63
+
64
+ We applied the following transformations in the presented order (see [15](#sec:dataAugmentationDetails){reference-type="ref+label" reference="sec:dataAugmentationDetails"} for details): the *frequency shift* shifts all frequency bins by a random amount; *frequency mask* fills some frequency bins with the mean value; *time mask* fills some time bins with mean value; *high frequency mask* also fills some frequency bins but such frequencies must be above a predetermined threshold; *frequency flip* reverses the order of frequency bins; *white noise* adds a Gaussian noise to all time and frequency bins; *time stretch* stretches all time bins.
65
+
66
+ The onset labels, which specify the existence of notes in the chart, were augmented by fluctuating the labels [@Liang:2019]. We also augment the beat information as explained in [4.3.2](#sec:dataAugmentationBeat){reference-type="ref+label" reference="sec:dataAugmentationBeat"}.
67
+
68
+ Following @Donahue:2017, our model uses the Short-Time Fourier Transform (STFT) and Mel spectrogram of the audio. The STFT allows the model to capture features in the frequency domain. The window length and stride of STFT were both set to be 32 ms. The audio is sliced into chunks of 20 seconds.
69
+
70
+ The Mel spectrogram can capture perceptually relevant information in the audio data, and is a standard treatment in speech processing. It is also used in the DDC [@Donahue:2017]. Following Hawthorne et al., @hawthorne:2017, we set the number of the Mel bands to 229. We set the lowest frequency to 0 kHz and the highest to 16 kHz (0 and 3575 in the Mel scale). Accordingly, 229 evenly distributed triangular filters in the Mel scale are applied. We denote a Mel spectrogram by $S(t,f)\in\mathbb R$, where $t=1,\dots,T$ denotes the $t$th time bin, and $f=1,\dots,F$ denotes the $f$th frequency bin. We used $T = 20{,}000 / 32 = 625$ and $F=229$ as mentioned above.
71
+
72
+ As shown in [3](#fig:base-model){reference-type="ref+label" reference="fig:base-model"}, our base model follows the DDC [@Donahue:2017]. The Mel spectrogram $S(t,f)\in\mathbb R$ is processed through the CNN layers to extract audio features $A(t,f)\in\mathbb R$. The audio features are concatenated with the game difficulty flag $D(t)=$ const. of 10 (Beginner), 20 (Intermediate), ..., 50 (Challenge) and the beat guide $G(t)\in\{0,1,2\}$. These two are fed to the BiLSTM layers [@Graves:2005] to generate the chart $C(t)\in [0,1]$.[^4] Our improvements are explained in sections [4.3](#sec:beat-information-consideration){reference-type="ref" reference="sec:beat-information-consideration"} and [4.4](#sec:conv-stack){reference-type="ref" reference="sec:conv-stack"}. Find more details of the model architecture and the corresponding parameters in [11](#sec:architecture){reference-type="ref+label" reference="sec:architecture"}.
73
+
74
+ <figure id="fig:base-model" data-latex-placement="ht">
75
+ <img src="figure_4.png" />
76
+ <figcaption>Overall architecture of our network.</figcaption>
77
+ </figure>
78
+
79
+ The main task for the convolution stack (or *conv-stack*) is to extract features from the Mel spectrogram using the CNN layers. The conv-stack comprises a standard CNN layer with batch normalization, a max-pooling layer, and a dropout layer. The activation function is ReLU. Finally, to regularize the output, we use an average-pooling layer.
80
+
81
+ Although it had been rare to consider the positions of beats in the model, the beat is indeed crucial to the generation of the charts, as it is used by artists to evoke emotions. The beat guide is a trinary array whose length is the same as the number of time frames of the input audio. The first beat of each bar is indicated by 2, the other beats by 1, and non-beat frames by 0 ([4](#fig:beat-guide){reference-type="ref+label" reference="fig:beat-guide"}). Each element indicates the existence of a beat at that frame. It is calculated from the BPM and time signature in the song metadata. The beat guide is fed as an input to the BiLSTM layers.
82
+
83
+ <figure id="fig:beat-guide" data-latex-placement="ht">
84
+ <img src="figure_5.png" />
85
+ <figcaption>Beat guide in 4/4 time signature.</figcaption>
86
+ </figure>
87
+
88
+ [5](#fig:notes_stats){reference-type="ref+Label" reference="fig:notes_stats"} shows how frequently each note timing appears in KLab's charts. The 4th note accounts for 70--90% of a chart, and the 8th takes up 10--20%; the 12th and 16th are marginal. This fact supports the effectiveness of the beat guide, as it provides hints for placing 4th notes. It also hints that the multi-scale conv-stack's temporal max-pooling layers would be able to extract temporal dependencies of the 4th and 8th note scales.
89
+
90
+ <figure id="fig:notes_stats" data-latex-placement="hbt">
91
+ <embed src="figure_6.pdf" style="width:75.0%" />
92
+ <figcaption>Note timings in “Love Live! All Stars.”</figcaption>
93
+ </figure>
94
+
95
+ Since the proposed model requires a beat guide as an extra input accompanied with a Mel spectrogram, it is also augmented. *Beat mask* drops beats in the section with given probability. The augmented guide is $$\begin{equation}
96
+ G(t) = \delta_t G_0(t),
97
+ \end{equation}$$ where $G_0(t)\in\{0,1,2\}$ is the original beat guide at time step $t=1,\dots,T$, and $\delta_t \sim \mathcal B(1,p)$ is a random number drawn for each $t$ from the binomial distribution with $p$ being the probability of dropping a beat. The value of $p$ was optimized to 0.123 by random search in the range $[0.1, 0.3]$. Finally, our model uses ($S$, $G$) as an input, where $S$ is an augmented Mel spectrogram defined in [\[eq:augmentation\]](#eq:augmentation){reference-type="ref+label" reference="eq:augmentation"} in appendix.
98
+
99
+ One key difference between the DDC and the present model is the structure of the conv-stack. In the model used in DDC, the convolution layers are applied repeatedly to the input of Mel spectrogram, whereas the max-pooling reduces the matrix size only along the frequency axis and not time ([\[fig:ddcconv\]](#fig:ddcconv){reference-type="ref+label" reference="fig:ddcconv"}).
100
+
101
+ <figure id="fig:conv" data-latex-placement="ht">
102
+ <div class="minipage">
103
+ <p><img src="figure_7a.png" alt="image" /> <span id="fig:ddcconv" data-label="fig:ddcconv"></span></p>
104
+ </div>
105
+ <div class="minipage">
106
+ <p><embed src="figure_7b.pdf" /> <span id="fig:klabconv" data-label="fig:klabconv"></span></p>
107
+ </div>
108
+ <figcaption>Conv-stack architectures, previous vs. present.</figcaption>
109
+ </figure>
110
+
111
+ The present model uses four conv-stacks with different temporal resolutions. The stack with the highest resolution (stack 1) does not perform max-pooling along the temporal dimension. The process is the same as the conv-stack of the DDC. In stacks 2, 3, and 4, max-pooling is performed along the time dimension, and the length is reduced to $1/16$, $1/64$, and $1/128$, respectively. Finally, up-sampling is applied to stacks 2, 3, and 4, and the four matrices, which have the same length in the temporal dimension, are concatenated ([\[fig:klabconv\]](#fig:klabconv){reference-type="ref+label" reference="fig:klabconv"}). By doing so, we expect our model to extract not only short-term features (e.g., the attack of the percussion) but also long-term features (e.g., rhythm patterns and melodic phrases).
112
+
113
+ Note that unlike generic 2D multi-scale convolutions such as GoogLeNet [@Szegedy2015], our temporal max-pooling does multi-scale pattern extraction explicitly and only along the time axis. Existing networks can be distracted by multi-scale patterns in frequency arising from instruments such as piano or trumpet. More important, as our artists agreed, are multi-scale temporal patterns.
114
+
115
+ The results of taking a combination of different conv-stacks are shown in [7](#fig:convstack-combination){reference-type="ref+label" reference="fig:convstack-combination"}.
116
+
117
+ <figure id="fig:convstack-combination" data-latex-placement="ht">
118
+ <embed src="figure_8.pdf" />
119
+ <figcaption> Experimenting with multi-scale temporal analysis. The results are sorted for the median of the F<span class="math inline"><sub>1</sub></span>-score<span class="math inline">$^{\textrm m}$</span> in descending order along the horizontal axis. The error bar shows <span class="math inline">1<em>σ</em></span> of results. The baseline is a single max-pooling of kernel size 32 ms (placed in the middle of the horizontal axis) that was employed in the DDC model. There is a statistical significance in our multi-scale model (32, 512, 2048, 4096) improving the baseline (32) (Wilcoxon rank sum test, <span class="math inline"><em>W</em> = 1703.5</span>, <span class="math inline"><em>p</em> &lt; 0.01</span>). </figcaption>
120
+ </figure>
121
+
122
+ [7](#fig:convstack-combination){reference-type="ref+Label" reference="fig:convstack-combination"} shows the effectiveness of multi-scale conv-stacks with different kernel sizes for max-pooling. The size 32 ms is the baseline chosen also by the DDC (without multi-scaling).
123
+
124
+ For candidates of max-pooling kernel size, we choose lengths with regular intervals in logarithmic scale to be musically meaningful length: 256 ms, 512 ms, 1024 ms, 2048 ms, and 4096 ms, each of which corresponds to the 8th note, 4th note, 2nd note, one bar, and two bars at 120 beats per minute (BPM) in 4/4 time signature.[^5]
125
+
126
+ We can see that the 8th note (256 ms) typically worsens the learning compared to 32 ms. In this experiment combining 4 scales at maximum, the best one combines 32 ms, 512 ms (4th note), 2048 ms (one bar), and 4096 ms (two bars).
2203.10761/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-03-01T06:45:53.989Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.9.6 Chrome/89.0.4389.128 Electron/12.0.16 Safari/537.36" version="14.9.6" etag="b38tHxS2LQUmT4PDolVa" type="device"><diagram id="ivhkUhJmC8YY4zEnQRo6">7V3fk5s4Ev5rXLX3EEq/BY/JTObu4W4rtUnd7j4Sm/FwwcaHmczM/vUrGQmQhD3MGLDHo1QqMY0k4Gv1p6ZbEjN8tXr8ZxFv7v6TL5JshsDicYavZwhBSqH4T0qelARqybJIF0rWCL6mfyVKCJT0Pl0kW6NgmedZmW5M4Txfr5N5acjiosgfzGK3eWZedRMv1RVBI/g6j7PEKfZ7uijvKmmIeCP/V5Iu7/SVIYuqM6tYF1ZNbO/iRf7Quhb+PMNXRZ6X1a/V41WSSfQ0LtUN3ew5W99YkazLPhVQVeFnnN2rZ1P3VT7phy3y+/UikeXhDH96uEvL5OsmnsuzD0K/QnZXrjJ1evsjKefyEYE4uM3X5U28SjOp52/pSqgMgV+TB/Hvb/kqXqsiSr8Qyfplkf+oQZUtLot4kYqnuU4Loco0F7Wuk3hbysppll3lWV4I0Tpfi0Y+qcdJijJ53AsJrIEWXTTJV0lZPIkiugJirKqjuieNogDSSvTQqFucqGR3LVXXwlh1sWXdfqMF8UMpolsp2CvFUQqAJEAkav5YKuIBm1JF5HkVlUUar5fy6Dn9tPFm4jjO0qVENEtuJaJFXsYK4w8RsCCeIQwAY1dXQ+FMzM7PEQwiB1jYhSs4HlbaASvLJAjbjeiabXzZ/+8lSX6aV0B8FCeL5ff4F3EX4q+4Euj89Q/5U0IFJOofbpUlyOqrfJ1vd0oyimx3mpEFwOaxPvU9nv9Y7qzwg3kHvyAS1pcTPbb5TeXF9W2LX8vqf2qPlFRgI6W7UaA+0ljRHVpCci1/y8apRIkKiJ8rC+uyuke8qhnUNFMpqj7TnKj0VJ+obUFI2tYgC+zsQcp3FiElcHdYW8VOxHayyjKkYGcbUlRbh5RK+5DC1h1W1tAHg/pU/fiNfVBpIU1JyBrAwFNzAhMWRK1LVCbT1KPNKWU6DaqtFoUJ1fJl6x5sBe0Oay21hWbfUeWcTlaZljDKyrp0j7wMIuMWkRESYAjqP3AyUmOe1DypeVLzpDY8qbGIBgBNxmTcM5lnMs9k74XJBmEsarlhHJ/IDQs9eXny8uT1XshrrDhkFAbUZDRIAzIZi0XPBx7n98XPOjScrBcfZcajQeEApmagF1XVb1J5M9fNgJAsdHakP3wtZChwkdGyIsmEFn+azXfBpa7wJU+lldXagYQYuglBFLRixlFotrjN74t5ohpBrSyJ3S4C2NI6wywgwBnHdMtlXCyT0mlZqCJ+ahXbyALbQw/E7AtzeeHDdwuJUwk8Xwk7jxi9phJ7vpL9EkO4WUX8qIBqLKLuSr2MROcJD1mJqJFutj2IJ95uqhzibfoozcpJj1gvfDc3oegPA9ENDB2AQxaEDt2gDqMagm4gPBWSA3K2YAWnl3LhhTrZpCkARZcAKHdZiXQMgqOB2JUh9b689+W9L3/xvvzgIyyK7MGBwIBMF5eAPeYSvA1nxfWSOZ10WOiaPvAmkewYYLl4y8TTQdmVtDyVvY86wYWFwhVE01l7Vw7lMoBl0ASWgoBOCGzo2vpimXxVh3lR3uXLfB1nnxvpp2YanUSnKfPvPN8oiP+XlOWTwji+L3NTAQKc4ukPWV/Yqzr8UzW3O7h+NI6eZs0svLbi3DCNGXlxIzOLeHtXR3mqR3cCM1V8Q4nUO04VmahkhPbWcu/ITG99dUWyvA/vfXjvw3sf/qXuEnB8
eBh2zdkea/DRbXgy82TmycyT2XHRam5RGeta2zAalfWI+I+XYazOG9CCNrTnnHvE9tw88WIZOTmGF+ceQYjMdlGdzRg62wgi61JcX2qozJzuTJf3+osiYGDHEQ109mwKs/UpEe+BeA/kvXogg1IZRhaVET5lJK9zcaWnMk9lnso8lR1JZSwKA4QOTfwZjdX82mbPap7V3g2rDcJewHLEOJvUEfMrlz1lecp6N5Q10mxhTJCzZCYM2ISxsR5zbt7xohkchoZ2hlo0Q2TYwPS+cRQwsDckPlQYmwD7wlxeGB6+XRw6tUCPWtx5yOhVtdjztYj9MkOBVef4+HzXKlnLVN7GFEqCXMWAALi9b4LZlH1W7Z39og9BEzagHAR0ujmpuGuSxVtDkUCXm3gXKU4BaFeq1/v33r/3/v3F+/dDD7eYOaODUM6ECXA9h/8CPBfHb+b0ZENEj71o3wiq7sDLxVsoPgmqF7t9rD0LRvrcHS7iaCTQYwXY2wQWA4tdKQ74hMD6ULB3Fb2r6F3FIaiMOnuaIBhohpmCzPw+lp7MPJl5MhuCzNyoKJp0+wjcJ2Lv5LOM1cMtaKX8S1yWSbHeSRDAnSkud/1yvQ4a1uui/1THZHZ4MfTu6EtSpOLhk0ILH9NSLawO1WHVHgdQHTfNyQPd2raMi9JeTC0K2BeoUDq4Zpoo/bTXTOtx6nzyd8jeYhXo/dRenrPDwFrEQp229mTpXpFswn3SItN13Q8gADBsd14pYUf2Xk6x0X0ZQpN1X9zRfc8v/ewm6njzbvvyHsxwgMJmY0ZkNk14gKBzdvi+ranD+5jex/Q+5rvzMevYn/5SWpf/2fUVNcFzknTNb7aZ29TU3wwczIfF9jQdNOXmCdpZPhM3YAwPtuUBHHZee4zqWlnGRj7g7EZ1J93C2DGjOj04quOpRnX0fFf1i+M7J7yFQWu3bDuZMdBSeYKsfgFHWyqPmXUpOvRSeeJXdHsP0nuQ3oM84EEO4v3pj2+3xmm9BHsK76/P9rcTen+m64fqXR33eX7jOHlaAYaTh89uVMfOyg4QBuj1oRsScGtYFZ0x5C9dMfCa0faUewcPOlMEAmv/qAgH0P1y9gTTnEiPTYTfBqZuypoDcCpUe6xmks7YZu+Dz/P1WmAZf9fF+789Np3MzhPQoI3GhHCcxYqVjkH6ZR2w0upBJh8NwR5pmOmTs4cQ7QqcPSTb8liwVY3a1tvuD3bB15ukH/XFe58n8G95/i3v3b/lDeL5wShAlpsitEPxKT63qt8qPbF5YvPE5ontuFda4hBbCAMyXZRK77rj2cyzmWczz2bHLZW15mtOu70q9XlFT2WeyjyVDUFlOnbY9soOLqkejdWIG/Wd4PNvOvs3a0/x1pnA7uyfkbp83XfguqYAmTnOnh+F08NOO82oZaf4KBy9mC94Evn1Q9M24Kk2caCnzMMNjCq0ZxXSSLwHngTVHnm4t4EqAszpq7hrBvBoUJ5FDm8YKO0ZEbvPoE63SRp91ZqqF01K3TOunM+0FACBHS6inAcAHr+nJcAvbXq4KSrMTRV+K+SbAZhn8Xab3qZJ4eha9OXSVLDpQqgO0LHbneVXSrtI53H2UYlX6WKx1101Hat2lwrV8Y16J7z+lq4S4YmDX5MH8e9v+Uo4yaqKeopdE8L4/zsbbskDkDPGLT2yMIAdDiuoF/gauV8xmKPjzVXvgjuiub7hWeOQ2mxKOQ3o/pn+ve0Y8he2PKAZI0flN3G29Xb8qswANye7UcYDHB7Md6Iw4CMatI+u+eiaj6691+jasDPzAa2pqua36dZlMndmvh6oxIvY/SIXP7L4u1SFH6x66ZM6aWzKWKfTSRr3dIxB6mImu2MxmjuQEuPzBaNFhGaKo1qOYMNO+PPf</diagram></mxfile>
2203.10761/main_diagram/main_diagram.pdf ADDED
Binary file (25.2 kB). View file
 
2203.10761/paper_text/intro_method.md ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Deep Learning has become the bedrock of modern AI for many tasks in machine learning [3] such as computer vision [19; 18] and natural language processing [12]. Using a large number of learnable parameters, deep neural networks (DNNs) can recognize subtle dependencies in large training datasets to be later leveraged to perform accurate predictions on unseen data. However, models might overfit the training set without constraints or enough data [53]. To this
4
+
5
+ ![](_page_0_Figure_7.jpeg)
6
+
7
+ Figure 1: visualization of hard mixed sample mining by class activation mapping (CAM) [49] of ResNet-50 on ImageNet. From left to right, CAM of top-2 predicted classes using mixup crossentropy (MCE) and decoupled mixup (DM) loss.
8
+
9
+ <sup>∗</sup>Equal contribution. † Stan Z. Li (Stan.ZQ.Li@westlake.edu.cn) is the corresponding author.
10
+
11
+ ![](_page_1_Figure_0.jpeg)
12
+
13
+ Figure 2: Illustration of the two types of hard mixed samples in CutMix with 'Squirrel' and 'Panda' as an example. Hard mixed samples indicate that the mixed sample contains salient features of a class, but the value of the corresponding label is small. MCE loss fails to leverage these samples.
14
+
15
+ end, regularization techniques have been deployed to improve generalization [61], which can be categorized into data-independent or data-dependent ones [16]. Some data-independent strategies, for example, constrain the model by punishing the parameters' norms, such as weight decay [40]. Among data-dependent strategies, data augmentations [51] are widely used. The augmentation policies often rely on particular domain knowledge [58] in different fields.
16
+
17
+ Mixup [77], a data-dependent augmentation technique, is proposed to generate virtual samples by a linear combination of data pairs and the corresponding labels with the mixing ratio λ ∈ [0, 1]. Recently, a line of optimizable mixup methods are proposed to improve mixing policies to generate object-aware virtual samples by optimizing discriminative regions in the data space to match the corresponding labels [56; 23; 22] (referred to as *dynamic* methods). However, although the *dynamic* approach brings some performance gain, the extra computational overhead degrades the efficiency of mixup augmentation significantly. Specifically, the most computation of *dynamic* methods is spent on optimizing label-mismatched samples, but the question of why these label-mismatched samples should be avoided during the mixup training has rarely been analyzed. In this paper, we find these mismatched samples are completely underutilized by *static* mixup methods, and the problem lies in the loss function, mixed cross-entropy loss (MCE). Therefore, we argue that these mismatched samples are not only not *static* mixup disadvantages but also hard mixed samples full of discriminative information. Taking CutMix [74] as an example, two types of hard mixed samples are shown on the *right* of Figure 2. Since MCE loss forces the model's predictions to be consistent with the soft label distribution, *i.e.,* the model cannot give high-confidence predictions for the relevant classes even if the feature is salient in hard mixed samples, we can say that these hard samples are not fully leveraged.
18
+
19
+ From this perspective, we expect the model to be able to mine these hard samples, *i.e.,* to give confident predictions according to salient features for localizing discriminative characteristics, even if the proportion of features is small. Motivated by this finding, we introduce simple yet effective Decoupled Mixup (DM) loss, a mixup objective function for explicitly leveraging the hard samples during the mixup training. Based on the standard mixed cross-entropy (MCE) loss, an extra decoupled regularizer term is introduced to enhance the ability to mine underlying discriminative statistics in the mixed sample by independently computing the predicted probabilities of each mixed class. Figure 1 shows the proposed DM loss can empower the *static* mixup methods to explore more discriminative features. Extensive experiments demonstrate that DM achieves data-efficiency training on supervised and semi-supervised learning benchmarks. Our contributions are summarized below:
20
+
21
+ - Unlike those dynamic mixup policies that design complicated mixing policies, we propose DM, a mixup objective function of mining discriminative features adaptively.
22
+ - Our work contributes more broadly to understanding mixup training: it is essential to focus not only on the smoothness by regression of the mixed labels but also on discrimination by encouraging the model to give reliable and confident predictions.
23
+ - Not only in supervised learning but the proposed DM can also be easily generalized to semi-supervised learning with a minor modification. By leveraging the unlabeled data, it can reduce the conformation bias and significantly improve performance.
24
+ - Comprehensive experiments on various tasks verify the effectiveness of DM, *e.g.*, DMbased *static* mixup policies achieve a comparable or even better performance than *dynamic* methods without the extra computation.
25
+
26
+ # Method
27
+
28
+ Mixed Cross-Entropy Underutilizes Mixup Let us define $y \in \mathbb{R}^C$ as the ground-truth label with C categories. For labeled data point $x \in \mathbb{R}^{\mathcal{W} \times \mathcal{H} \times \mathcal{C}}$ whose embedded representation z is obtained from the model M and the predicted probability p can be calculated through a Softmax function $p = \sigma(z)$ . Given the mixing ratio $\lambda \in [0,1]$ and $\lambda$ -related mixup mask $H \in \mathbb{R}^{\mathcal{W} \times \mathcal{H}}$ , the mixed sample $(x_{(a,b)},y_{(a,b)})$ can be generated as $x_{(a,b)} = H \odot x_a + (1-H) \odot x_b$ , and $y_{(a,b)} = \lambda y_a + (1-\lambda)y_b$ , where $\odot$ denotes element-wise product, $(x_a,y_a)$ and $(x_b,y_b)$ are sampled from a labeled dataset $L = \{(x_a,y_a)\}_{a=1}^{n_L}$ . Note that superscripts denote the index; subscripts indicate the type of data, $e.g., x_{(a,b)}$ represents a mixed sample generated from $x_a$ and $x_b$ ; $y^i$ indicates the label value on i-th position. Since the mixup labels are obtained by somehow $\lambda$ -based interpolation, the standard CE loss weighted by $\lambda$ , $\mathcal{L}_{CE} = y_{(a,b)}^T \log \sigma(z_{(a,b)})$ , is typically used as the objective in the mixup training:
29
+
30
+ $$\mathcal{L}_{MCE} = -\sum_{i=1}^{C} \left( \lambda \mathbb{I}(y_a^i = 1) \log p_{(a,b)}^i + (1 - \lambda) \mathbb{I}(y_b^i = 1) \log p_{(a,b)}^i \right). \tag{1}$$
31
+
32
+ where $\mathbb{I}(\cdot) \in \{0,1\}$ is an indicator function that values one if and only if the input condition holds. Noticeably, these two items of Equation 1 are classifying $y_a$ and $y_b$ while keeping the linear
33
+
34
+ consistency with mixing coefficient $\lambda$ . As a result, DNNs with this mixup consistency prefer relatively less confident results in high-entropy behaviour [46] and longer training time in practice. The main reason is that in addition to $\lambda$ constraint, the competing relationships defined by Softmax in $\mathcal{L}_{MCE}$ are the main cause of the confidence drop, which is more obvious when dealing with hard mixed samples. Precisely, the competition between the mixed class a and b in Equation 1 can severely affect the prediction of a single class; that is, interference from other classes prevents the model from focusing its attention. This typically causes the model to be insensitive to the salient features of the target and thus undermines model performance, as shown in Figure 1. Although the dynamic mixup alleviates this problem, the extra time overhead is unavoidable if only focusing on mixing policies on the data level. Therefore, the key challenge is to design an ideal objective function for mixup training that maintains the smoothness of the mixup and can simultaneously explore the discriminative features without any computation costs.
35
+
36
+ To achieve the above goal, we first dive into the $\mathcal{L}_{MCE}$ and propose the efficient decoupled mixup.
37
+
38
+ **Proposition 1.** Assuming $x_{(a,b)}$ is generated from two different classes, minimizing $\mathcal{L}_{MCE}$ is equivalent to regress corresponding $\lambda$ in the gradient:
39
+
40
+ $$(\nabla_{z_{(a,b)}} \mathcal{L}_{MCE})^{i} = \begin{cases} -\lambda + \frac{\exp(z_{(a,b)}^{i})}{\sum_{c} \exp(z_{(a,b)}^{c})}, & i = a \\ -(1-\lambda) + \frac{\exp(z_{(a,b)}^{i})}{\sum_{c} \exp(z_{(a,b)}^{c})}, & i = b \\ \frac{\exp(z_{(a,b)}^{i})}{\sum_{c} \exp(z_{(a,b)}^{c})}, & i \neq a, b \end{cases} \tag{2}$$
41
+
42
+
43
+
44
+
45
+ **Softmax Degrades Confidence.** As we can see from Proposition 1, the predicted probability of $x_{(a,b)}$ will be consistent with $\lambda$ , and the probability is computed from the Softmax directly. The Softmax forces the sum of predictions to one (winner takes all), which is undesirable in mixup classification, especially when there are multiple and non-salient targets in mixed samples, e.g., hard mixed samples, as shown in Figure 2. The standard Softmax in $\mathcal{L}_{MCE}$ deliberately suppresses confidence and produces high-entropy predictions by coupling all classes. As a consequence, $\mathcal{L}_{MCE}$ makes many static mixup methods require longer epochs than vanilla training to achieve the desired results [57; 73]. Based on previous analysis, a novel mixup objective, decoupled mixup (DM), is proposed to remove the Coupler and thus utilize the hard mixed samples adaptively, finally improving the performance of mixup methods. Specifically, for mixed data points $x_{(a,b)}$ generated from a random pair in labelled dataset L, an encoded mixed representation $z_{(a,b)} = f_{\theta}(x_{(a,b)})$ is generated by a feature extractor $f_{\theta}$ . A mixed categorical probability of i-th class is attained:
46
+
47
+ $$\sigma(z_{(a,b)})^{i} = \frac{\exp(z_{(a,b)}^{i})}{\sum_{c} \exp(z_{(a,b)}^{c})}.$$
48
+ (3)
49
+
50
+ **Decoupled Softmax.** where $\sigma(\cdot)$ is standard Softmax. Equation 3 shows how the mixed probabilities are computed for a mixed sample. The competition between a and b is the main reason that results in low confidence of the model, *i.e.*, the sum of semantic information of hard mixed samples is larger than "1" defined by Softmax. Therefore, we propose to simply remove the competitor class in Equation 3 to achieve decoupled Softmax. The score on i-th class is not affected by the j-th class:
51
+
52
+ $$\phi(z_{(a,b)})^{i,j} = \frac{\exp(z_{(a,b)}^i)}{\exp(z_{(a,b)}^j) + \sum_{c \neq j} \exp(z_{(a,b)}^c)}.$$
53
+ (4)
54
+
55
+ where $\phi(\cdot)$ is the proposed decoupled Softmax. In Equation 4, by removing the competitor, compared with Equation 1, the decoupled Softmax makes all items associated with $\lambda$ become $-1$ in the gradient; the derivation is given in Appendix A.1. Our Proposition 2 verifies that the expected results are achieved with the decoupled Softmax.
56
+
57
+ **Proposition 2.** With the decoupled Softmax defined above, decoupled mixup cross-entropy $\mathcal{L}_{DM}$ can boost the prediction confidence of the interested classes mutually and escape from the $\lambda$ -constraint:
58
+
59
+ $$\mathcal{L}_{DM} = -\sum_{i=1}^{c} \sum_{j=1}^{c} y_a^i y_b^j \left( \log \left( \frac{p_{(a,b)}^i}{1 - p_{(a,b)}^j} \right) + \log \left( \frac{p_{(a,b)}^j}{1 - p_{(a,b)}^i} \right) \right). \tag{5}$$
60
+
61
+ ![](_page_4_Figure_0.jpeg)
62
+
63
+ Figure 3: Results illustration of applying decoupled mixup. Left: taking MixUp as an example, our proposed decoupled mixup cross-entropy, DM(CE), significantly improves training efficiency by exploring hard mixed samples; Middle: Acc vs. cost on ImageNet-1k; Right: Top-2 acc is calculated when the top-2 predictions equal $\{y_a, y_b\}$ .
64
+
65
+ **The Decoupled Mixup.** The proofs of Proposition 1 and 2 are given in the Appendix. In practice, the original smoothness of $\mathcal{L}_{MCE}$ should not be lost, and thus the proposed DM is a regularizer for discriminability. The final form of decoupled mixup can be formulated as follows:
66
+
67
+ $$\mathcal{L}_{DM(CE)} = -\left(\underbrace{y_{(a,b)}^T \log(\sigma(z_{(a,b)}))}_{\mathcal{L}_{MCE}} + \eta \underbrace{y_{[a,b]}^T \log(\phi(z_{(a,b)})) y_{[a,b]}}_{\mathcal{L}_{DM}}\right).$$
68
+
69
+ where $y_{(a,b)}$ indicates the mixed label while $y_{[a,b]}$ is two-hot label encoding, $\eta$ is a trade-off factor. Notice that $\eta$ is robust and can be set according to the character of mixup methods (see Sec. 5.4).
70
+
71
+ Practical consequences of such simple modification on mixup and the performance:
72
+
73
+ **Make What Should be Certain More Certain.** As we expected, mixup training with a decoupling mechanism will be more accurate and confident when handling our artificially constructed hard mixed samples (built using PuzzleMix). Figure 3 *right* demonstrates that the model trained with decoupled mixup mostly doubled the top-2 accuracy on these mixed samples, which also verifies that the information contained in mixed samples is beyond the "1" defined by standard Softmax. More interestingly, this advantage of decoupled mixup, i.e., higher confidence and accuracy, can be further amplified in semi-supervised learning due to the uncertainty of pseudo-labeling.
74
+
75
+ **Enhance the Training Efficiency.** It is straightforward to notice that there is no extra computation cost when using DM in vanilla mixup training, and the performance we can achieve is the same or even better than optimizable mixup policies, *i.e.*, PuzzleMix, CoMixup, *etc*. Figure 3 *left* and *middle* show that decoupled mixup unveils the power of static mixup, making it more accurate and faster.
76
+
77
+ With the high-accurate nature of decoupled mixup for mining hard mixed samples, semi-supervised learning is a suitable scenario to propagate the accurate label from labeled space to unlabeled space by using asymmetrical mixup. In addition, we can also generalize the decoupled mechanism into the binary cross-entropy for boosting the multi-classification task.
78
+
79
+ Based on labeled data $L=\{(x_a,y_a)\}_{a=1}^{n_L}$ , if we further consider unlabeled data $U=\{(u_a,v_a)\}_{a=1}^{n_U}$ , decoupled mixup can be a strong connection between L and U. Recall the confirmation bias [67] problem of SSL: the performance of the student model is restricted by the teacher model when learning from inaccurate pseudo-labels. To fully use L and strengthen the teacher model to provide more robust and accurate predictions, the unlabeled data with a large mixing weight $(1-\lambda)$ can be used to mix with the labeled data to form hard mixed samples. With these hard mixed samples, we can employ decoupled mixup in semi-supervised learning effectively. Since only the label of L is accurate, we need to make a slight asymmetric modification to the decoupled mixup, called the Asymmetrical Strategy (AS). Formally, given the labeled and unlabeled datasets L and U, AS builds a reliable connection by generating hard mixed samples between L and U in an asymmetric manner ( $\lambda < 0.5$ ):
80
+
81
+ $$\hat{x}_{(a,b)} = \lambda x_a + (1 - \lambda)u_b; \quad \hat{y}_{(a,b)} = \lambda y_a + (1 - \lambda)v_b.$$
82
+
83
+ Due to the uncertainty of the pseudo-label, only the labeled part is retained in $\mathcal{L}_{DM}$ :
84
+
85
+ $$\hat{\mathcal{L}}_{DM} = y_a^T \log \left( \phi(z_{(a,b)}) \right) y_b,$$
86
+
87
+ where $y_a$ and $y_b$ are one-hot labels from L. AS could be regarded as a special case of DM that only decouples on labeled data. Simply replacing $\mathcal{L}_{DM}$ with $\hat{\mathcal{L}}_{DM}$ can leverage the hard samples and alleviate the confirmation bias in semi-supervised learning.
88
+
89
+ **Binary Cross-entropy Form of DM.** Different from Softmax-based classification, we can also build decoupled mixup in multi-label classification tasks (1-vs-all) by using the mixup binary cross-entropy (MBCE) loss [63] ($\sigma(\cdot)$ denotes Sigmoid rather than Softmax in this case). Proposition 2 demonstrates that the decoupled CE can mutually enhance the confidence of predictions for the interested classes and be free from $\lambda$ limitations. Similarly, for MBCE, since it is not inherently bound to mutual interference between classes by Softmax, we have to preserve partial consistency and encourage more confident predictions, and thus propose a decoupled mixup binary cross-entropy loss, DM(BCE).
90
+
91
+ ![](_page_5_Figure_5.jpeg)
92
+
93
+ Figure 4: Rescaled label of different $\lambda$ value.
94
+
95
+ To this end, a rescaling function $r:\lambda,t,\xi\to\lambda'$ is designed to achieve this goal. The mixed label is rescaled by $r(\cdot)$ : $y_{mix}=\lambda_a y_a+\lambda_b y_b$ , where $\lambda_a$ and $\lambda_b$ are rescaled. The rescaling function is defined as follows:
96
+
97
+ $$r(\lambda, t, \xi) = \left(\frac{\lambda}{\xi}\right)^t, \quad 0 \le t, \; 0 \le \xi \le 1,$$
98
+ (6)
99
+
100
+ where $\xi$ is the threshold, t is an index to control the convexity. As shown in Figure 4, Equation 6 has three situations: (a) when $\xi=0$ , t=0, the rescaled label is always equal to 1, as two-hot encoding; (b) when $\xi=1$ , t=1, $r(\cdot)$ is a linear function (vanilla mixup); (c) the rest curves demonstrate t is the parameter that changes the concavity and $\xi$ is responsible for truncating.
101
+
102
+ **Empirical Results.** In the case of interpolation-based mixup methods (*e.g.*, Mixup, ManifoldMix, *etc.*) that keep linearity between the mixed label and sample, the decoupled mechanism can be introduced by only adjusting threshold t. In the case of cutting-based mixing policies (*e.g.*, CutMix, *etc.*) where the mixed samples and labels have a square relationship (generally a convex function), we can approximate the convexity by adjusting $\xi$ , which are detailed in Sec. 5.4 and Appendix C.5.
2203.12997/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2203.12997/paper_text/intro_method.md ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Dimensionality reduction techniques are now increasingly used in many fields of science and have to cope with an ever increasing size of real-world datasets. They play an important role both for the visualization and the processing of high dimensional data. Much of current research is focused on finding unsupervised algorithms that are both scalable to massive data and are able to preserve the structure of data in fewer dimensions. Most of them attempt to retain the local or global structure of the data by optimizing over pairwise distances in the target space. Two main directions for the current dimension reduction techniques can be identified with respect to how such local or global neighborhood is preserved in terms of the distances. Methods such as PCA [\[16\]](#page-8-0), MDS [\[19\]](#page-8-1) and Sammon mapping [\[32\]](#page-8-2) try
4
+
5
+ <span id="page-0-0"></span>![](_page_0_Figure_7.jpeg)
6
+
7
+ Figure 1. Visualization of the entire ImageNet dataset. Speed and embedding quality of dimension reduction methods.
8
+
9
+ to preserve the global distances among all samples in the data. Whereas more recent popular methods such as t-SNE [\[36,](#page-9-0)[37\]](#page-9-1), LargeVis [\[34\]](#page-9-2), and UMAP [\[27](#page-8-3)[,31\]](#page-8-4) seek to additionally preserve the local structure e.g. by preserving the distance relations in the k-neighborhood of each data sample. To retain such relations, these methods generally have to solve an optimization problem with the goal of matching the distribution of distances in the target space with their distribution in the original space. For instance, t-SNE minimizes the Kullback-Leibler divergence between distributions of k-nearest neighbor (k-NN) distances fitted in the high and low-dimensional space. Similarly, the more recent method UMAP optimizes the embedding in the target space with the goal of preserving the 1-skeleton of fuzzy simplicial sets constructed in the original space. Such optimizations are computationally expensive in nature and account <span id="page-1-0"></span>for the main complexity of these algorithms, thus limiting their run-time performance on large scale datasets.
10
+
11
+ In this paper, we present a different approach which instead of relying on point-level optimization, captures multistage NN properties of the data and, using those, projects points in a simple algorithmic way. The main tools used to build this structure are Nearest Neighbor Graphs (NNGs) which have been well studied. In [\[13\]](#page-8-5) Eppstein *et al*. show that for a 1-NNG, its NN relations are well preserved in a low dimensional space when the edges are placed on a monotone logical grid [\[9\]](#page-8-6). An effective strategy for embedding the NNG into an *l*-dimensional grid is to embed the individual components of the graph separately. The connected components of NNGs capture clusters of samples. Recursively building 1-NNGs on the previously obtained connected components provides a hierarchical view on how samples are merged together at successive levels. Considering each connected component as a node in the hierarchy one can identify complete paths on how these nodes and their associated samples successively merge together from bottom to top. Such a hierarchical node graph provides a view of data in terms of how the local neighborhood is distributed in the high dimensional space. After an inexpensive preliminary projection of the high dimensional data on a desired low dimensional space we can use the original hierarchical node graph in the target space to enforce the local structure directly. We achieve this with a fast recursive top-down approach by moving the clusters of samples towards these nodes starting at the top level with the least number of nodes and moving progressively downward to reach the bottom *i.e*. the finer level.
12
+
13
+ Since our proposal is rooted in obtaining the Hierarchical 1-Nearest Neighbor graph based Embedding, we term the method h-NNE. Figure [1](#page-0-0) depicts an example of embedding the ResNet-50 features of the full ImageNet dataset. As seen, in comparison to the current state-of-the-art methods, not only do we achieve competitive embedding quality - indicated by the trustworthiness metric - but also a significantly faster run-time. To summarize, our main contribution is an alternative dimensionality reduction and visualization method which does not rely on expensive optimization methods. This makes it operate at a magnitude faster than existing methods, without requiring hyperparameter tuning, and maintaining similar performance. In the following sections, before delving into the proposed method, we will discuss the related works to place it in context and followed by experiments and comparisons with the state-ofthe-art on a diverse collection of datasets.
14
+
15
+ # Method
16
+
17
+ Our projection algorithm consists of three main steps: building a tree hierarchy based on 1-NNGs, computing a preliminary projection with an approximate version of PCA and adjusting the projected point locations based on the constructed tree. The projected point location adjustment can be enhanced with an optional inflation step which can be used to improve visualization. In the following sections, we will elaborate on each step and provide some evidence of their validity. Figure 2 gives an overview of the method.
18
+
19
+ Several projection methods start by defining a structure over the data which encodes the relative positions of points and then project in a way that preserves this structure. For example, UMAP relies on a weighted graph encoding nearest neighbor relations while t-SNE uses a collection of local distributions based again on the nearest neighbor relations. In our case, we strive for a structure which captures both local neighbor properties of points and global clustering properties. In order to achieve this with a low computational cost, and at the same time keep the approach simple and parameter free, we build a hierarchy based on 1-NN relations between points. This approach is inspired by classical work on nearest neighbor graphs such as [13] and the FINCH clustering method [33].
20
+
21
+ Assume that our dataset is $\mathbf{X} = \{\mathbf{x}_i\}_{i \leq N}$ , where $\mathbf{x}_i \in \mathbb{R}^D$ . The first step in constructing the hierarchy entails building $NNG(\mathbf{X})$ which is a directed graph that connects each point to its nearest neighbor. This can be performed by using any nearest neighbor or approximate nearest neighbor algorithm. Next, we identify the connected components of $NNG(\mathbf{X})$ , denoted by $\{NNG_i(\mathbf{X})\}_{i \leq g_0}$ , which form directed graphs with all edges pointing to a single bi-root. For each graph $NNG_i(\mathbf{X})$ , we compute its centroid $\mathbf{c}_i^{(0)} = \frac{1}{|NNG_i(\mathbf{X})|} \sum_{\mathbf{x} \in NNG_i(\mathbf{X})} \mathbf{x}$ and thus form a new set of points $\mathbf{C}^{(0)} =$
22
+
23
+ $\{\mathbf{c}_i^{(0)}\}_{i \leq g_0}$ . We then repeat the same process of computing $NNG(\mathbf{C}^{(0)})$ and its components' centroids to derive $\mathbf{C}^{(1)}$ and continue until we reach the smallest set of centroids $\mathbf{C}^{(l)}$ which contains at least three points. The NNG hierarchy is then the tree $T_{NNG}(\mathbf{X}) = \langle \bigcup_{i < l} \mathbf{C}^{(i)} \cup \mathbf{X}, E \rangle$ ,
24
+
25
+ where each centroid is connected to each of the points of the NNG component which corresponds to it. Figure 3 displays a single step of this iterative process.
26
+
27
+ In comparison with k-NNGs for k > 1, 1-NNGs are quite small in size, which make them well-suited to construct a fine-grained hierarchy with several levels. A sim-
28
+
29
+ <span id="page-3-1"></span>ple rule of thumb is that the number of points decreases on average by a factor of 0.31 on every step. This can be extracted from the following theorem assuming that, at least locally, the data and centroids on higher levels are uniformly distributed. Though the validity of the assumption is hard to verify, we notice that this rule holds for all observed datasets.
30
+
31
+ **Theorem 1** (Eppstein, Paterson, Yao [13]). The expected number of components in $NNG(\mathbf{X})$ for a uniform random point set $\mathbf{X}$ in a unit square is asymptotic to approximately $0.31|\mathbf{X}|$ .
32
+
33
+ Though the clustering tree structure is enough to produce a projection respecting the partitions of the original dataset on separate levels, the relative positions of the points contained in the graph components $NNG_i(\mathbf{X})$ and $NNG_i(\mathbf{C}^{(k)})$ need to be determined in the target space. One could use a random projection, or even start with random points, but there is an extra gain in both preservation scores and in visual quality when using some meaningful initial projection. We choose to use PCA and to accelerate its computation, we estimate the covariance matrix of the data using the centroids $C^{(i)}$ of a predefined level of the hierarchy. One could as well sub-sample the original point set X, but we notice that using the centroids increases stability and avoids deviation of the initialization between runs. We experimentally verify that this approximation of principal components produces results comparable to using PCA on the full data. We provide this analysis in the supplementary.
34
+
35
+ We choose the level of centroids to be the lowest level of the hierarchy such that all levels above have cardinality less than 1000. This implies that if the dataset is small, thus for all $i, |\mathbf{C}^{(i)}| < 1000$ , then the PCA will directly be computed on $\mathbf{X}$ . The advantage of this approximation is that we can reduce the computational cost of PCA from $\mathcal{O}(N \cdot D^2)$ to $\mathcal{O}(D^2)$ , since $N$ is replaced by a constant of at most 1000.
36
+
37
+ Once the eigenvectors of PCA are computed, say in a matrix V, then all points of $\mathbf{X}$ and all centroids $\mathbf{C}_i$ are projected from the higher dimension D to the lower dimension d by multiplying with this single shared matrix. We denote the projections of such points by a tilde superscript, for example $\tilde{\mathbf{c}}_i^{(k)}$ for centroids and $\tilde{\mathbf{x}}_i$ for points of the dataset.
38
+
39
+ This is the central part of the algorithm and the goal is to move the points hierarchically so that they occupy the projection space $\mathbb{R}^d$ following the tree $T_{NNG}(\mathbf{X})$ in a way that the 1-NN relationships are preserved over all levels. Once we have a preliminary projection for all points and centroids, we start from $\mathbf{C}^{(l)}$ and consider its projected centroids $\{\tilde{\mathbf{c}}_i^{(l)}\}_{i < q_l}$ . Those centroids form a Voronoi tessella-
40
+
41
+ <span id="page-3-0"></span>![](_page_3_Figure_8.jpeg)
42
+
43
+ Figure 3. The induction step in building $T_{NNG}(\mathbf{X})$ . All 1-NNG components are mapped to their centroids which form the basis for the next step.
44
+
45
+ tion of $\mathbb{R}^d$ and an ideal way to place the lower level projected centroids $\{\tilde{\mathbf{c}}_i^{(l-1)}\}_{i\leq g_{l-1}}$ would be to select for each $\tilde{\mathbf{c}}_i^{(l)}$ the centroids of level l-1 which correspond to it, translate them so that they are centered around $\tilde{\mathbf{c}}_i^{(l)}$ and finally spread them to occupy the corresponding Voronoi cell. In order to perform this process in an efficient and easy to vectorize way, we choose to use the already known distance $d_i^{(l)}$ of each $\tilde{\mathbf{c}}_i^{(l)}$ to its nearest neighbor in $\tilde{\mathbf{C}}^{(l)}$ and then scale the translated, lower level centroids to a d-ball of radius $\frac{1}{3}d_i^{(l)}$ . This distance guarantees that points belonging to neighboring centroids will not form nearest neighbor relationships cross-centroids, thus preserving the separation encoded in $T_{NNG}(\mathbf{X})$ . Figure 4 illustrates this process.
46
+
47
+ Once the points of $\tilde{\mathbf{C}}^{(l-1)}$ are placed around the points of $\tilde{\mathbf{C}}^{(l)}$ , we use them to translate the points of $\tilde{\mathbf{C}}^{(l-2)}$ around them, the same way as before. This step is repeated until we reach the level of $\mathbf{X}$ which forms the final projection.
48
+
49
+ There is still one issue we need to address. This is the fact that though the radius $\frac{1}{3}d_i^{(l)}$ guarantees the separation of neighboring centroids on one step, it could be the case that the borders of this d-ball are crossed by points moved on later step of the iteration (see again figure 4). Below we compute a shrinking coefficient for this radius, such that this guarantee still holds for the points moved in later steps.
50
+
51
+ **Lemma 1.** Given a d-ball $B(\mathbf{c}_i^{(k)}, r)$ centered on a centroid, all points belonging to $\mathbf{c}_i^{(k)}$ translated with the h-
52
+
53
+ NNE algorithm with radii multiplied by a factor of $\frac{3}{5}$ , lie inside $B(\mathbf{c}_i^{(k)}, r)$ .
54
+
55
+ *Proof.* Assume that a factor s is used to reduce the computed radii on each step of h-NNE. On the first step, all lower level centroids are translated and scaled so that they lie in a d-ball of radius sr. The worst case scenario for the next step is that there are two antidiametrical points $\mathbf{c}_1^{(k)}$ , $\mathbf{c}_2^{(k)}$ which are nearest neighbors. In that case, the points of the next step belonging to $\mathbf{c}_1^{(k)}$ will be placed inside a d-ball around it of radius $\frac{2}{3}sr$ since $d(\mathbf{c}_1^{(k)},\mathbf{c}_2^{(k)})=2sr$ . This means that the largest distance of any of those points to $\mathbf{c}_i^{(k)}$ is $sr+\frac{2}{3}s^2r$ . By recursively computing those worst case scenarios, we get that for infinite steps of the algorithm, the most distant point will be placed in a distance of at most
56
+
57
+ $$\sum_{j \in \mathbf{N}} (\frac{2}{3})^j s^{j+1} r = sr \sum_{j \in \mathbf{N}} (\frac{2}{3}s)^j = \frac{sr}{1 - \frac{2}{3}s} \tag{1}$$
58
+
59
+ Therefore, in order for all points to lie inside the original ball $B(\mathbf{c}_i^{(k)},r)$ , we need that $\frac{sr}{1-\frac{2}{3}s} \leq r$ from which we get that $s \leq \frac{3}{5}$ .
60
+
61
+ The above bound guarantees that if we place new points in a ball of radius $\frac{3}{5} \cdot \frac{1}{3} = 0.2$ times the distance to the nearest neighbor, then the nearest neighbors of points will be restricted in the clusters formed in the NNG hierarchy. In practice the worst case scenario of anti-diametrical points does not occur so often. In low dimensions where points are more dense one can use radii of $\frac{1}{3}$ of the 1-NN distance or even more without noticeable drop in k-NN preservation. This can be particularly useful for visualization, as it can help make plots more spread on the plane or 3D space.
62
+
63
+ **Point cluster inflation for visualization purposes.** The use of a single linear projection for all points can result in cluttered point clusters when they are not well aligned to the global principal components used to project. Though this has minimal impact to performance, it leads to poorer visualization which contains artifacts from this initial projection. In order to enhance the shape of images without sacrificing speed and without adding new hyper-parameters, we add the option to inflate potentially squeezed point clusters using six local rotations with angles equally distanced in the interval $[0, \frac{\pi}{2}]$ , followed by a scaling and the inverse rotation. This results in an output almost equivalent to that of rotating the clouds to their PCA principal components, scaling them, and then rotating them back to the original orientation but much less computationally expensive.
64
+
65
+ **Projecting new points.** The projection of new points can be performed by repeating the same algorithm as before, just for the individual points and by descending only the second level of the hierarchy. If x is the new point, we
66
+
67
+ <span id="page-4-0"></span>![](_page_4_Figure_7.jpeg)
68
+
69
+ Figure 4. Voronoi cells of the top level centroids in MNIST. The circles have radius $\frac{1}{3}$ the distance from the nearest neighbor and the points are projected with a shrinking factor of exactly this factor of $\frac{1}{3}$ . One can notice that the final points cross the boundaries of the circle. Nevertheless, the density of the data in 2D results in a situation where no severe overlaps appear.
70
+
71
+ first identify the closest centroid in $\mathbf{C}^{(1)}$ using our ANN algorithm of choice. Then the point is transformed by scaling, applying the pre-computed PCA and normalizing the position of the point relative to the centroid based on the corresponding radius. If point cluster inflation was used in the original projection, then the relevant rotations and scalings are also performed before the final normalization step.
72
+
73
+ There are three steps in h-NNE which make up its complexity: the construction of the $T_{NNG}(\mathbf{X})$ tree, the preliminary PCA projection and the hierarchical point translation. Since each component of the NNG graph contains at least two points, the height of $T_{NNG}(\mathbf{X})$ is $\mathcal{O}(\log N)$ . That implies that given that the computation of approximate 1-NN is at least $\mathcal{O}(N)$ , no matter which method is used, the total complexity is equal to the complexity of a single Approximate Nearest Neighbor (ANN) step including any preparation on the dataset (e.g. building indexing trees) and querying once on all points of the dataset. We denote the ANN complexity with $\mathcal{O}(ANN(N,D))$ . The PCA projection step requires $\mathcal{O}(Dd^2)$ , given we have fixed the number of used samples to a constant number of points. Finally, the point translation steps are $\mathcal{O}(N \log Nd + ANN(N, d))$ , again because of the logarithmic height of the tree, the fact that we use group-by operations, and the fact that we need to compute nearest neighbors to find the radius of the d-balls where clusters are expanded. Thus, overall, the algorithm has $\mathcal{O}(ANN(N,D) + Dd^2 + N \log Nd)$ complexity.
74
+
75
+ The complexity ANN(N) is not easy to compute. In
76
+
77
+ <span id="page-5-3"></span><span id="page-5-0"></span>
78
+
79
+ | | Higgs [3] | Google News [28] | COIL 20 [29] | CIFAR-10 [18] | F-MNIST [40] | ImageNet [11] | BBT [6] | Buffy [6] | MNIST | [20, 24] |
80
+ |-----------|--------------|------------------|--------------|---------------|--------------|---------------|----------|-----------|--------|----------|
81
+ | Type | Sensor | Text | Objects | Objects | Objects | Objects | Videos | Videos | Digits | Digits |
82
+ | #Classes | 2 | - | 20 | 10 | 10 | 1000 | 5 | 6 | 10 | 10 |
83
+ | #Samples | 11M | 3M | 1440 | 60K | 70K | 1.2M | 200K | 206K | 70k | 8M |
84
+ | Dimension | 28 | 300 | 16384 | 3072 | 784 | 2048 | 2048 | 2048 | 784 | 784 |
85
+ | Features | Measurements | Word2Vec | Pixels | Pixels | Pixels | ResNet50 | ResNet50 | ResNet50 | Pixels | Pixels |
86
+
87
+ Table 1. Datasets of different size ranging from 1400 to 11 million samples in 28 to 16384 dimensions used in experiments.
88
+
89
+ the worst case scenario, it could be $N^2$ when using linear search. Some of the older exact methods [8, 30] achieve $N\log N$ complexity, but unfortunately scale exponentially on D. Approximate methods such as HNSW [25], NNDescent [12] and ScaNN [14] achieve good performance on real-world datasets but support it with experimental evidence and no complexity bounds. In our current implementation we use PyNNDescent [26] which is a tuned version of NNDescent. The empirical complexity of NNDescent is approximately $O(N^{1.14})$ for datasets with small intrinsic dimensionality. Finally, an interesting analysis [4] provides synthetic families of datasets where NNDescent attains a complexity of $O(N^2)$ and $O(N\log N)$ respectively. Further work of the authors [5], suggests an empirical complexity of $K^2N\log N$ , where K is the number of neighbors.
2204.09245/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-01-13T16:06:08.000Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36" version="16.2.6" etag="du5i3Fmlp8iYpa8Ym0zQ" type="google"><diagram id="QOxrN2WPz9V7dQ1-oje_">7VtLc+MoEP41rpo9bEpvy8eMJ5k5ZHZd5ezu5IglLLGRhAvh2Npfv8hCEgg5UWLHODPOIQXNQ/TXHzRNl0f2NN1+JWAVf8chTEaWEW5H9peRZU0Mk/0vBUUlcHynEkQEhZXIbAVz9B/kQoNL1yiEudSRYpxQtJKFAc4yGFBJBgjBG7nbEifyV1cg4l80WsE8AAlUuv2DQhpXUt8Ven+DKIrrL5sGb0lB3ZkL8hiEeCOI7JuRPSUY06qUbqcwKbGrcanG3e5pbRZGYEaHDLCqAU8gWXPd+LpoUSsbEbxe8W6QULjtgxgski5i7RLMRjFGCIhTSEnButQTTfgQTgbb4/VNC20DYCzAWvcD3JpRM3WrMCtwnfv1t1/Wn6mfhbDsb47sz5sYUThfgaBs3TB2M1lM04Q3L1GSTHGCyW6sHQLoLwMmzynBj1Bo8QIfLpasZS+yIoJWP4KnQMhREJpOvzLBDJAcEgUtpgWVIZFVz3AGOzhxEUhQlLFqwLRmE9ufS0wQ23TXvCFFYVh+ptcGrZWMA0GtaTiQhfYRMHYVjOdFRkHA1OdIoyz6CaE+BbbeyzscZuF16RVagEKQx82WFwAu5TNAGWjZTmIZdgN77QksBSK4RfSHUH4Qyl+2YqWoKxnT8gf/6K7yILa0g3a1QjIFDGvntc8QTHm8JgGUNjgFJIJUED1rLrfHWrWMwARQ9CQvos+E/AszjNjy9vqD37unWLV4Pkr0bZ2JLKczkWl2Zqp0VmbaUarRexDLxiqpmBnmvIoJjXGEM5DctNIOSdo+dxivuOn/hZQW/P4D1hTLbKx5ZYq8unKHMcuQmFWNej23RCL5KpFMa/BtYDBrhm58X9n47B6KloghbnlJeXIumPvyorJUubR7AuFh7n/QYfj8dcgxZNZa/gmvQ5MLjZsgQ+Sxr4/G9XKUC9hZsLVhnQ62mqai/y9IV6vn2HU08lUNKe9wVF4umfAWkzTXTlrX0Ula+0LahqASaScaSatGufez+5l6USgJDKh2Bnu+Tga7OhncsvZBaNnDYFaZQYKYimWY22F1S+QhwZUQJAYJyHMUVMJblMiGfZ743pmd1mp4fL1FuOeQzmOwKotBkSBmSWK/TPRFZfO7RSMAwWO0Y8Kfa8qmgVyeV0afXBniH7t1KQ9qy+XSCnof1EJv4bnecbbXuHMHb0JSYXt5fRGwewSLXEJJJuqLJYe/LB9/m6jB5Lx8O8sCmKtO4tOMwBTlUG35VqwwjWGO8t+0+xDT0ulDJgqgDyWURjmDZfyB62IXwb+yxwxvMu3o+TrRs9SYUAHkvdM2ti0DcNK0Ta1G93Gn2L2T70LjA8OM90rkGP2gngQ0NTI7OWucsU7WfJRsn6WRJGokFASRlYB0EQIFrY+UgtoDqoZ0X1++D6Yg+3nSfW8k8FHAvST8VFPIt+u+lN8l5/fqnF+d9NPpT8euTn+qRkXn6U/HGv2pGuj8DdIVImru5iMd8HsQ1eBM6znEp9sYYhaBl76U4KeP7kvfSN6jYKuGWb+0L62f5kRfOunxpZOLL32lL7V7iKUrp/A8uyR2q5Q25VQBZlvnLXkCd/BF4EC2MCNf2cKf48v3iXc0udZE6PmZ/BVvkAfa3PZlm7snM7lzMblk8pNtc9fSZfIzShafg8nt4SHegSb3jdOYnFXb36tU3dsf/dg3/wM=</diagram></mxfile>
2204.09245/main_diagram/main_diagram.pdf ADDED
Binary file (21 kB). View file
 
2204.09245/paper_text/intro_method.md ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Natural Language Inference (NLI) is the task of determining whether a premise entails a hypothesis. In particular, NLI involving temporal expressions is crucial. () is an example of English NLI involving temporal expressions.
4
+
5
+ The inference example with temporal expressions is challenging. This is because we need to represent the meaning of sentences that contain temporal adverbs like *before* and *in*, temporal expressions like *April* 2021, and verb tenses like *arrived*, and to compute temporal order of events written in the sentences.
6
+
7
+ @thukral-etal-2021-probing showed that deep learning-based models [@liu2019roberta; @he2020deberta] trained on a standard NLI dataset such as Multi-Genre Natural Language Inference (MultiNLI; @williams-etal-2018-broad) failed to perform simple temporal inference as in (). Furthermore, deep learning-based models have performed poorly on challenging NLI datasets that involve various temporal inferences such as FraCaS [@cooper1996] for English and JSeM [@kawazoe2015inference] for Japanese.
8
+
9
+ Recently, logical inference systems based on compositional semantics [@bos-markert-2005-recognising; @abzianidze-2015-tableau; @mineshima-etal-2015-higher; @mineshima-etal-2016-building; @bernardy-chatzikyriakidis-2017-type; @bernardy2020fracas; @Onishi2020ccg] (i.e., semantics in which the meaning of a phrase is determined compositionally from the syntax and the meaning of the lexicon contained in the phrase) achieved high accuracy in FraCaS and JSeM. However, most previous systems did not cover temporal inference.
10
+
11
+ In addition, because most previous research on NLI has focused on English, research on other languages is desirable. In particular, research on NLI in Japanese is still in its infancy and is limited to deep learning-based systems using pre-trained language models and a few logical inference systems [@mineshima-etal-2016-building; @Onishi2020ccg]. @Onishi2020ccg attempted to implement a Japanese logical inference system for temporal inference. However, the focus of this previous research was limited to a few temporal clauses in Japanese, and temporal adverbs are out of scope. Thus, there is still room for improvement in the accuracy of temporal inference in Japanese.
12
+
13
+ In this study, our aim is to realize the compositional semantics and a logical inference system for temporal inference in Japanese based on Combinatory Categorial Grammar (CCG) [@steedman2000syntactic; @Bekki2010] to derive a transparent syntax-semantics interface and the analysis of tense and aspect studied in formal semantics [@Kamp1993-KAMFDT; @yoshimoto2000tense; @kaufmann2011temporal; @Utsugi2015towards; @ogihara2017tense; @jacobsen_2018]. We focus on temporal order and develop a Japanese logical inference system for temporal order.
14
+
15
+ In our system, a CCG parser first parses the premise and hypothesis sentences and converts them into CCG trees. Based on the analysis of the compositional semantics, we then modify the obtained CCG trees. Next, using ccg2lambda [@martinez-gomez-etal-2016-ccg2lambda], the meaning of the whole sentence is derived as a logical form. Finally, we attempt to prove the entailment relations between the obtained logical forms by an automated theorem prover Vampire [@kovacs2013first].
16
+
17
+ We experiment with two NLI datasets involving temporal order in Japanese: JSeM and a Japanese translation of the NLI dataset focusing on temporal inference [@thukral-etal-2021-probing]. We compare our system with the previous Japanese logical inference system [@Onishi2020ccg] and the Japanese BERT model [@devlin-etal-2019-bert]. Our experiments show that our system outperforms previous logical inference systems as well as current deep learning-based models. Our system will be available for research use at <https://github.com/ynklab/ccgtemp>.
18
+
19
+ Tense and aspect are important linguistic phenomena related to temporal expressions. This section provides standard background on the semantics of temporal expressions in Japanese, which have been analyzed in previous studies [@yoshimoto2000tense; @kaufmann2011temporal; @Utsugi2015towards; @ogihara2017tense; @jacobsen_2018].
20
+
21
+ In Japanese, verb tense is classified into past (*-ta*) and non-past (*-ru*), and aspect is classified into stative (like *iru*) and non-stative (like *kuru*). The temporal interpretation of a matrix clause (i.e., a clause that contains a subordinate clause) is determined by the combination of tense and aspect, and is expressed by the constraints imposed on the relation between speech time and reference time. Speech time represents the time that a sentence is uttered, and reference time is a concept proposed by @Reichenbach1947-REIEOS and refers to the time used with location time (i.e., time when an event occurs) and speech time to represent the meaning of tense. Table [\[tab:tense_aspect_relation\]](#tab:tense_aspect_relation){reference-type="ref" reference="tab:tense_aspect_relation"} shows the temporal interpretation of a matrix clause determined by the combination of tense and aspect and example sentences corresponding to each combination.
22
+
23
+ To analyze the temporal interpretation of embedded clauses, the concepts of absolute tense and relative tense are necessary. Absolute tense means that the temporal interpretation is determined by the relation between the speech time and the reference time, as in the matrix clause. However, relative tense means an interpretation in which the temporal interpretation does not depend on the relation between the speech time and the reference time. We explain the details with examples in Section [3.2](#compsemclause){reference-type="ref" reference="compsemclause"}.
24
+
25
+ This paper uses CCG to formalize the syntactic analysis of our method and analyzes the compositional semantics of temporal expressions based on the analysis by @kaufmann2011temporal.
26
+
27
+ This section explains the semantic representations for verb tense. Consider the following sentences. (a) is non-past tense ([np]{.smallcaps}), and (b) is past tense ([p]{.smallcaps}). (a) means that the event of Taro's coming occurs after the speech time, whereas (b) means that the event occurred before the speech time. Thus, for the speech time $s$ and the reference time $r$, $r > s$ in (a) and $r < s$ in (b). Here, $r$ and $s$ both represent intervals and $r < s$ means the end of the interval $r$ is before the beginning of the interval $s$. Another interpretation of time is instant semantics, which treats time as an instant, but in this study, we follow the standard treatment of time as an interval [@Kamp1993-KAMFDT; @bernardy2020fracas].
28
+
29
+ Following @Kamp1993-KAMFDT, in this study, the time of an event is represented by its relationship with the reference time. Then, the meaning of (a) and (b) can be expressed by the following logical expressions, where $\mathsf{tgk}$ is the predicate that represents the event Taro's coming, $\mathsf{time}$ is the function that returns the time when the event occurred and $e$ is a variable representing the event. The meanings of (a) and (b) are as shown in Figure [1](#fig:temporalinterpretation1){reference-type="ref" reference="fig:temporalinterpretation1"} and Figure [2](#fig:temporalinterpretation2){reference-type="ref" reference="fig:temporalinterpretation2"}. Figure [\[fig:example_derivation\]](#fig:example_derivation){reference-type="ref" reference="fig:example_derivation"} shows the CCG derivation tree for (a).
30
+
31
+ <figure id="fig:temporalinterpretation1" data-latex-placement="h">
32
+ <embed src="sources/time_series1.pdf" style="width:7cm" />
33
+ <figcaption>Temporal interpretation of (a)</figcaption>
34
+ </figure>
35
+
36
+ <figure id="fig:temporalinterpretation2" data-latex-placement="h">
37
+ <embed src="sources/time_series2.pdf" style="width:7cm" />
38
+ <figcaption>Temporal interpretation of (b)</figcaption>
39
+ </figure>
40
+
41
+ Next, consider the following sentences with an embedded clause. In (a), the embedded clause is the non-past tense, and in (b), the embedded clause is the past tense. As mentioned in Section [2](#sec:background){reference-type="ref" reference="sec:background"}, the temporal meaning of embedded clauses is interpreted using "relative tense." Thus, the temporal meaning of embedded clauses is determined not by the relation between the speech time and the reference time of the embedded clause but by the relation between the reference time of the matrix clause and the reference time of the embedded clause. For the reference time of the embedded clause $t$ and the reference time of the matrix clause $r$, we then have $t > r$ in (a), and $t < r$ in (b).
42
+
43
+ Therefore, using the same predicates and functions as Section [3.1](#subsec:compsemanticsverb){reference-type="ref" reference="subsec:compsemanticsverb"}, the meaning of the embedded clauses can be expressed by the following logical formulas. By combining these logical formulas with the meanings of the matrix clauses interpreted in the same way as Section [3.1](#subsec:compsemanticsverb){reference-type="ref" reference="subsec:compsemanticsverb"}, the meanings of sentences with the embedded clauses can be expressed by the following logical formulas, where $\mathsf{o}$ is the predicate that represents the event of my swimming. The meanings of (a) and (b) are as shown in Figure [3](#fig:temporalinterpretation3){reference-type="ref" reference="fig:temporalinterpretation3"} and Figure [4](#fig:temporalinterpretation4){reference-type="ref" reference="fig:temporalinterpretation4"}.
44
+
45
+ <figure id="fig:temporalinterpretation3" data-latex-placement="h">
46
+ <embed src="sources/time_series3.pdf" style="width:7cm" />
47
+ <figcaption>Temporal interpretation of (a)</figcaption>
48
+ </figure>
49
+
50
+ <figure id="fig:temporalinterpretation4" data-latex-placement="h">
51
+ <embed src="sources/time_series4.pdf" style="width:7cm" />
52
+ <figcaption>Temporal interpretation of (b)</figcaption>
53
+ </figure>
54
+
55
+ This study interprets the temporal meaning of sentences with embedded clauses in this way.
56
+
57
+ An example of the temporal adverbs targeted in this paper is shown in bold in the following.
58
+
59
+ More generally, we analyze temporal adverbs comprising various types of absolute temporal expressions (e.g., date, day of the week, and time) and temporal connectives *izen* (*before*) and *ikou* (*after*). Absolute temporal expressions are temporal expressions that do not depend on the speech time, in contrast to relative temporal expressions such as *today* that depend on the speech time. In this study, temporal adverbs containing relative temporal expressions are out of scope and left for future work.
60
+
61
+ In temporal adverbs containing absolute temporal expressions, the particle *-ni* is unnecessary. For example, the following three sentences are all acceptable and have the same meaning. Thus, *-ni* can be analyzed as a separation of clauses like a comma and does not have any meaning. Before considering the syntactic category of *-ni*, let us consider absolute temporal expressions. As shown in (c), absolute temporal expressions are combined with sentences such as *Taro-ga kita*. Therefore, $S/S$ is assigned as the syntactic category of the absolute temporal expression 4 *gatsu* 3 *nichi*. As mentioned above, because *-ni* plays the role of connecting the preceding and following clauses, $(S/S)\backslash(S/S)$ is appropriate as its syntactic category.
62
+
63
+ In addition, absolute temporal expressions like 4 *gatsu* 3 *nichi* can be a noun phrase $NP$, as in Figure [\[fig:example_ccg_tree\]](#fig:example_ccg_tree){reference-type="ref" reference="fig:example_ccg_tree"}. In this example, the syntactic category of 4 *gatsu* 3 *nichi* is $NP$, and the syntactic category of *izen* is $(S/S)\backslash NP$. We explain the reason why absolute temporal expressions are used as both $NP$ and $S/S$ from a semantic perspective in the next paragraph.
64
+
65
+ We treat absolute temporal expressions (e.g., 4 *gatsu* 3 *nichi* (*April* 3)) as multi-word expressions. Consider the expression 4 *gatsu* 3 *nichi*. We can decompose the expression into four constituents as follows. $$\begin{align*}
66
+ [4\ gatsu\ 3\ nichi] &= [4\ gatsu][3\ nichi] \\
67
+ &= [[4][gatsu]][[3][nichi]]
68
+ \end{align*}$$ A current Japanese CCG parser [@yoshikawa-etal-2017-ccg] analyzes each constituent as the syntactic category $4 = NP, gatsu = (NP/NP)\backslash NP, 3 = NP,$ and $nichi = NP/NP$, respectively. The semantic template for $NP$ is $\lambda E\ N\ F.\ \exists x.(N(E,x) \wedge F(x))$, which means "some bound variable $x$ is associated with the word $E$." Now 4 and 3 are both $NP$, so 4 and 3 have different bound variables associated with them. This bound variable refers to the interval. Essentially, because 4 *gatsu* 3 *nichi* refers to only one interval, 4 and 3 need to be associated with the same interval. The correct meaning cannot be derived when 4 and 3 are associated with different bound variables.
69
+
70
+ Thus, we treat temporal expressions such as 4 *gatsu* 3 *nichi* as multi-word expressions and set up a semantic template as shown in Table [\[tab:semantic_templates\]](#tab:semantic_templates){reference-type="ref" reference="tab:semantic_templates"}. This semantic template allows us to derive the meaning of a temporal expression associated with only one bound variable. In this template, the function $\mathsf{normalized\_time}$ takes an interval as an argument and returns its actual time, which can be set in the format YYYYMMDDHH from absolute temporal expressions. For example, for interval $x$, which represents *April* 3, the value is $\mathsf{normalized\_time}(x)=0000040300$. In this example, year and hour are not explicitly written, so zero-padding is applied to them.
71
+
72
+ As shown in Figure [\[fig:example_ccg_tree\]](#fig:example_ccg_tree){reference-type="ref" reference="fig:example_ccg_tree"}, 4 *gatsu* 3 *nichi* functions as $NP$ when connected to *izen* and as $S/S$ when used by itself. This phenomenon can be analyzed as follows. Temporal expressions such as 4 *gatsu* 3 *nichi* and 4 *gatsu* 3 *nichi izen* play the role of representing the time of the sentence. Consider the following sentences. In (), the location time of the event *Taro-ga kita* (*Taro came*) is 4 *gatsu* 3 *nichi* (*April* 3), and in (), the location time of the event *Taro-ga kita* (*Taro came*) is 4 *gatsu* 3 *nichi izen* (*before April* 3). The expressions that represent temporal adverbs such as 4 *gatsu* 3 *nichi* (*April* 3) and 4 *gatsu* 3 *nichi izen* (*before April* 3) must have the syntactic category of $S/S$, so 4 *gatsu* 3 *nichi* changes from $NP$ to $S/S$.
73
+
74
+ Next, the semantic template for *izen* was determined as shown in Table [\[tab:semantic_templates\]](#tab:semantic_templates){reference-type="ref" reference="tab:semantic_templates"}. The temporal meaning of *izen* is represented as the lambda expression $\lambda x. \mathsf{before}(j3,x)$, which indicates that the expression "doing before $x$" means "doing in $j3$ before $x$." Finally, the meaning of temporal expressions can be derived by setting up a template with *-ni* and a comma as meaningless words, as described in Section [3.3.1](#subsubsec:ccgtree){reference-type="ref" reference="subsubsec:ccgtree"}.
75
+
76
+ <figure id="fig:overview" data-latex-placement="t">
77
+ <embed src="sources/overview.pdf" style="width:14cm" />
78
+ <figcaption>Overview of our system</figcaption>
79
+ </figure>
80
+
81
+ We introduce a set of axioms for temporal relations and temporal expressions to perform inference for temporal order. @allen-1983-maintaining defined 13 relations between time intervals. The previous logic-based inference system [@Onishi2020ccg] introduced 169 axioms for these 13 temporal relations. Six of the 13 temporal relations, $\mathsf{meets}$, $\mathsf{met\_by}$, $\mathsf{starts}$, $\mathsf{started\_by}$, $\mathsf{finishes}$, and $\mathsf{finished\_by}$ are special cases of other relations in implementing axioms. For example, $\mathsf{meets}$ is a special case of $\mathsf{before}$ where the end of the preceding interval coincides with the beginning of the following interval. $\mathsf{meets}$ is necessary for inferences involving temporal clauses such as *soon after*. Thus, we consider that those six relations are redundant in performing the temporal inference involving temporal order in this study. We therefore merged them into the most similar relations: merged $\mathsf{meets}$ into $\mathsf{before}$, $\mathsf{met\_by}$ into $\mathsf{after}$, $\mathsf{starts}$ into $\mathsf{during}$, $\mathsf{started\_by}$ into $\mathsf{contains}$, $\mathsf{finishes}$ into $\mathsf{during}$, and $\mathsf{finished\_by}$ into $\mathsf{contains}$, respectively. In summary, we introduce 49 axioms corresponding to seven temporal relations: $\mathsf{before}$, $\mathsf{after}$, $\mathsf{overlaps}$, $\mathsf{overlapped\_by}$, $\mathsf{during}$, $\mathsf{contains}$, and $\mathsf{equal}$.
82
+
83
+ In addition, we introduce 30 additional axioms for temporal expressions in Japanese such as *izen* (*before*) and *ikou* (*after*), and those for identity conditions of speech times between premises and hypotheses. Table [\[tab:axioms\]](#tab:axioms){reference-type="ref" reference="tab:axioms"} shows examples of the axioms.
84
+
85
+ Figure [5](#fig:overview){reference-type="ref" reference="fig:overview"} shows the pipeline of our system. Our system consists of three main steps. First, natural language sentences of premises and hypotheses are converted into modified CCG trees by CCG parsing and modifying trees. Next, a meaning from the semantic templates is assigned to each lexical item. The semantics in lexical items are then composed by ccg2lambda to derive a logical formula that represents the meaning of the whole sentence. Finally, an automated theorem prover determines whether the logical formula of the hypothesis is provable from the logical formula of the premises. In this section, we describe each of these steps.
86
+
87
+ The syntactic analysis, which obtains CCG parsing trees of input sentences, consists of two steps. First, we use the tokenizer to tokenize sentences and a CCG parser to obtain a CCG tree. We use depccg [@yoshikawa-etal-2017-ccg], a standard Japanese CCG parser, trained on the Japanese CCGBank [@uematsu-etal-2013-integrating] for the first step.
88
+
89
+ Second, if the sentence contains temporal expressions, we extract the subtrees in which the leaves are temporal expressions from the CCG tree of the whole sentence. The extracted CCG subtree is then transformed into an appropriate form. Figure [\[fig:before_modify_tree\]](#fig:before_modify_tree){reference-type="ref" reference="fig:before_modify_tree"} and Figure [\[fig:after_modify_tree\]](#fig:after_modify_tree){reference-type="ref" reference="fig:after_modify_tree"} show the temporal expression subtrees 4 *gatsu* 3 *nichi ni* (*on April* 3) before and after the conversion. As another possible way of implementation for obtaining correct CCG trees for temporal expressions, we can improve the CCG parser itself. However, to do that, we need to re-train the morphological analyzer and the CCG parser to correctly handle a variety of temporal expressions. We do not take this approach because it is too costly.
90
+
91
+ In semantic analysis, each leaf (lexical item) of the CCG tree obtained in the syntactic analysis is assigned a meaning from the semantic templates. The lexical items are then combined according to the CCG derivation tree to derive a logical formula that expresses the meaning of the entire sentence. The composition is performed using ccg2lambda in Japanese [@mineshima-etal-2016-building].
92
+
93
+ In order to assign meaning to the temporal expressions, we set up semantic templates for lexical items such as absolute temporal expressions and *izen*. We provide a set of semantic templates, which contains 150 lexical entries. The number of lexical entries assigned to CCG categories is 92, and the number of entries directly assigned to specific words is 58. Table [\[tab:semantic_templates\]](#tab:semantic_templates){reference-type="ref" reference="tab:semantic_templates"} shows the examples of semantic templates.
94
+
95
+ As a representation language, we use the typed first-order form of the Thousands of Problems for Theorem Provers (TPTP; @sutcliffe2017tptp) format. We use standard interval semantics [@dowty1979word; @bennett1978toward] and introduce an interval type to express time instances as intervals and their relations in logical expressions. We use four basic types: $\mathsf{E}$ (Entity), $\mathsf{Ev}$ (Event), $\mathsf{Prop}$ (Proposition) and $\mathsf{I}$ (Interval). The types of expressions we adopt are defined by $$\begin{align*}
96
+ \mathsf{T} ::= \mathsf{E}\ |\ \mathsf{Ev}\ |\ \mathsf{Prop}\ |\ \mathsf{I}\ |\ \mathsf{T1} \Rightarrow \mathsf{T2}
97
+ \end{align*}$$ where $\mathsf{T1} \Rightarrow \mathsf{T2}$ is a function type. Because the logical expressions derived by ccg2lambda are not typed, we implement automatic completion of variable types, predicate types, and definitions of predicates.
98
+
99
+ In theorem proving, we use the state-of-the-art first-order logic automated theorem prover Vampire [@kovacs2013first] which accepts TPTP formats to determine whether or not a hypothesis is provable from premises using the logical formula derived in Section [4.2](#subsec:semanticanalysis){reference-type="ref" reference="subsec:semanticanalysis"}. The system outputs "yes" (entailment) when the hypothesis can be proved from the premises, "no" (contradiction) when the negation of the hypothesis can be proved from the premises, and "unknown" (neutral) when neither can be proved. We use the fastest mode, CASC mode, and set the timeout of Vampire to a maximum of 300 sec for our experiments.
100
+
101
+ Even though Vampire is a fast theorem prover, it takes too long to prove problems whose premises and hypotheses are too complex. When proving the negation of a hypothesis, it turns out that simply negating the logical formula increases the complexity. Therefore, this study uses the symmetrical relationship between *ikou* and *izen* to replace *izen* and *ikou* in the hypothesis with *ikou* and *izen*, respectively, to negate the logical formula without increasing the complexity.
2205.10442/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-11-15T23:53:41.280Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" etag="cAlKtHurOAygAmI2J-xJ" version="15.7.3" type="device"><diagram id="-F7nC-LwQvmivoTQekkk" name="Page-1">7V1dd9s2Ev0t+5Cz2wf3EOCHqEfHTtqmTZs6abvZlxxapm2dyKJWH7W9v34JiaREXCj+JGcMYLvnRCZpSb53MIM7MwBehUdXNz/Ms9nl++Isn7ySwdnNq/D4lZRCRmn5j7pyu7kyCJLNhYv5+Kx6aHvh4/h/eXUxqK6uxmf5ovXgsigmy/GsfXFUTKf5aNm6ls3nxXX7sfNi0v7UWXZRfWKwvfBxlE1yeOyv8dnycnM1jXee/jEfX1zWnyyC6s5VVj9cXVhcZmfF9c6l8M2r8GheFMvNq6ubo3yiwKtx2fze2z13my82z6fL+/zCMhBnsZhd/zT4nH74Nf988/f/rg5COdy8z9/ZZFX9ydXXXd7WGFzMi9XsVfj6fDyZHBWTYl5ePsvPs9Wk/ODX+D2qr/Z3Pl/mNyaWstNJrgNRWlBeXOXL+W35XPVbMgg3v1cZTxhvfrzeMjGs8b7cYSFJqotZxf5F89ZbgMoXFUYPwCsS4X3xAljuIODxaO3AkaYDAxyyKziEAYxkbRWLWTZtoZL8d6Us/fV5MV0eLNbj/LB8QEazm+3N8tVF9e+kfvjx73JSv035t23eqf3u5eXN16wvazxeX46X+cdZNlI/X5f+rXzocnlVInQsypfZYrbxOOfjm/ys+lKVB5NR/XP1duoXFst58TWvR9ArGUbH6j/jGLrD+nRj+aZRRIYhEnVlEhJM4o1dwN60AaTCGf3QoZU4p8Q4R4DzeytxFpIY6Nh2g77l4TgSwPlXu3DWHDQT2AcA+59Wwp7ygj0F2H+yEvbGfTPBHWXlO7twv+UxPanzJDtA/2YX0GZ3To47Sl87dU7KDHfUlx+txF136OTA8xKcu7nI8vrb9f869fTkAkmgFD1ygwFzCKAnBCXriVOEpNwIQW1LORnqnxA9aNAzgrK3c0I02Bs6SKJ4Qk0ACmA+I6IHAmRATQAq4bd8GDhO4rdxtz6JPBVR+0AmUo2AAWrtIFEs/9JrGJgW05w6AjBJzNX+3/mAwIUP1NXdjw2KgcCtICNRTltWcNTdPxfgra/07nH95GEY5fEnK4HXfTw58KiC/7ATeGZVGonqlzJD15+PJwceVa9l/ZV7fDx5xi1Etdt9dN2X6eTg/OkZQfXbfdhlxQi3wk3ougbmVrgJUQRbVtWvkaauB4Soen+0C+k9xWJy4FH1frYSeN23kAOPqtcpZw81YXJCXK0J32oTVDICCGrCLAgwhwZ6Pu6xBtdmPlJmfNRvzKRk319ooEcepTLl4qT+m0zJizMRSmO3Wuj04EBOCEpjyqINeZMpPSGooLtvJ+KUzoOoQc4ISmvL1uLUSFOXdSLU0nY2q+hRgBx41Mx29kzo3p4ceNTKdi6XB69OjjyqYjtXtkIJmTpBV29uZv2eJ1AqJkfelVXF7MozMereH+xEnlt9JkaBe2Il8rqfJ8+2xRRKloOfp0ceFeuxnchzq7XEqGC79zacsjfsqi0xStvue384MQJxgTqfFvs1w8wIISgQsyKEW1GmHrH2z5y4VV8S05bTDg0FCBbUaboEhTOjfaHSUSLCUb/RgpwRFNSMmij6YIRb9SZBoe0YI9zKOgkKcMpCZv97goTUSz4SilLyPpwpplYhdeI1YSC8SRmIqFMfieNCOyJXEyisLVt3prt7JnuADFBAu5X0C5ltCzVAXW1nt0XEbP+nAepnN2MAFz5QPR9bORD
0kEAtyQYU+3FxDgnkhLiyYlkPCeTAO75iWQ8J5HwQLFjmxIceKci3CxmgbrZzIzs9JNAj78q+XnpMIEe+doLWn2Sme3965FEaW2rz1IE2RS18YiXSMblN8zrNqUOkqassKa9DgnrwHkwSOSmqKEtNnFluP0W5ZOfysZhZDj/FybmlCRtmux0PXZmb666GHHicmn+wE3hmueGhK6eW6K6GXI0OsUxiJ/K6r6FHHmfydjaL6M6GHnlXdj+BTDu1eh063qEGCXhyQnCOz+gww+Tw6CiJ+k3MUzMi6i/EJC4QUMJtMw8RoC7oXpCx6iXhtrOrCCRQwuj0+sFQ/ddrKGFAiStFAD1mMIAeVZydiQs9NjCA3pW9VyAGUOenRYAlmd63zm08PYd+HgaUoLS2c39LiAH00DuurSE00DOC4tqtvQwgZFDXGYRAcc1o6TyFbKCnxPEtWCCS0DOC2ppRUjBKB+lx0G8ooacEtXX36Y59qT8WhVPypKBAzd03Iw3uLAqq9IygFHeLEW7VJSFQDp4QRpL+3BN9XkqgHHSjiYwB9Kj7LO2pYVeDkK6ceAAOhzzbUQdb6xPh4HDooUd18Ied0LNLeEvrD4vb53DINbF05fQ4cDj00FPs9cfC4dBDj5NLyyRVBTX5lqIixGKBpb69beQMkMdp/L+tRD5lhzzO4gmBJ9gQX7JjxPod5GqoydOVIab0bd21sm3k9Mi7sq2B7vDpkUcVZWeeGDw7PfQEG8GxqFc1GxyQM0DQM8eCAXMcYEAIxXo0TittYNcbakbqd7Y+MuvxgQH0qIIp96XUhsmbN2kcHXYbIMjT+xHKYVvXNLWNnx55V7YE0T0+PfKunOMNHp8eepTBH+yEXqur0KfZIgYHiNGe5B2wowT1sZ3pOH4FmAiF8bGd0LOrwEQoge2srUMMIE/Ixa4UfMHX00OPWvdPt8Ivu5pMjNrXrVwcv2JN7MoWK3psoE/GxQTLvTg1RughgwEjKJbtHAz8yjSxK623/OoxsSs7rEAIIM/Oxa6cEwa+nh56FMVuRV929Zl6dDrLCLu6TcJr7SNBuZh8cUaCktnOeZHujuiRR2VsZxuj7nbokXdcGEMVmZ4RXsJYo2QYJ6PTqNNAQH4eokhQIP/gBgXmAMGAEdTNjGZHPTCSsmPE8Q1L9cDBgBFea4v7VxDkx7uKAcpqO5PaeoCgRx7l81srkdcDAT3yqJr5eB2KQEDPCE6Wjq0cC1BkIBdvA5wVdd/0yKrrQi8+0FOC0yI7S2780nopTocshZ5dXi/F+dDvVkKvxwB6HZbihKj7/fw4xwAGlLhSX+CXuUtdOdyMX4oupTijmpMjgthALs1SrCx034TEiRKIDfSUuHKeOL+kXYpq+cRO6Nll7YY4IyJEnuAsd3Y7BQ4d78TQzzZjwAjOn9wScvrhZgwowflT9xmlfUeYcThCmQElrixpgJBBH8RdOSUIYgM99K5UFSAGUEPfKEie3XkEB1wyoASrDXZ2zusxgHxFoQwkQG/nNjt6DGAAPWpoO1cU6jGAAfSOi2U9BDBghOIoLU5iGUID9ZJCGbhebICQQU+JM0fqcluELgMUy3YeswixgR56FMvv7YSe236EUqBYtnNTPP2YRQbQ89oAvkPouVVupCCobrI6GYTb3ndSEAg2ToyAe6JnBAWbW4xwqylIgeLgNWHA2Kfk+nNb9FkNgaLBzuYw3T8xgN6VCpvuiOihlzhzpcxSMHBE5BK6Ds7Wl/rBEdFD78q2zeCI6KHntRdSd9BD8YB6AaGUWDywFHpui8ilRB1gaaaa22JxKV3ZJBiKBPTQ43zfzpNzdF9PvlBQhlgkIESefukBA0YodgPmVLrXQwMDSlCB2Tkh0kMDA+hdabSD0ECekw4db7SD0EDPiON1G4gM9IygVP7RSvcEkYEc+gjnSZRSmUGmmlzHRThPsrPZCzLV9NDjPMmRkhk99BiVKaeo9I6IXjUYzoR3o/WRAfSurAnXHRED6DG
NaqfVg8Mhn4waDiA/thJ6cDj00KMO+Gwn9OzyEYYjTQVAX/51yzbabfCmxTTXpi3VpWwyvpiWP45KKPPy+muF1XiUTQ6rG1fjszP1Ma9N9M6L1fRMkblekr9DqOhoif4ODcJAg+iOBqxcYl7IGRpu2lZPxwpG49B5VgxirV9WDKefRs6zIgxT155pwcxRqaqTiaJhMcumLX6S/64KdWO0wf6wvDm/OM3+VX678v/lNwiMr75TLxWMgUL24Dy7Gk9uN79+VUyLxZqL1iOLNfrqgWB2s/3c8tXF5t94jWdJ3uX74iyfqAtv1lfnRflk81MNQryGobxyrF6rLxarvzpW59Xc8axonq0t4FFvI7dvs4G6uRNt72yQbu6sR0P502Y8qMti/ePumFBX10NA3diOi9blzdhQl6rRoS62x4e6WY0QdXNnjKg761GirtfjRF0MNh9Zj5X119tc2/lTNyPhPmA2txoct8MlVgOmeTLcAT643V5Pdy5vxtP2zXduVePKeK8cX831i52P10le/9gwvXuxbX/Vc2Com/FVjtjNEKut2nFvSD91MJyJm3ha6OcOWBAfOE+LacVvz7Rg/n3oPC2mBv6eacEcfP1hLvNSHxFDx4shaeNw1qbJa5Lzglkc4dM4cULNi+F4UeETORUrzbpVapJwalZvcuAwSVHAjSacqgk/hQ5bJJHLnIFh4pY6T5LUxhI9TYZ5nNc9UcptNBmKc14GtUmirz8YTgr2LJWjRzLjyXCssK+q6qPJ0BfSM0sSWfJVVpFIbjxhncHlBp5m3Rc7r4fqVvoZudCSENKweVbPPKG89aUIGE8MeELpFPrZXsxvPKF2Cn18ivmNJ0Oro3Sep/asnD75OkTtFHrtFEluNGG9KfTiKWVHE2pcNZp8y2p/Lavhzi3fs3qfntX1MDL0rLZ6WX3T6gv0j3p5ioGDxORS6EvyDAMZJi1C322s1xEZ8IRl+dD3TmgskZd7h4bUkk/VQnSi58mQWvKp2mYbIT48YWqp9sQO8xTpWQtqnsI6B7nLk0/VtlkiXyoTBphbqi3JYZb0wjwDnjC5FPlUbVQf/c6HJ9S4kc/VtucQpsP1emYJ2ycin4kQ/HjCTETkMxGhPoug5wkzEZHPRIRJxI0ng3byGlev+JrOXOyXJ8Np4bHXuFHKjieclcde4+ptYwx4wll57FWu3jbGgCecl8de5bZn5eT7eISGw+Jjr3H1SQQ9TTgpj73I1QtPDHjCAmHsxVMU6W5PUPOEBcLYF3K10URedhIocRMvnSA6kfMkUeImwnfL7jzbfbfswHfLPrBbVpV3TN2yie+WfeEOUs8t0TtIw5avPmfRZsl0wk7PLGHGIvFaWN9mjAFPKIYTL4bbEst0fEzPLKEUTny9V+9GYsATSuHEpyz0HkxyniJDHRFYuijhmj0ZFBmMiuk0Hy2z0/qNg2+ClWj5nYMQwTIeXtWg+vxmHZrgukORnmajrxdriztoi9N/SVXNqJSoWlLTvPwOdeXRallO/4Oz4nra3wx9kp8vDWNrWcweOrDK6Lr5uf6kR51/JvbY1v7wFbRMSASYIjQOuLgzCzLslFnzeWo0Hy3pIBNT1iH+vnyVXSkKpqeL2eZObSSnHVrIfIPZM7nfvq2kNovaae2YRWiwCtmZVRj2qHsGqxDD770ZPMAMDPuG92wG+8PLE8wgTLx3eEIMiVshpCma3mEktXbswEhkJxHEG8lTfEccpLS+IzJ02B6O5sVi0d+88HGMhd9ibPcQ3bi3eWK96cCuLEsMbEadDXLDFtDPEQlSP8ifMMgjgXbR7wQhxPTXSX4xLpTyLM7Vnz3PD06Kq7UU/WmZTW77G/1EqjCst8u8/2hPBlrniGHiZ5aFw86COtZynmG8y9BdAfAYu9AyTsJQ4ut3vBv2mH8Osxh4s3iCWTQbVe66i0GvYQBrijWhCp77W0bV2LI/RSnUtLLKS8pgm64U69YZ3bB+yRcL1cIRrBaqESP4oB7
9mt/uWNvm+/WdsXxZ9qaFp2Yd913hqUM/hPOOZ/BDifR+6CF2Uc/+as0Zk09H96cintsPqVWIje9Jt69Tkx/aXjkcqW9xW6yUV/ry5cs/Wg97p3R/40vuNr7OnNJff3767fRjevKfZPj50zSe/3o9+/0Ac6UfVrPZ7T8Xa0Oaf33pysdQQb0vsfcOLUJg2qoz5WNkET3IL+OvquU2U98qm0zOJ8W1mlN4MpHM9upxkQyJyUQRe5RdzVZqROYHV9lYvcEkXy7zefnJbz2nJk7bmchmq0261ES4PzXxiJ4HEW2779XGh9XLxBTG3xbzq7Wc+OP7X79f/72ZUhU/F+dj8h6IlxW8NU8RBT0Gb/MfkaDjf4pVKQDqKWH7tcGuxHoFyOc8U25IvcXh6encDYt6cGVEiKhtO81xKru2U28bsms79UksHdjOfo/0zLJjvRFrbUvKEVavB5HBrD5ezlXWwwVV8RgzatfRo2F0PzPq0AU9VxZNN4Sfx1MVp9YFmcW6dzLIltWU9l1WOpd8oWa483yxzFbzbG0l3miMRqP179V7RhEaDZbZj4rpqPyblCOZZIv15NYTJ9tBQ9ATtz+F+bTR/m61UL89KbLz8fTCD+X7TSOaZdiEFrG/uPY0izgcLUvfrlTvhzyfTfKFN4p9RpG0jCJOyd2E4Sit56i4utGJ+wgLEJq6SLC9UvZrAPuF6RMMIAq8AewxANGOC4khNdGvAXTSihO70YrzGAO4hzDs1wD268KneADPv5n/QCtJGVaC98x/J80OoY8A95sCGJVBvwbwzebryp97OvfRqW/LRu7PO2miTLwB7DOAe4i6Pg0gMmxq/lChHxpt4Li9GvslaXuRfsMM7lh18wibCLWNRNAniF7rj5FhI+FncArCjY7GxziFeySEO3IKr39O4uz48nh1GBx/eP/7uw/vfvp0gDsUv9ydOh++pDXRFrUa1rt1tA2JkQysqQzdISPRj6giJgPVT+0rXWCjaS6o2BhgPrJXNlCKiJe7K9mD2Yi0rgFqNnAiKV7u7mMP91QpLzbqN96hQ0qH6NCqZyLGOVW/fGDtTLrkrLTz38XQMD7iPvmQyIdD7iqWETM+sLIkX+6miI+Y6Orjw+CveuXDcHikQypQauE8CqnjueH8DKf8lcYH+fwKp7uxQ8I81LIksSFn1SsfEue7tVp1gw+tsBATjw/DCSUdrUMYqI+pl7RsF1DJIDWsQ/jt/Hw8GmfrxXbZ9EKtnFq3ov+wus2m2QutV/TQi5i2rKvZkqePXkSjeUXPZV66gXzKJl/Lm2phgrcFoy3Eermqx8UqZltAKfccTWlxU65aL4CrN0/wBSw0CZncaRKiI4swrrVG7bLl941yDsvVJkh8+bJea73l/sUP7jsWXj/Q0RsbVJ5pcL+qzlza3ts5bSl8838=</diagram></mxfile>
2205.10442/main_diagram/main_diagram.pdf ADDED
Binary file (35.3 kB). View file
 
2205.10442/paper_text/intro_method.md ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Recent breakthroughs in NLP established high standards for the performance of machine learning methods across a variety of tasks. However, even state-of-the-art models demonstrate fragility [@wallace2019universal] and exhibit sensitivity to shallow data patterns [@mccoy2019right; @zellers2019hellaswag; @jin2020bert; @si2019does; @sugawara2020assessing; @yogatama2019learning; @niven2019probing]. This has led to a growing demand for successively more challenging tasks.
4
+
5
+ One of the important tasks in natural language understanding is question answering (QA), with many recent datasets created to address different aspects of this task [@yang2018hotpotqa; @rajpurkar2016squad; @kwiatkowski2019natural; @zellers2019hellaswag; @dua2019orb; @rogers2021taxonomy]. There are two main forms of question answering (QA): extractive QA and open-domain QA. In extractive QA, a passage that answers the question is provided as input to the system along with the question. In open-domain QA, only the question is provided as input, and the answer must be generated either through memorized knowledge or via some form of explicit information retrieval over a large text collection which may contain answers.
6
+
7
+ The task of answering clues in a crossword is a form of open-domain question answering. Once a human or an open-domain QA system generates a few possible answer candidates for each clue, one of these candidates may form the correct answer to a word slot in the crossword grid, if the candidate meets the constraints of the crossword grid.
8
+
9
+ Solving a crossword puzzle is therefore a challenging task which requires (1) finding answers to a variety of clues that require extensive language and world knowledge, and (2) the ability to produce answer strings that meet the constraints of the crossword grid, including length of word slots and character overlap with other answers in the puzzle.
10
+
11
+ Our contributions in this work are as follows:
12
+
13
+ - We introduce a new natural language understanding task of solving crossword puzzles, along with the specification of a dataset of New York Times crosswords from Dec. 1, 1993 to Dec. 31, 2018.
14
+
15
+ - We propose an evaluation framework which consists of several complementary performance metrics.
16
+
17
+ - We release the collection of clue-answer pairs as a new open-domain QA dataset.
18
+
19
+ - We provide baselines for the proposed crossword task and the new QA task, including several sequence-to-sequence and retrieval-augmented generative Transformer models, with a constraint satisfaction crossword solver.
20
+
21
+ <figure id="fig:crossword_example" data-latex-placement="ht">
22
+ <embed src="images/crossword_example_NYTIMES_color_v3.drawio.pdf" style="width:70.0%" />
23
+ <p>.</p>
24
+ <figcaption>Crossword puzzle example. A few clues from the puzzle have been provided on the right; they are filled horizontally (Across) or vertically (Down) in the crossword grid. The clue number tells the player where in the grid the answer needs to be filled in. Some of these clues and their answers have further been highlighted with different colors which belong to different clue categories as described in Section <a href="#sec:clue_types" data-reference-type="ref" data-reference="sec:clue_types">3.2</a>, color-coded in accordance with <a href="#fig:piechart_1000annotation" data-reference-type="ref+label" data-reference="fig:piechart_1000annotation">2</a>. Highlight colors denote distinct clue categories: red for word meaning clues, purple for fill-in-the-blank clue, orange for synonym/antonym, blue for factoid question type, grey for abbreviation and brown for historical. Source: New York Times daily crossword which appeared on the July 7, 2009. Copyright of The New York Times, 2009.</figcaption>
25
+ </figure>
26
+
27
+ # Method
28
+
29
+ For the purposes of our task, crosswords are defined as word puzzles with a given rectangular grid of white- and black-shaded squares. The goal is to fill the white squares with letters, forming words or phrases by solving textual clues which lead to the answers. The answer words and phrases are placed in the grid from left to right (\"Across\") and from top to bottom (\"Down\"). The shaded squares are used to separate the words or phrases. Usually, the white spaces and punctuation are removed from the answer phrases. A sample crossword puzzle is given in [1](#fig:crossword_example){reference-type="ref+label" reference="fig:crossword_example"}. Note that the answers can include named entities and abbreviations, and at times require the exact grammatical form, such as the correct verb tense or the plural noun.
30
+
31
+ Solving a crossword puzzle is a complex task that requires generating the right answer candidates and selecting those that satisfy the puzzle constraints. Similar to prior work, we divide the task of solving a crossword puzzle into two subtasks, to be evaluated separately. The first subtask can be viewed as a question answering task, where a system is trained to generate a set of candidate answers for a given clue without taking into account any interdependencies between answers. The second subtask involves solving the entire crossword puzzle, i.e., filling out the crossword grid with a subset of candidate answers generated in the previous step.
32
+
33
+ The two tasks could be solved separately or in an end-to-end fashion. In contrast to prior work [@ernandes2005; @ginsberg2011dr], our clue-answer data is linked directly with our puzzle-solving data, so no data leakage is possible between the QA training data and the crossword-solving test data. In the present work, we propose a separate solver for each task. We provide details on the challenges of implementing an end-to-end solver in the discussion section.
34
+
35
+ Our dataset is sourced from the New York Times, which has been featuring a daily crossword puzzle since 1942. We worked with daily puzzles in the date range from December 1, 1993 through December 31, 2018 inclusive. All the crossword puzzles in our corpus are available to play through the New York Times games website [^1]. We release two separate specifications of the dataset corresponding to the subtasks described above: the NYT Crossword Puzzle dataset and the NYT Clue-Answer dataset.[^2]
36
+
37
+ There are a few details that are specific to the NYT daily crossword. First, the clue and the answer must agree in tense, part of speech, and even language, so that the clue and answer could easily be substituted for each other in a sentence. Second, abbreviated clues indicate abbreviated answers. Further, clues that end in a question mark indicate a play on words in the clue or the answer. There are also a lot of short words that appear in crosswords much more often than in real life. These 3- and 4-letter words, referred to as crosswordese, can be very helpful in solving the puzzles. Finally, every Sunday through Thursday NYT crossword puzzle has a theme, something that unites the puzzle's longest answers. Theme answers are always found in symmetrical places in the grid.
38
+
39
+ The dataset consists of 9152 puzzles, split into the training, validation, and test subsets in the 80/10/10 ratio which gives us 7293/922/941 puzzles in each set. We removed a total of 50/61 special puzzles from the validation and test splits, respectively, because they used non-standard rules for filling in the answers, such as L-shaped word slots or allowing cells to be filled with multiple characters (called rebus entries).
40
+
41
+ Most NYT crossword grids have a square shape of $15{\times}15$ cells, with the exception of Sunday-released crosswords being $21{\times}21$ cells. Other shapes combined account for less than $3\%$ of the data. The vast majority of both clues and answers are short, with over 76% of clues consisting of a single word. For traditional sequence-to-sequence modeling such conciseness imposes an additional challenge, as there is very little context provided to the model. In most puzzles, over 80% of the grid cells are filled and *every* character is an intersection of two answers. Such high answer inter-dependency suggests a high cost of answer misprediction, as errors affect a larger number of intersecting words. More detailed statistics on the dataset are given in [1](#tab:stats){reference-type="ref+label" reference="tab:stats"}.
42
+
43
+ We generate an open-domain question answering dataset consisting solely of clue-answer pairs from the respective splits of the Crossword Puzzle dataset described above (including the special puzzles). Within each of the splits, we only keep unique clue-answer pairs and remove all duplicates. However, certain clues may still be shared between the puzzles contained in different splits. We therefore remove from the training data the clue-answer pairs which are found in the test or validation data. This ensures that the model cannot trivially recall the answers to the overlapping clues while predicting for the test and validation splits.
44
+
45
+ This produces a total of $578$k clue-answer pairs, with $433$k/$72$k/$72$k examples in the train/validation/test splits, respectively. Since certain answers consist of phrases and multiple words that are merged into a single string (such as \"VERYFAST\"), we further postprocess the answers by splitting the strings into individual words using a dictionary. Out of all the possible word splits of a given string we pick the one that has the smallest number of words. If there are multiple solutions, we select the split with the highest average word frequency. Examples of a variety of clues found in this dataset are given in the following section.
46
+
47
+ To provide more insight into the diversity of the clue types and the complexity of the task, we categorize all the clues into multiple classes, which we describe below.
48
+
49
+ Clues that encode encyclopedic knowledge and typically can be answered using resources such as Wikipedia (e.g. *Clue: South Carolina State tree, Answer: PALMETTO*). This type of clue is the closest to the questions found in open-domain QA datasets. Note that the facts required to solve some of the clues implicitly depend on the date when a given crossword was released. For instance, the clue \"*President of Brazil*\" has a time-dependent answer.
50
+
51
+ Clues that require the knowledge of historical facts and temporal relations between events. (e.g. *Clue: Automobile pioneer, Answer: BENZ*).
52
+
53
+ Clues that exploit general vocabulary knowledge and can typically be resolved using a dictionary. (e.g. *Clue: Opposing sides, Answer: FOES*).
54
+
55
+ Clues that focus on paraphrasing and synonymy relations (e.g. *Clue: Prognosticators, Answer: SEERS*). In most cases, such clues can be solved with a thesaurus.
56
+
57
+ Clues formulated as a cloze task (e.g. *Clue: Magna Cum \_\_, Answer: LAUDE*). Fill-in-the-blank clues are expected to be easy to solve for the models trained with the masked language modeling objective [@devlin2019bert].
58
+
59
+ Clues answered with acronyms (e.g. *Clue: (Abbr.) Old Communist state, Answer: USSR*). Abbreviation clues are marked with \"*Abbr.*\" label.
60
+
61
+ Clues that suggest the answer is a suffix or prefix. (e.g. *Clue: Suffix with mountain, Answer: EER*)
62
+
63
+ Clues that rely on wordplay, anagrams, or puns / pronunciation similarities (e.g. *Clue: Consider an imaginary animal, Answer: BEAR IN MIND*). In a lot of cases, wordplay clues involve jokes and exploit different possible meanings and contexts for the same word.
64
+
65
+ Clues that either explicitly use words from other languages, or imply a specific language-dependent form of the answer. (e.g. *Clue: Sunrise dirección, Answer: ESTE*).
66
+
67
+ Clues whose answer can be provided only after a different clue has been solved (e.g. *Clue: Last words of 45 Across*). Although rare, this category of clues suggests that the entire puzzle has to be solved in a certain order.
68
+
69
+ <figure id="fig:piechart_1000annotation" data-latex-placement="!ht">
70
+ <embed src="images/crossword_test_piechart.pdf" style="width:70.0%" />
71
+ <figcaption>Class distribution of the 1000 manually annotated test examples.</figcaption>
72
+ </figure>
73
+
74
+ To understand the distribution of these classes, we randomly selected 1000 examples from the test split of the data and manually annotated them. [2](#fig:piechart_1000annotation){reference-type="ref+label" reference="fig:piechart_1000annotation"} illustrates the class distribution of the annotated examples, showing that the Factual class covers a little over a third of all examples. The synonyms/antonyms, word meaning and wordplay classes taken together comprise 50% of the data. The remaining 20% are taken by fill-in-the-blank and historical clues, as well as the low-frequency classes (comprising less than or around 1%), which include abbreviation, dependent, prefix/suffix and cross-lingual clues. We illustrate each one of these classes in the [1](#fig:crossword_example){reference-type="ref+label" reference="fig:crossword_example"}.
75
+
76
+ ::: {#tab:stats}
77
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
78
+ | | **Train** | **Validation** | **Test** |
79
+ +:===============================+:====================================+:====================================+:====================================+
80
+ | | Clue-Answer dataset |
81
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
82
+ | \# clues | 433,033 | 72,303 | 72,939 |
83
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
84
+ | avg/median clue length (words) | 4.0/3 | 4.2/4 | 4.2/4 |
85
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
86
+ | avg/median ans. length (chars) | 5.5/5 | 5.7/5 | 5.6/5 |
87
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
88
+ | avg/median ans. length (words) | 1.3/1 | 1.3/1 | 1.3/1 |
89
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
90
+ | | Crossword Puzzle dataset |
91
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
92
+ | \# puzzles | 7,293 | 872 | 879 |
93
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
94
+ | avg/median \# of clues | 83.5/76 | 83.6/76 | 82.9/76 |
95
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
96
+ | avg cols${\times}$rows | 15.9${\times}$`<!-- -->`{=html}15.9 | 15.9${\times}$`<!-- -->`{=html}15.9 | 15.8${\times}$`<!-- -->`{=html}15.8 |
97
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
98
+ | \% of cells filled | 82.20% | 80.20% | 81.20% |
99
+ +--------------------------------+-------------------------------------+-------------------------------------+-------------------------------------+
100
+
101
+ : The full statistics on the two versions of the released datasets.
102
+ :::
2206.04301/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-09-28T20:52:51.815Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="ff3X07KAPdeM1CyDbSdR" version="20.3.7"><diagram id="07j30SORgZAi5qbgCSt1" name="Page-1">7Vtdc6I6GP41zuxe1IEkgFxW29PObM+Zzu752qudCFE4B4mLsdr99ZtgooSgohVKZ3vRkbyEhDzv837kDe3B0Wx9l+F59DsNSdIDVrjuwZseADYCLv8RkueNxENoI5hmcSg77QRf4h9ECi0pXcYhWWgdGaUJi+e6MKBpSgKmyXCW0ZXebUITfdY5nsoZrZ3gS4ATYnT7Jw5ZJKW2Veh+T+JpJKceOPLGDG87bwSLCId0VZgL3vbgKKOUba5m6xFJBHgKl81Av+25u32xjKSszgPjiUvtiH2ar9YjZ+Lay0/o7kpq5wknS7nge5yFAVcjl14zxseOaSoXwJ4VKhldpiERA9s9OFxFMSNf5jgQd1ecB1wWsVkib09oyqRi+SvC4TTBC6EVcW0uQq7riWSMrAsiuag7QmeEZc+8i7yLlOYkwyDy+64cZbVTGYCyW1TQlgP63kCSRRJlup1ghyW/kHCeAK1nQPs28ASuUwdPuxJPqyEw1WQFzEjIDVU2acYiOqUpTm530uEOVQHNrs8DpXOJ5X+EsWcJJl4yqiNN1jH7Vzzed2TrqxxMXN+si41n1Uj5egsPieZXNZ5o7B7LW+q5slb3anJBl1lADmAlCc1wNiXsQD/lYgWQB4mRkQSz+En3iBdX8cCwlxsyZ9HVKl4IZzSi6VOn7cbxdLMZOKbNDCpsBjZmM7aB17vN7MHKr2kzwOuUzfiGzTzEKcHZWzIU5FUElypDsf2GUFSO8E3DCFXsPQZjY2S0oQGj6YDS8Fpk5bwVCDjiQMdMdz9lBM/1E3vRPmrvdc29ALpzIC+q7RXkDI805m+8VTmEeqqL/FLs2Pg5+VRxL3BkIAeV3mUDjDFQzovtsl9AFeckqqQ0JafwZK++O6JHG7k6/OUh6uoReSU9AqtdPZr7xzdv8n7HqOId1nBdqmxNXA4EBu2avGJGgSo8pQ8w63SQBa7X17fDwKlIV4BjEgBC9eTFIy1AjZtdi1n8XjUdzdq9miat4OmITSNPd/8Qnun+HaAP1LpNnxbG2+chaImIagfxzsTXYmLzicjLmOi1xET3nYivS0Tf5N170e1wMe0oWQc1udpO0Q2YJzufycNfht67lMeWa26wIomtLBY1Vpxus1hkDwZFhlt934dHWJ63HkkW8+WS7JgiSsysQWn3Vb2tCzU6nL+pVDVZxSurXW8LzY1QQGfzJRMnRpE8yQ5znTGuxnTRaSNFAz12IQ8YVnqQFpe3UjOtiuIwJGkOIs5h7rlWxP8OIGudjuwF0ASOK7IuDVC/wu0ht1+xe3cbg9SMHj2xn0fX33rekPOJ9jwu5INbuSRIFrpgQea5APVUGaBDoG89iUJc5UNHT0GbitbQPFduLtCclEidk7TtVdHRZEu5yqORCXbrzAPZsFSBcxz3vGjFw155KKs0VMPxStVg33cHxwmL6tZUFLM7sj1Q711wOBleiadSnpiIBdH/ichEXDwTDjkdL8QPD6HLLobRskd31PqKHt2v8uiwKYAbr/Xsha0jPtE4vi0XOWqfAwPfcK7tZvCoKjy7CRPqsjWdut+X4lPZXFlXi1xb17wD19M6V5m6z6+m8jdR/Xm/CQ70cf7kWztuhdYfRBjnZzrDaXGQF8z8AX9Uk3NMNvPr78TFYnVKWCIvt0ym03XBMu40RjShYisqj8MncZKURDiJp6ngPCeu2LUOhZ3HAU6u5Y0Zz97zoFLlSH
SjKG5uGvArtqNzGPimX3Gr3EpTiTkyP6ZS6hpfkojnDHQyeyURxzWIOP61eQgGHeOhY+YPr8egy1jAh+BjNeH2cPNX5CFyX4+HN+z7MJ5efYvDp8f70d9j50ccX4EmaLjRsPq6sp76X1Si26OGCmWd8A2IXVVHqvoGBDWlnOar561/AGLU0M86UpI4FDeN+9n9Wvm7USUrf35Zu/5e+v4P+o1l75UodvI7pPPo1D2WQFSKCO65u7zSdtFubo9XiWInvxK6oNPZVEw740zOPcxz7dJAlnUhmvDm7v9cN913/y0Mb38C</diagram></mxfile>
2206.04301/main_diagram/main_diagram.pdf ADDED
Binary file (22.9 kB). View file
 
2206.04301/paper_text/intro_method.md ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ The deep learning revolution is about training large neural networks on vast amount of data. The first field transformed by this methodology was computer vision, crucially leveraging the convolutional neural network architecture [LBD<sup>+</sup>89, KSH12]. More recently natural language processing was revolutionized by the Transformer architecture [VSP<sup>+</sup>17]. Transformers are designed to process input represented as "set of elements" (e.g., the words in a sentence with their positional encoding). This is of course an incredibly generic assumption, and thus Transformers can be applied to a wide variety of tasks, including vision [DBK<sup>+</sup>21], reinforcement learning [CLR<sup>+</sup>21], and protein structure prediction [RMS<sup>+</sup>21, JEP<sup>+</sup>21] among others, or even jointly across domains to produce generalized agents [RZP<sup>+</sup>22]. In fact, learning with Transformers is rapidly becoming the norm in deep learning.
4
+
5
+ Transformer models display excellent performance on the standard criterion "training error/test error" (e.g., for masked language prediction or translation). However, what makes them particularly noteworthy, is that large-scale Transformer models seem to exhibit unexpected emergent behaviors, such as basic reasoning ability [TDFH<sup>+</sup>22, BMR<sup>+</sup>20, CND<sup>+</sup>22, DHD<sup>+</sup>21, RBC<sup>+</sup>21, HBM<sup>+</sup>22, SPN<sup>+</sup>22, ZRG<sup>+</sup>22, WWS<sup>+</sup>22, NAGA+22], excellent fine-tuning performance [HSW+22, TDFH+22, NAGA+22, RBC+21, PHZ+22], or zero-shot learning [BMR+20, CND+22, DHD+21, RBC+21, HBM+22, SPN+22, ZRG+22]. Currently, there is a remarkable community effort towards at-scale experimental investigation of Transformers, essentially trying to find out what such models can do when they become large enough and are trained on large/diverse enough datasets. The successes are striking and capture the imagination [BMR+20, RDN+22]. Yet, for all of these wonders, there is very little understanding of how these models learn, or in fact what do they learn. Answering such questions in the at-scale experiments is particularly challenging, as one has little control over the data when hundreds of billions of tokens are harvested from various sources. In this paper, we propose to take a step back, and try to understand how learning occurs and what is being learned in a more controlled setting that captures important aspects of "reasoning".
6
+
7
+ The benefit of such a controlled setting is that we can try to understand some of the most pressing questions in learning with Transformers, particularly around (i) the architecture and (ii) the importance of training data. For (i) we probe the role of multiple heads and depth, and we show that we can successfully understand them in our controlled setting. For (ii) we investigate how much the dataset composition matters, as well as how pretraining on merely vaguely related tasks makes fine-tuning successful. In turn, these insights can guide our thinking for large-scale experiments, and we give some of the lessons learned below. In particular, our insights crystallize into an architectural change to BERT for faster inference with matching or even better performance (Section 5).
8
+
9
+ Core components of reasoning include the ability to associate concepts, and to manipulate them. We propose a simple task that captures these two aspects, which we call LEGO (Learning Equality and Group Operations). In LEGO, the input describes a sequence of variable assignments as well as operations on these variables by a fixed (mathematical) group. One needs to be able to deal with both long-range assignments (the same variable appearing in different parts of the input should be viewed as being equal to the same quantity), as well as short-range operations (describing what group element is applied to which variable). A key parameter of an input sequence will be its length, which is proportional to the number of sequential reasoning steps one has to do in order to resolve the value of each variable. We will mostly train with a fixed sequence length (say 12). We often provide supervision only on part of the sequence (say the first 6 variables). We do so in order to test the generalization capabilities from smaller length sequences to longer length sequences without introducing potential errors due to the positional encoding in Transformers.
10
+
11
+ In LEGO, we are interested in both classical generalization (i.e., training and test distribution are the same) and out-of-distribution generalization. For the latter we focus on distribution shifts that vary the length of the chain of reasoning, and thus we refer to this type of generalization as length extrapolation. Specifically, the setting for length extrapolation is to train with supervision on shorter sequence lengths (e.g., supervision on only the first 6 variables) and test on a long sequences (e.g., accuracy computed on 12 variables). A summary of our empirical observations is as follows:
12
+
13
+ - 1. First, classical generalization happens reliably for all architectures and data regimes.
14
+ - 2. More interestingly, length extrapolation seems to depend on architectural/data composition choices. Specifically, BERT-like models without special data preparation do not extrapolate to longer sequences, while other models like ALBERT, or BERT with carefully selected data (such as diverse sequence lengths, or pre-trained BERT) do extrapolate.
15
+ - 3. The extrapolating models all seem to evolve attention heads dedicated to either association (long-range identity matching) or manipulation (short-range operations). We provide evidence that pre-trained BERT
16
+
17
+ (which is pre-trained on a seemingly unrelated dataset) generalizes because it has learned such heads.
18
+
19
+ 4. The non-extrapolating models seem to solve the classical generalization problem using a certain shortcut-like solution, whereby using the specificity of the group operations they are able to jump to the end of the chain of reasoning, and then complete the rest of the variables by following the reasoning both from the start and the end of the chain.
20
+
21
+ We interpret our findings as follows:
22
+
23
+ - (i) Classical generalization can be a deceptive metric, as there might be unexpected ways to solve the problem. This is famously related to the issue of embedding machine learning systems with common sense reasoning. Namely, we hope that when an ML system solves a task, it does so in "the way humans do it", but of course, nothing guarantees that this will happen. Our findings are consistent with the current methodology of increasing the diversity of the training data, which seems crucial for generalization.
24
+ - (ii) ALBERT-like models, where a layer is repeated several times, seem to be an ideal structure for problems that could be described algorithmically as a "for loop" (as is the case with following a chain of reasoning). Indeed we find that ALBERT extrapolates in data regimes where BERT does not, clearly separating these two architectures.
25
+ - (iii) The success of pretraining/fine-tuning in vastly different tasks might actually come from a "simple" better initialization, rather than complex knowledge encoded during pre-training.
26
+ - (iv) The interplay between short-range (close-by information in a sentence) and long-range (the same concept appearing in different places in the sentence) is relevant more broadly than in our synthetic task. We observe that the networks effectively learn to deal with short-range/long-range information by implementing specific attention patterns. This motivates us to study a new LEGO attention architecture, and we show it matches or even outperforms its baseline on the large-scale pretraining but with significantly less computational cost.
27
+
28
+ # Method
29
+
30
+ A salient feature of many reasoning tasks is an iterative component, meaning they can (or must) be solved by sequentially repeating certain operations. In this section, we use LEGO to study and compare Transformer architectures through the lens of iterative reasoning.
31
+
32
+ ![](_page_5_Figure_0.jpeg)
33
+
34
+ Figure 3: Solving LEGO (Task 1) using BERT and ALBERT, trained from random initialization. Each curve corresponds to the test accuracy of a single variable appearing in the sentence over the course of training. The variable numbers in the legend are their position in the reasoning chain (or graph representation) of the input sentence, rather than the position in the sentence itself. For example, on the input sentence: b=-a; d=-c; c=+b; a=+1;, variable #0 is a, #1 is b, #2 is c, and #3 is d. Top a): models are trained to fit all variables ( $n = 12, n_{tr} = 12$ ). Bottom b): models are trained to fit the first 6 variables but test on all 12 variables ( $n = 12, n_{tr} = 6$ ). Dashed curves represent variables not supervised during training.
35
+
36
+ A natural solution to LEGO—and arguably the go-to solution for a human—is to implement a "for-loop", where each iteration resolves one step in the reasoning chain. The iteration could look for the next unresolved variable token whose value could be resolved in one step. Iterative Transformer architectures such as ALBERT and Universal Transformers [DGV+18], where the weights are shared across different layers, inherently implement a for-loop with a number of iterations equal to the number of layers. If the model manages to learn to implement one such iteration during training, the network would immediately be capable of performing length extrapolation. If this indeed occurs, it would point to a clear advantage of ALBERT over BERT in our setting. This leads to the following questions.
37
+
38
+ The bottom plots of Figure 3 display the length extrapolation result for BERT and for ALBERT. They show the clear advantage of recurrence: While the non-iterative BERT achieves only somewhat better-than-random accuracy for one variable (#6) beyond the ones accounted for during training (#0--#5), the iterative ALBERT reaches near-perfect accuracy on two additional variables (#6 and #7), and nontrivial accuracy on the third (#8). These results clearly support that iterative architectures do generalize better in the iterative LEGO reasoning task.
39
+
40
+ To a lesser extent, Figure 3 also hints at a positive answer to Q2. Observe that ALBERT exhibits length extrapolation to variable #6 immediately (in terms of epochs) as soon as it fits the training variables (#0
41
+
42
+ ![](_page_6_Figure_0.jpeg)
43
+
44
+ Figure 4: Visualization of information percolation within the fine-tuned models. The color indicates the test accuracy of the probing classifier at each layer. Brighter is higher. We observe ALBERT's information percolation is more linear than BERT's, which implies ALBERT is biased towards learning a for-loop.
45
+
46
+ – #5), whereas for BERT, the corresponding plot (#6) climbs gradually even after the training variables are predicted perfectly. This suggests that once it manages to learn the operations required for one step of reasoning, it can immediately implement those operations over a few more iterations not required in training.
47
+
48
+ In order to gain stronger evidence, we measure the dependence between the location of a variable token in the chain and the layer in which its value is typically resolved. To this end, given a trained model, we train one linear classifier per layer which predicts the value of a variable token based only on its token representation at the corresponding layer (without using other information), while keeping the original model unchanged. This allows us to gauge the rate of information percolation along the reasoning chain in terms of layers per reasoning step. If the model indeed implements a for-loop in its forward pass, one expects a linear relationship between the number of layers and the number of reasoning steps already completed. We visualize in Figure 4 the test accuracy of prediction as a function of the layer in the network and depth in the chain. While not perfectly linear, the relation clearly looks closer to linear in ALBERT, suggesting that the ALBERT model has an inductive bias towards learning to implement the "natural" for-loop with its forward pass.
49
+
50
+ We attempt to incentivize the model to implement the "natural" for-loop solution. We rely on the observation that if each iteration of the for-loop simply percolates the information one more step (assigning a value to the next variable), then adding more layers with the same weights should not affect the output, and in fact, one should be able to read out the output of the calculation from any layer of the neural network, as long as its depth exceeds the length of the chain. With this observation in mind, we train an ALBERT model with stochastic depth [HSL<sup>+</sup>16]. We uniformly sample depth between 6 and 12 per batch during training while fixing it at 12 during test. Figure 5 shows a clear improvement in generalization to longer lengths using stochastic depth.
51
+
52
+ Pretraining large models has emerged as a prominent and highly successful paradigm in large-scale deep learning. It advocates first training the model on a large dataset to perform a generic task, followed by task-specific fine-tuning on the task at hand. Our goal here is to use LEGO as a testing ground for this
53
+
54
+ ![](_page_7_Figure_0.jpeg)
55
+
56
+ Figure 5: Generalization of ALBERT trained with stochastic depth. The stochastic depth improves the length extrapolation to longer sequence lengths.
57
+
58
+ ![](_page_7_Figure_2.jpeg)
59
+
60
+ Figure 6: Pretrained BERT exhibits significant performance advantages over its Rand-Init counterpart, while the mimicking procedure (a simple initialization scheme we describe below) closes the gap.
61
+
62
+ paradigm. To this end, we compare (a) training the BERT architecture for LEGO from random initializations to (b) fine-tuning the standard pre-trained BERT model to solve LEGO. Figure 6 (left and center plots) shows that pretraining helps generalization in LEGO dramatically: the pre-trained model generalizes to unseen sequence lengths (the dashed plots) much better, and within a far smaller number of epochs, than the randomly initialized model.
63
+
64
+ One simple explanation is that pre-trained BERT is already aware of the semantics of tokens like '=' or '-'. We have easily ruled out this possibility, by replacing those tokens with arbitrary ones that do not encompass the same semantics; this does not affect the performance of pre-trained BERT. A more intriguing explanation pertains to the attention mechanism itself. At its basis, LEGO requires two fundamental types of information transfer:
65
+
66
+ - Association: encoding long-range dependencies that transfer a value between two occurrences of the same variable. For example, if the input contains the two clauses "a = +1" and "b = -a" (with arbitrary separation between them), the architecture must associate the two occurrences of the variable a in order to correctly set b to -1.
67
+ - Manipulation: encoding short-range dependencies of transferring a value from the right-hand to the left-hand side of the clause. For example, to successfully process the clause "b = -a", the architecture must associate these particular occurrences of a and b with each other, in order to transfer the value of a (after applying to it the group element -1) into b.
68
+
69
+ Association corresponds to a purely global attention pattern, completely reliant on the identity or content
70
+
71
+ of the tokens and oblivious to their *positions* in the input sequence. Manipulation, in contrast, corresponds to a purely local attention pattern, where nearby positions attend to each other.
72
+
73
+ ![](_page_8_Figure_1.jpeg)
74
+
75
+ Figure 7: Visualization of two representative attention maps from a pre-trained BERT model not yet fine-tuned on LEGO. A complete visualization of all attention patterns of the pre-trained BERT is in Appendix F. On the LEGO input sequence, certain heads implement local, convolution-like manipulation operators (left), while some others implement global, long-range association operators (right). Note that the sample input sequence is presented in the reasoning chain order for visualization purposes only.
76
+
77
+ It is natural to ask whether they are indeed manifested in the pre-trained model's attention heads in practice. Indeed, Fig. 7 shows two exemplar attention heads of pre-trained BERT on an input LEGO sequence without any fine-tuning. The right head clearly depicts association: each token attends to all other occurrences of the same token in the input sequence. This motivates us to make the following hypothesis: the advantage of pre-trained models on LEGO can be largely attributed to the association and manipulation heads learned during pretraining.
78
+
79
+ Note that merely the existence of the heads does not fully validate the hypothesis yet. To rule out other factors, we carefully design controlled experiments to test this hypothesis in the section below.
80
+
81
+ To test this hypothesis, we conduct the following *mimicking* experiments.
82
+
83
+ Mimicking BERT We 'initialize' certain attention heads to perform association and manipulation, without access to pretraining data. We achieve this by specifying the target attention matrices (one for association and one for manipulation), and training the model on random data to minimize a "mimicking loss" that measures how well the actual attention matrices at every layer match the target matrices. The precise mimicking loss and training protocol are specified in the Appendix B.3. The rightmost plot in Figure 6 shows that BERT with mimicking initialization attains significant advantage in generalization over randomly initialized BERT, despite not being pre-trained on any real data (and thus not having learned to "reason"). This confirms that much of the advantage of pre-trained BERT stems from having learned these information transfer patterns.
84
+
85
+ As discussed in Section 4.1, a natural solution to LEGO is to resolve variables iteratively by the order of their depth in the chain. Surprisingly, we find that the Rand-Init BERT and ALBERT models first learn a "shortcut" solution: they immediately resolve the last variable in the reasoning chain, perhaps by counting the total number of minus signs. Indeed, the last variable can be easily identified as it appears only once whereas every other variable appears twice, and its value is fully determined by the parity of the number of minus signs. This behavior is observed in Figure 3a: the randomly initialized models are trained to fit all 12 variables: the last variable (#11, indicated by the brightest green curves) improves earlier than almost all other ones.
86
+
87
+ This behavior may be related to the well-observed phenomenon of spurious features: a model succeeds in training by relying on spurious correlations (e.g., recognizing cows from the grassy background rather than any actual features of the cows themselves), thereby circumventing the intended solution [MPL19, SHL20, GSL+18, NNSN21].
88
+
89
+ We use LEGO as a case study of shortcut solutions and their effect on generalization. Instead of training the model to fit the first six variables (as in bottom Figure 3 in Appendix), we train it to fit the first five (#0–#4) and the last variable (#11). This allows us to measure length extrapolation (to #5–#10) in a setting where models can learn the shortcut. The results show significantly degraded performance, implying that shortcut solutions impede generalization. We then study ways to prevent models from learning them, by pretraining and mimicking. The full section appears in Appendix A.
90
+
91
+ Our analysis in Section 4.2 reveals that the advantage of the pre-trained BERT model on LEGO originates from two specific types of attention structures emerging from pre-training — the association and manipulation patterns. A quick examination of all the attention heads depicted in Appendix F suggests that there is one more clearly identifiable attention pattern: broadcasting on the [CLS] token or the [SEP] token (sometimes both). Namely, it 'broadcasts' the value inside the special tokens to the others. Even though [CLS], [SEP] play no role on LEGO per se, they are vital to the pretraining objective as well as many downstream tasks. Thus the broadcasting attention pattern is presumably important for many real-life NLP tasks beyond LEGO. Association, manipulation, and broadcasting constitute a considerable portion of the pre-trained BERT's attention heads, and they are so structured that we can in fact construct them efficiently.
92
+
93
+ ![](_page_9_Figure_6.jpeg)
94
+
95
+ Figure 8: Our proposed LEGO attention consists of 3 pathways. BERT has pathway (b) only; the LEGO v0 attention module has (a) and (c); the LEGO v1 attention has (a), (b), and (c). See Appendix C.
96
+
97
+ ![](_page_10_Figure_0.jpeg)
98
+
99
+ Figure 9: **Top)** Comparison of inference Flops and model size. Flops are measured on a batch of 64 sequences of 512 tokens.
100
+
101
+ ![](_page_10_Figure_2.jpeg)
102
+
103
+ Figure 10: Training and validation performance on the BERT pretraining task (Masked Language Modelling+Next Sentence Prediction). As a standard, the training sequence length increases from 128 to 512 around the 7k-th step, where the BERT training loss exhibits a sudden bump in response, while the LEGO v0/v1 models exhibit remarkable resilience. The LEGO v1 model learns faster and (slightly) outperforms BERT in validation.
104
+
105
+ **LEGO Attention:** For the association, manipulation, and broadcasting heads, we can efficiently construct the sparse attention matrix based on the input token IDs only, without learning Q and K or the expensive attention probability computation. For manipulation maps, due to their intrinsic locality, we decide to implement them directly with temporal convolutions (along the time dimension). For the other global maps, given a raw input sequence of T tokens, $u_1, u_2, \ldots, u_T \in \mathbb{N}$ , we manually construct the association and broadcasting maps $A_{asso}$ , $A_{cls}$ , $A_{sep} \in \mathbb{R}^{T \times T}$ such that $(A_{asso})_{ij} = \mathbf{1}[u_i = u_j]$ , $(A_{cls})_{ij} = \mathbf{1}[u_j = [\text{CLS}]]$ , $(A_{sep})_{ij} = \mathbf{1}[u_j = [\text{SEP}]]$ where $\mathbf{1}[\cdot]$ is the indicator function which outputs 1 if the argument is true and 0 otherwise. In the end, we normalize them to have row-wise unit $\ell_1$ norm. Notably, the latter three patterns require no training (except for a value map for each layer) and are shared across all layers.
106
+
107
+ On the standard BERT pretraining benchmark, we compare the following three models: BERT-base model, LEGO v0 and v1 models. We use convolutional kernel size 21 for the latter two. In Figure 10, we show that the LEGO v0 model learns fast in the beginning but falls short later on. However, the LEGO v1 model not only reduces model size and accelerates inference, but also renders models that are extremely competitive with the base model in terms of the final performance of large-scale pretraining. We follow precisely the training pipeline and hyperparameters of [DCLT18]. See Appendix C for architecture details of the LEGO $\rm v0/v1$ models.
108
+
109
+ We observe that the LEGO v0 model learns faster but gradually falls short, while the LEGO v1 model achieves the best of both worlds: it learns faster at the beginning and achieves even (slightly) lower validation loss at the end. The LEGO v1 model's validation loss curve appears to be a lower envelope of the other two. The BERT/LEGO v0/v1 models achieve 1.49/1.66/1.47 final pretraining validation loss and 88.2/82.5/88.1
110
+
111
+ Dev F1 score on SQuAD v1.1 [RZLL16]. We leave comprehensive evaluations for future work.
2206.07840/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2206.07840/paper_text/intro_method.md ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ The Machine Learning (ML) community now sees a threat posed by *backdoored neural networks*; models which are intentionally modified by an attacker *in the supply chain* to insert hidden behaviour [\[Bagdasaryan and Shmatikov,](#page-9-1) [2021,](#page-9-1) [Biggio et al.,](#page-9-2) [2012,](#page-9-2) [Gu et al.,](#page-9-0) [2017\]](#page-9-0). A backdoor causes a network's behaviour to change arbitrarily when a specific secret 'trigger' is present in the model's input, while behaving as the defender intended when the trigger is absent (retaining a high evaluation performance).
4
+
5
+ The vast majority of current backdoor attacks in the literature work by changing the trained weights of models [\[Gu](#page-9-0) [et al.,](#page-9-0) [2017,](#page-9-0) [Hong et al.,](#page-9-3) [2021,](#page-9-3) [Shumailov et al.,](#page-10-0) [2021a\]](#page-10-0) – here the backdoor is planted into the parameters during training of the neural network. This can be done directly (*i.e.* modify the values of the weights directly with [Hong et al.](#page-9-3) [\[2021\]](#page-9-3)), or indirectly by sampling adversarially [Shumailov et al.](#page-10-0) [\[2021a\]](#page-10-0) and modifying data [Gu et al.](#page-9-0) [\[2017\]](#page-9-0) to train with. This means that when the weights are later modified by another party (*e.g.* through fine-tuning), the backdoor could feasibly be removed or weakened [Wang et al.](#page-10-1) [\[2019\]](#page-10-1). When the weights provided by an attacker are discarded entirely (*e.g.* through re-training from scratch on a new dataset), any embedded backdoor would of course naturally be discarded.
6
+
7
+ However, the performance of a neural network depends not only on its weights but also its architecture (the composition and connections between layers in the model). Research has shown that, when given sufficient flexibility, the neural network architectures themselves can be pre-disposed to certain outcomes [\[Gaier and Ha,](#page-9-4) [2019,](#page-9-4) [Zoph et al.,](#page-11-0) [2017\]](#page-11-0). The network architectures can be seen as an inductive bias of the ML model. This raises a new question: Can the network architectures themselves be modified to hide backdoors?
8
+
9
+ In this paper we investigate if an adversary can use neural network architectures to perform backdoor attacks, forcing the model to become sensitive to a specific trigger applied to an image. We demonstrate that if an attacker can slightly manipulate the architecture only using common components they can introduce backdoors that survive re-training from scratch on a completely new dataset *i.e.* making these model backdoors weights- and dataset-agnostic. We describe a way to construct such Model Architecture Backdoors (MAB) and formalize their requirements. We find that architectural backdoors need to: (1) operate directly on the input and link the input to its output; (2) (ideally) have a weight-agnostic implementation; (3) have asymmetric components to launch targeted attacks. We demonstrate how such requirements make MAB detection possible and show that without these requirements, the learned backdoors will struggle to survive re-training.
10
+
11
+ We make the following contributions:
12
+
13
+ - We show a new class of backdoor attacks against neural networks, where the backdoor hides inside of the model architecture;
14
+ - We demonstrate how to build architectural backdoors for three different threat models and formalise the requirements for their successful operation;
15
+ - We demonstrate on a number of benchmarks that unlike previous methods, backdoors hidden inside of the architecture survive retraining.
16
+
17
+ # Method
18
+
19
+ There is now a trend to design more complex neural network architectures. Sometimes these auto-designed architectures are inscrutable, giving attackers an opportunity to insert malicious architectural backdoors. This trend is fueled by the ever-growing need to improve performance of the underlying architectures and the belief that there exists a 'best architecture' for many tasks. Gradient-based NAS [\[Liu et al.,](#page-10-10) [2019,](#page-10-10) [Xie et al.,](#page-10-11) [2018,](#page-10-11) [Zhao et al.,](#page-10-12) [2020\]](#page-10-12) is a popular approach to search for the best architecture. It is based on the idea that the architecture search space can be relaxed to a continuous one, so that the architecture itself can be optimized with respect to the loss function by gradient descent. Most of the searched networks contain sophisticated network sub-components that are often hard for humans to inspect. So much so, that [Xie et al.](#page-10-13) [\[2019\]](#page-10-13) used random graph models to generate randomly wired networks, and showed that these generated complex models that have competitive accuracy on standard benchmarks.
20
+
21
+ We assume that a potential attacker has full control over the training process of a neural network, and that the user receives a model M with architecture A and weights θ from the attacker. This could be because the user has downloaded a pre-trained model off the internet, or because they have outsourced model training to a third party such as a ML-as-a-Service (MLaaS) provider; both scenarios happen frequently in practice.
22
+
23
+ The goal of the attacker is to produce a *backdoored* model M(Ab, θb) which emits outputs undesirable to the user when a *backdoor trigger* is present in the input image, while keeping this backdoor hidden. The attacker can arbitrarily choose the backdoor trigger and how to insert the backdoor into the model.
24
+
25
+ - Setting 1 Direct: The user directly operates on the trained model M provided by the attacker. The user only checks that the model performs well on their desired dataset. This threat model applies when a user outsources their model training to a third party such as a cloud provider entirely.
26
+ - Setting 2 Fine-tuned: The user uses the model M as a pre-trained model and fine-tunes the model's weights θ on a new dataset. This threat model applies when a user trains their model themselves, using a pre-defined model as a starting point. It is worth noting that this is the default behaviour when training a model through popular libraries such as Keras [\[Chollet et al.,](#page-9-13) [2015\]](#page-9-13).
27
+ - Setting 3 Re-trained: The user builds on top of the architecture of the model M and re-trains all the weights θ from scratch on a new dataset. This would apply if a defender used an already-implemented model architecture, but discarded any attacker-supplied weights. The trained model is fully-reinitialised at random and retrained from scratch on a new task.
28
+
29
+ Note that the attack in Setting 3 is realistic – it is extremely common for practitioners to copy model definitions in open sourced projects. This already leads to confusion; for example one can find a number of different definitions of even the standard LeNet5 and ResNet architectures, but these models might have subtle implementation changes which cause performance differences from the ones reported in the original papers. Although we believe that such changes are currently non-malicious, our paper highlights that they should not be taken lightly and can in practice lead to serious vulnerability. In the meantime, it is also possible that, users would directly call external APIs to train a model [\[Chollet](#page-9-13) [et al.,](#page-9-13) [2015,](#page-9-13) [Wolf et al.,](#page-10-14) [2019\]](#page-10-14), and the attacker would be able to exploit this.
30
+
31
+ In contrast with existing attacks which embed their behaviour within the model weights, our goal is to make the backdoor behaviour *weight-agnostic*, meaning it persists even if the model is re-trained by an honest party. In this section, we introduce Model Architecture Backdoor (MAB), and explain its design using a simple AlexNet-based example [\[Krizhevsky et al.,](#page-9-14) [2012\]](#page-9-14) (with smaller filters such that it can operate on 32x32 inputs which we later use in our experiments). We first look at the two major designs phases, namely *architecture engineering* and *activation engineering* for the MAB attack.
32
+
33
+ As illustrated on the left of Figure [1,](#page-3-0) between the final convolutional layer and first fully-connected layer lies an *AdaptiveAveragePooling* (AAP), which 'pools' the output of the convolutional layer to a constant 6x6 dimension (downsampling). This is where we mount our attack.
34
+
35
+ We do this by replacing the AAP operation with a 'malicious' version, and by adding an extra connection in the network from the input data to our malicious AAP layer, which allows it to detect the backdoor trigger in the original image. We *need* to operate on the original image to detect whether the backdoor is present: once the image has been through several convolutional layers there is no way to determine whether the backdoor was present in the original image (for an unknown set of intermediate weights).
36
+
37
+ <span id="page-3-0"></span>![](_page_3_Figure_1.jpeg)
38
+
39
+ Figure 1: A logical representation of the modifications we make to the AlexNet architecture. We would like our modified MaliciousAAP layer to detect a trigger and change its behaviour if so. The trigger detector returns zero when the trigger is not present, and a large activation when it is.
40
+
41
+ In an ideal situation, our modified activation function adds 0 when the trigger is not present (*i.e.* the identity function, which means training proceeds entirely as normal). Then, when a trigger is included in the original image, the activation function behaviour changes and adds some large activations to some outputs of the layer. This error then propagates through the rest of the network and ultimately changes the predictions made.
42
+
43
+ We thus look for a layer with the following properties:
44
+
45
+ - *Low false positive rate*: The modified behaviour does not fire when the trigger is absent (low false positive rate).
46
+ - This improves the task accuracy (making the backdoor harder to spot) and prevents corrections where many false activations during training encourage gradient descent to learn to counter-act the backdoor. We find that for some MAB constructions, parameters can learn to disable the backdoor; for example, by learning a second function equal to the backdoor and subtracting it. We will discuss this in more detail in Section [5.2\)](#page-8-0).
47
+ - *Backdooring*: There is a large change to the activations in the presence of a trigger. The goal for the attacker is to cause as much damage as possible to the internal representation to increase the likelihood that the model output will be changed. Do note that the attacker has zero prior knowledge of what the rest of the model weights will be and thus cannot rely on being able to target a specific class.
48
+
49
+ As activation functions generally operate on a pixel-by-pixel basis (they have no convolutional component), it is not normally possible to detect a trigger with spatial relationships (such as a checkerboard) using one. Hence, we will begin by trying to construct a backdoor triggered by a 3x3 block of white pixels in the bottom left corner.
50
+
51
+ <span id="page-3-1"></span>![](_page_3_Figure_10.jpeg)
52
+
53
+ Figure 2: The backdoor trigger used in our NaiveMAB (b) and MAB (c) attacks.
54
+
55
+ Our 'malicious' activation function is composed of the following steps:
56
+
57
+ 1. We apply an exponential function to the image (with RGB range [−1, 1]) *img*: $(e^{\beta \cdot img} - \delta)^{\alpha}$, for tunable values of α, β, δ. In this section, we use β = 1, δ = 1, α = 10. This has the effect of selecting any white
58
+
59
+ pixels and ignoring the rest. As can be seen in Figure 7 in Appendix A, we retain other white areas of the image, which we would like to filter out.
60
+
61
+ - 2. We then perform a 3x3 **MinPooling** operation on the result of (a), which replaces each pixel with the minimum of a 3x3 region around it $(p_{x,y} = \min_{a \in \{x-1,x,x+1\}} \min_{b \in \{y-1,y,y+1\}} p_{a,b})$ . This filters out any white pixels that are not part of a solid 3x3 white region.
62
+ - 3. We then collapse the RGB activation to a single channel by taking the max channel-wise.
63
+ - 4. Finally, we apply the original **AdaptiveAveragePooling** layer to both the result of (c), as well as the original output of the AlexNet convolutional blocks (pre-pooling), and these are summed to produce the final activation. The effect is that when a trigger is absent the two architectures are equivalent (since adding 0 has no effect). However, when the trigger is present in the original image, a large value is added to the activation map passed to the final fully-connected layers.
64
+
65
+ We call this first handcrafted backdoor *NaiveMAB*, for its limited robustness to spurious activations.
66
+
67
+ The insights gained above led to an attempt to produce a more robust backdoor, which is less likely to be incidentally triggered (for example, by an unrelated 3x3 white patch in the image). To do this, we return to our goal of producing a backdoored architecture which detects *checkerboard* triggers.
68
+
69
+ To this end, we modify the MaliciousAAP operation to detect both white pixels and black pixels in the same 3x3 region in the image. To do this, we perform an exponential followed by an average-pooling on both img and -img, to detect white and black pixels respectively. We must use average-pooling rather than min-pooling as min-pooling requires all pixels to match (and we cannot have all pixels being simultaneously white and black):
70
+
71
+ $$A = \operatorname{avgpool}\!\left((e^{\beta \cdot img} - \delta)^{\alpha}\right) * \operatorname{avgpool}\!\left((e^{-\beta \cdot img} - \delta)^{\alpha}\right).$$
72
+
73
+ Then, as before, we pass the activations A through AdaptiveMaxPooling and sum it with the output of the original AAP layer. As this new formulation requires both white and black pixels within a 3x3 region, it can detect a 3x3 checkerboard trigger (Figure 2c), without being triggered by any image with a white region.
74
+
75
+ <span id="page-4-0"></span>Figure 3 shows the drastically increased effectiveness of our enhanced MAB with this trigger and detector. In later evaluation, we use this robust version.
76
+
77
+ ![](_page_4_Figure_12.jpeg)
78
+
79
+ Figure 3: Test set performance on CIFAR10, when all models are trained honestly by a defender. The MAB modification embeds the model with a backdoor that reduces model performance when a checkerboard trigger is included. The improved MaliciousAlexNet has increased task accuracy, while its accuracy dramatically reduces to random guessing when the trigger is added.
2207.04543/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-09-28T08:46:23.775Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.3.0 Chrome/104.0.5112.114 Electron/20.1.3 Safari/537.36" etag="silKO-Py3f0s2ZsD5Lvq" version="20.3.0" type="device"><diagram id="tHoeM6LJGSfaw_tzQal_" name="Page-1">7V1dl5s2E/41vrQPkpCAy+ymSS7S022zb5v0Joc1sk2DLRfj/eivf4UNNpa0tjaLJYHXyVmDAIGfZzQaSTPDAF3PHz/m8XL2K0toNoBe8jhA7wcQgtDH/KssedqWEOJvC6Z5mlQn7Qu+pP/RqtCrStdpQlcHJxaMZUW6PCwcs8WCjouDsjjP2cPhaROWHd51GU+rO3r7gi/jOKPSaX+lSTHbloYw2Jd/oul0Vt8ZkGh7ZB7XJ1dVrGZxwh4a90K/DNB1zlix3Zo/XtOsBK/GZftAH545unuwnC4KnQt+5HmUD1F2s7z213/Pp8XD08OwquU+ztbVD64etniqEcjZepHQshJvgK4eZmlBvyzjcXn0gXPOy2bFPON7gG/KD1XfgeYFfWwUVQ/5kbI5LfInfkp1dOhXTFQSA71q/2GPP6jZmjWwD6uyuKJ8uqt6jwrfqIB5AUj+aZA4uctyM51v5Omq/Lkpl6LP8R3NbtgqLVK24MfvWFGwOT8hKw9cxeMf0w2+1yxjOT+e0Em8zopGDe+ydFpeWbAS6ni13Er5JH0sObna3PBdXerVJXx7VhRlG3lX/nb4YUqLRXxPR2N+d/jhLmNT/vWwHPJ2U5RcwQ/rZcbiZMW3oAdC/uWVfyY5/XdNF+OnYZKuijy9W5e/ZFg8LelqeJfOWRJnwzl/5GqzedZouZi2JBPIOxQKrlhGWBIL6OORH8mSAcC5RAO7134Acq39EPdAEuXJPkiBeyBh50AK3QOJEOIYSNFpkE7AIvUykzTL6i5qANFkQsl4zMu5rmc/aONIEkR3nteS0gf4UOcHMrIqYOG5gAUa1lInkEUwGrmGLewJtsQnjiGLTiPLq+EDKnpaYZ7COIlpOFFiTMYhvZu0gzF0TXg1zMCOQYwRdE5FaNiRfOwRL6aZBsw5K+JqUDaMPAXMmIaJr4I5hHeId/rtaOLAcw5mDUu0azBjErgGc12xzqzCjD7G0xLDqyXNU35/mu9Lb+oieJqNjUqpJtmAoocMx1TdQ96F2Mct9ZCAgJFrGhxqmHY1GyUmp4E+BBaX/8pytiga5duPUpdvPi11l54W4GDXRsxArjGt1rFOk4TAMbHWMEz6qWQCgJzT+Br2S2d1TOCHzimYHhoyQQgdE2qNKTm6SN6VS3F8b8EWVMS1OV+3Ba9eafOPgUaTeuXuGcgakGAFJHVZTjNO7D09qFyFU3WHG5aW6yU7nS9Mt6MakbqKFVvnY1pdtUf7ZEXAFzgr4nxKC6miDW27n/0KJjXmDTvWHw/FpRCgUEoK0TjbchHSMPq7hjEUMIa2MdYw5buGsSjHSF77NIsx7B/GQMDYty3HGuZiaxgbWtE5tMjtq2MNC7FjEIva2DrEOvZhtyAWpRhaVsa+SaPC0AKlaFMEliE2OVdlBmIsKgrbUty/7g4LigKZ08XXqTf8+DD97e/lY3j/9c90/fvs391KcBNRPoj+Uu2yvJixKVvE2S/7UmF8vj/nMysdEjdo/0OL4qma8IvXBTvkgj6mxdfy8hGu9r5VlZXb7x+bO0/1zoL/3sZF5e635rH9ZZu9+rpnSdsOwo+JX6VhtoPsIyfWIJ6cftCeV3gdp/4bp6/nFNvi9Ohzt+bmxviRtCgBRaqZS0PzBCQYQcFXDiGVF69CJ5Jz6cTwrfk8K4aRZvOJnNKI
0Rujr2a09nR0RSFqTE23qBBNubf6ZBQIzoLqsAalkRidy6Lw3hrQc4KI6xC0Uw2opsyRBlQ/t6kGZGSJfwiRbFFA3QZEztZ+FONW59sPioJmCxp6Iw+caEWbvZuGq8grmxbSbVpu9U1Yw726zaZVe3k8783xnP9HK31WJDc5rGvEiwvarTU50rkWB5qtrdF9td9jEV2TjzhlxQdvlL6e0sAtTdl2HOiJaQ0jDll8qMQ1Io4aH8GiN2jPq6NvVctDfvkfX5dJCsZxNgiubgfB++9edUBkhWNRHEJ/iGnl1dUkoCqKq+j1MUe4NBKksPZ5miTPut8d8w2DsgC0QqcwZx/Jc/ZEQR8617IIUVnuavbAxbMHiOcYe1CbPXjx7CHkO8aeyrRXs7e4ePawuGZvnT3Z3Lilq9Lh9pYPWbvKVDm4q2MrcDvMkQiMalE/Rl44ChTu++fjT8c5a+9ZPs7i1SodD7Sdy7cmuojneJ3fby44mh7CEd/zUBiJiwNsXddzIA7pDbuek5cFEVwi1bIzSTTCwc/RDaPTdZ2bcZ1gg8tm3IdIYql2Pnkp474v1WWU7kDHJ//C6Q7C9uiOpLrM0q0a9jxPd2VHJfFqtqOrQXxZfhMX3LZabEqgh5SW0qGAdFsYMGynZ/eDExWdWRJCk0FlhnyTkZhLwrqTfW2t9wllIuafsY+yycAya7JsO5oh0uk7OoZyIMmy7YCGyGRSMEMoR7LGsC3L/QsbQZGkMQyGNahR7l/2NVKnddiZbbYl2WR0jimM5WlP6yaGyYBUUzCLY1HbomwyItUQxpGYz9W6gQG8/lnLAZBgtq0wgNc/EyOAkZiuzgFxNmlkGMLZP8xWB4HKd9oszDrJRzumnLGYWMS6zgD96wGxa1mIAOhfOi1Jkm2bcrv3KvQJZCRo5Xrtxh7I50zxh7q+VAMP6QL1iseLfTCEesT4pDMv1ACt9H8XyzIQXCZA5P0kzaFQUeCZ5VkrddvF8oxISzwjbJtnnRn+i+UZt8Uzts6zzsD0YnkmoSfyPPKaH/JztIsT9Jz2o/WeXQp05ucvVgoCgM4iBYGH3JKCl7lMX5oU+NIrAdqSA6R42YBdSXgbsx2TBGFNqjU5INJLEOxKgVHvOyNJP7iFLiUgILYXB8xmYDWDM0S7cB93cDa6pmgIZ0VCDd82zjpzBB3DGcjyjK3Ls1EvPEP6WZZn3zrORhfJDekN0UPMutIwmsDZzEvEAiRm6IO1iWcPZ6MvLTCDM5FxVmaVMoy00XcXGJJo6VWQ9pfK62AnMzhbTaAGFPhbX0XXSWHXF/yF5Tfrb/kB2Kht4pzw+7a7U2zUsc8t4bc+0MRGbRmr4Evelr59AwcbNXDswg9kp2I/sM+A0RlatxoAsa19lJkRzwW+mXSUyJOCIK3P2ipTGHYcZtETFttWJMpMg10HWQq3IdZhNjlUMgSzmHbRvsIwOSQyBDIUQLY+7iEmxz2GQPal2HTrs7WKLJTdh1mQZevraEH/DDkSytHp1g1mrbxeHcNZDp223v8F/TMyVDlmHVjhCfpnaWAPyAJtH+j+WRvYiwR5xvZh7p+1gUEojwLtA200Tt0M0Eg0NxxQG0Yj1c3ALKb19UP7MBuNVTcDs0/ksaB1oMP+DVSwmFoam1Qb0e/f/isWt+t/vn364/Yz+/q/YLYaquaby7dzLL8/dfs1Hc2gi7beXwU9yWETebJTxLle3fFbyMA1upp//jP5Ov7xKfrr9vu9ksBztRJ7flfncz3huzljRTOahOMy+5UltDzj/w==</diagram></mxfile>
2207.04543/main_diagram/main_diagram.pdf ADDED
Binary file (51.8 kB). View file