Eric03 committed on
Commit
834666d
·
verified ·
1 Parent(s): f9e4653

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. 2011.00105/main_diagram/main_diagram.drawio +1 -0
  2. 2011.00105/main_diagram/main_diagram.pdf +0 -0
  3. 2011.00105/paper_text/intro_method.md +106 -0
  4. 2103.00180/main_diagram/main_diagram.drawio +1 -0
  5. 2103.00180/main_diagram/main_diagram.pdf +0 -0
  6. 2103.00180/paper_text/intro_method.md +122 -0
  7. 2103.03501/main_diagram/main_diagram.drawio +0 -0
  8. 2103.03501/paper_text/intro_method.md +271 -0
  9. 2106.02740/main_diagram/main_diagram.drawio +1 -0
  10. 2106.02740/main_diagram/main_diagram.pdf +0 -0
  11. 2106.02740/paper_text/intro_method.md +27 -0
  12. 2108.02180/main_diagram/main_diagram.drawio +0 -0
  13. 2108.02180/paper_text/intro_method.md +152 -0
  14. 2110.03753/main_diagram/main_diagram.drawio +1 -0
  15. 2110.03753/main_diagram/main_diagram.pdf +0 -0
  16. 2110.03753/paper_text/intro_method.md +55 -0
  17. 2203.13560/main_diagram/main_diagram.drawio +1 -0
  18. 2203.13560/main_diagram/main_diagram.pdf +0 -0
  19. 2203.13560/paper_text/intro_method.md +105 -0
  20. 2203.13685/main_diagram/main_diagram.drawio +0 -0
  21. 2203.13685/paper_text/intro_method.md +112 -0
  22. 2205.07872/main_diagram/main_diagram.drawio +1 -0
  23. 2205.07872/main_diagram/main_diagram.pdf +0 -0
  24. 2205.07872/paper_text/intro_method.md +121 -0
  25. 2205.08714/main_diagram/main_diagram.pdf +0 -0
  26. 2205.11048/main_diagram/main_diagram.drawio +1 -0
  27. 2205.11048/main_diagram/main_diagram.pdf +0 -0
  28. 2205.11048/paper_text/intro_method.md +87 -0
  29. 2205.15730/main_diagram/main_diagram.drawio +0 -0
  30. 2205.15730/main_diagram/main_diagram.pdf +0 -0
  31. 2205.15730/paper_text/intro_method.md +86 -0
  32. 2207.13440/main_diagram/main_diagram.drawio +0 -0
  33. 2207.13440/paper_text/intro_method.md +96 -0
  34. 2209.00465/main_diagram/main_diagram.drawio +1 -0
  35. 2209.00465/paper_text/intro_method.md +88 -0
  36. 2210.07920/main_diagram/main_diagram.drawio +1 -0
  37. 2210.07920/main_diagram/main_diagram.pdf +0 -0
  38. 2210.07920/paper_text/intro_method.md +85 -0
  39. 2211.10435/main_diagram/main_diagram.drawio +1 -0
  40. 2211.10435/main_diagram/main_diagram.pdf +0 -0
  41. 2211.10435/paper_text/intro_method.md +88 -0
  42. 2302.10145/main_diagram/main_diagram.drawio +1 -0
  43. 2302.10145/main_diagram/main_diagram.pdf +0 -0
  44. 2302.10145/paper_text/intro_method.md +88 -0
  45. 2303.10752/main_diagram/main_diagram.drawio +0 -0
  46. 2303.10752/paper_text/intro_method.md +88 -0
  47. 2304.02152/main_diagram/main_diagram.drawio +0 -0
  48. 2304.02152/main_diagram/main_diagram.pdf +0 -0
  49. 2304.02152/paper_text/intro_method.md +32 -0
  50. 2304.07039/main_diagram/main_diagram.drawio +1 -0
2011.00105/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2020-05-19T17:21:46.714Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" version="13.1.3" etag="CyyMil5Ry-I5oE1TO0FW" type="device"><diagram id="Yw1ccya0-kdMBQ2_9SI2">7V1bk5s2FP41nmkf4gHE9TF7ax+amUyS6eWRGK1Ni5GLceztr68ACQMSRvYKWbvGeYjRwsGc79PlfDoSM3C/PvyShZvVJxTBZGYZ0WEGHmaWZYLAwf8VJS9ViQ/sqmCZxRE56VjwNf4PkkKDlO7iCG5bJ+YIJXm8aRcuUJrCRd4qC7MM7dunPaOkfddNuIRMwddFmLClf8RRviJP4RjH8l9hvFzRO5sG+cs6pCeTgu0qjNC+UQQeZ+A+Qyivvq0P9zApnEf9Ul331PPX+odlMM1FLrCqC36EyY4828xy/90Vt7/b5mG2D7PtfIHWx1Lyu/MX6owcHoryVb5OcIGJv4ZJvEzx9wX+ETDDBT9glsfYfR/JH9ZxFBWX32VwG/8Xfi9NGfh4g+I0L8Fx7mbOQ2Frl6NtRYDC9DNKc8IHyy8MoDzMY1QYDYz61xX3g4del5i1ozFDIVrDPHvBp5ALPvgEG0JOyyRs3R+htiigqwbMNikLCbuWtekjAPgLwYCPB2Dw+ANl0SaGC4ghSApHf89a/qe4FJ75UHnqIz7B9DeH0h9H3Nxl8f839A9M8WkZA2SGdmkEI+Lp/SrO4ddNuCj+usf1uI1xAwjTL8mSYcO0NlgjQeMZc6cNjuHMOfD4Fi1tAuQErwfIZgAqqok+lQLQY/JbLAluN02z5XTTsBmXO5wKASRUCIfTQOFKAvbv3emB2/a54zE+90fyudvj8+379niX5ryGfyyae4zLy073XbvbaDMcALZVcUdyt88b9lS9q0jnaoGezpVaKc5/laE5tYSfpDLWvgEu/n4s04kmMphhtZlheRZnBDYOMwIOM3rHScZZ4yQ8dsDHcZLcowRlpS3w6Bb/6vFT4y9G+eGOrGS4OGi3dQHb1Jm8MW7d57zGxzSMO+VkHBhtiq/PCTx8LEI2/NwwjcjXh0USbrfxYsDX+LjhUde9s56eKjvUneXYFR+Ty9w5bokZkJ7LzwkoYMTEi4NADHQqtCyDCR5F/2ib5zmf3OFzUV8bOHudoZtVj6CpkS3aZQtIrmuGih1TVncUyJrCI+ElzBlTJR/qRxejiDlR5FoU8RxpFGFNSaQIT7uYKKKCIhaw5BCEMSSRHqyUMtFDDT2AY8ihB2NIIj1YIWeih6LWI7AltR5dQxLpwdOdNKbHYLSgLz0sr623m5596fgDmIOmJFKEJ5NNFFFBESbSv5QgjKEeemDAwpfGaUQV6f/BAf8+R7ZVFi/mHqsXTtxTxL1Ley+GeyP2Xjx9c6KHCnoAO5DVe3FMSaSIgNA5UWScAY7pSxvgsKbkUYRK7KcosszQbkNOY/Vokn9DphNmtUVxnRp0FSWXhPXNadCAhc7q9OuXyNT0KU49P74i3mzh8DxAuN1UmUjP8aGYOxAW/M0Tkwaila0XoCYQjfyjvkrCm3SR4mlr8nSjvWApPprjBUSy23G87apzvID89I4cP9DI05n/Or8oUAeEw/odjyRoogPK8hVaojRMHo+ld+0Z4OM5vyG0IW79G+b5C/FfMWPeRgwe4vzPxve/ClO4J6+OHg7EcnnwQg9S/Gh/Ng8aVxWHx8vKI3odO2DqG1qdBzkz4KoGMcStpCpVoxFSvYAwMYRHXMIwC4g1t1PfaC7ZNeqbgHJxO0AwiZUKgRDQCG4HiE4PVHtYBRDB1AON0ANRZVeXHggIRNO3U986PZDK+gZuK6w/rwdSCsRtRf1n9kC+pQ4I8FZ6IGU9icwegi/lOgZoAx5YbRM9Qi6rCfsDhnrE5QsUYTCpFif6MJU1ViA95XaA6PZhKoGYdIX+PgwAdeEs8KY+7Ap9mNUG3O6ssBXvwwYMSezDJt2jvw9TWmMFMhBuB4hu5rNCIOi+Bfo3nX0ClOW5AxJUefQZZjH2TrEA8nq6lE03LGnqUq78FlsY/UkOmTVmgNuxlOmqG0rakxzSAII2StcA4raSIgaAML3rAWG/lY7pOnPsI8xm8IfnDAk8MDfd48cbLZPZ5gkdrT0WFrXzjnsoUHc3igT2YjB79mJ4TOAaO/jDPt6euQlTr8mPURSXeyP17fLQof3VdnQwByn6ymFve6cHm519MG1O6+LKaF140s1I1LIwDxgSfPrtsyj+Z23N9cZ6GMdoq0acrSh4ydlSdqKwedkol3Cgwu3u8cs37av0yHBaRkeNtVk8x9q4ytZV27Efin+K61V32YHL7q802shNV23nKkAE3aBSHRC05ZyA4AFhssON0YDQVWbRomlSCYSuMosONaI2oQIIXWUWHWqEUiB0zerQokb46lZjObpmdWhRI1QCoWtWhw41AnBEmtGA0HW1iA41QikQukbWT+VHLRAuaEscSgM6XSPrqwDh2FcDwtU1stYCCIUBnatrZK1F06QSCF0jax1qhMqAztU1stahRigFQtfIWosaoTCgc3WNrLWoESqB0DWy1qFGqAzoXF0jax1qhFIgRCLr46Z7aAPTb6s4nXFSPs7NPzuV3HPpWn/8U5/i4mHLo+pJmCy2Zu6zI46Wmu36HKfz5q0uyqKb9fl25wVHRseQvDQ5V0QVeGck0pcwHpgHxFnlx72MP45nzBtWDDYH89Rd5JHLE1E6JnKpIRd2lAJyDd1FIrlE1JuJXIrIFVgqyDVwF4nkElGkJnKpIVexnbXvH1E3x+DWwE0kUounsY2UPQ542eMz6077VOORVw+43SER573EgENwKS9s5Ul7E/7XxZ/zjuTR8Fe4MGnCXwz/+pU8KvBXuHpowl8Qf89Xh7+slUMT/tLw571BfDT8+18hPsrCVAb/e5Quwhym4bSOtEhhbscaDud11rxQRwoTeNrrOEww/Z71xF/zbLfId9mZ65N77f0OFznKtrrTqnxzkMoGhursDV4FJyLo19DK56mu0/Jk5cuTXfp6oSssT/YFxNGx38Dk0UTHazy/gH73trnnXybRyfGuLAmrHpBUm1qExU2+hGmE1vjLUwyTCP//0/2Xp5/fdIeSobwabIGHwHgtwOSKDyZ903A928zWr9HWy/si+WkKdtzhSvBttfsiFV5gfHCFXTN9o92i1vtvn51gYA0Ykid3+yIJdNNMSs848JWEqfdXqpO6g7kXND4Xvk3S9f2z7Eqkk0ga4EQnRXTygIyJOcym1sScc85NJFJLJLFxopYaauFwYXxqDdxEIrVuMFVTX2oFlgJqnb6JRGpNCZz6UKtMImkOhMag1sBN5FEr4AmJU5A3cpDn0X2169jMmgdGb0siyinPPcusRBaxUuQWW9+HxSyBJuqNBVgmt
BlDfiOVFV+1MXInijeph5pzTsZIwg19xVNbugvXhdxZCmxpuIZ1iV4a20mUJADTVUVMuspKCTBcTbUBzBbiJwlz/OATOmUD5rE5W9wtf6Wgw03aa6AToXUYpxM0lcfpm2mb0PCWwV0ADT7MUDF9cOyOsnCz+oQiWJzxPw==</diagram></mxfile>
2011.00105/main_diagram/main_diagram.pdf ADDED
Binary file (42.9 kB).
 
2011.00105/paper_text/intro_method.md ADDED
@@ -0,0 +1,106 @@
1
+ # Introduction
2
+
3
+ Entity normalization and variant generation are fundamental for a variety of other tasks such as semantic search and relation extraction [@bhutani2018exploiting; @Arasu:2009]. Given an entity name $E$, the goal of entity normalization is to convert $E$ to a canonical form (e.g., "*Jordan, Michael*\" $\rightarrow$ "*Michael Jordan*\"), while the goal of entity variant generation is to convert $E$ to a set of different textual representations that refer to the same entity as *E* (e.g., "*Michael Jordan*\" $\rightarrow$ {"*Jordan, Michael*\", "*MJ*\", "*M. Jordan*\", $\dots$}).
4
+
5
+ Typically, entity normalization and variant generation are done by first performing entity linking [@moro2014entity; @zhao2019neural; @li2017cnn], i.e., matching entity names appearing in some context (e.g., free text) to named entities in curated knowledge bases (KBs), and then using the canonical forms or variations (of the linked entities) residing in the KBs to complete the tasks. Unfortunately, in some scenarios, such as search [@thompson-dozier-1997-name], entity names are not surrounded by context. Furthermore, for specialized domain-specific applications, there may not be a knowledge base to govern the names of the relevant entities. Thus, entity linking is not always applicable. In this paper, we take the view that entity normalization and variant generation can be done without contextual information or external KBs if we understand the internal structures of entity names.
6
+
7
+ As observed in [@campos2015entity; @bhutani2018exploiting; @Arasu:2009; @katiyar-cardie-2018-nested; @finkel2009nested], entity names often have implicit structures that can be exploited to solve entity normalization and variant generation. Table [\[tab:samples\]](#tab:samples){reference-type="ref" reference="tab:samples"} shows how we can manipulate such structured representations of entity names to generate different variations without help from context or external knowledge.
8
+
9
+ ::: table*
+ | **Mention** | **Structured Representation** | **Manipulation** | **Variations** |
+ |:---:|:---|:---|:---|
+ | *Michael Jordan* | "*Michael*\"$\langle$`first`$\rangle$ "*Jordan*\"$\langle$`last`$\rangle$ | $\langle$`last`$\rangle$, $\langle$`first`$\rangle$ | *Jordan, Michael* |
+ | | | *createInitial*($\langle$`first`$\rangle$) $\langle$`last`$\rangle$ | *M Jordan* |
+ | | | *createInitial*($\langle$`first`$\rangle$) *createInitial*($\langle$`last`$\rangle$) | *MJ* |
+ | | "*General Electric*\"$\langle$`name`$\rangle$ "*Company*\"$\langle$`suffix`$\rangle$ | *createInitial*($\langle$`name`$\rangle$) *drop*($\langle$`suffix`$\rangle$) | *GE* |
+ | | | *createInitial*($\langle$`name`$\rangle$) *abbreviate*($\langle$`suffix`$\rangle$) | *GE Co.* |
+ :::
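The manipulations above are simple operations over a token sequence once its components are labeled. As a minimal illustration (hypothetical helper names, not the authors' implementation), two of the manipulations from the table can be realised as:

```python
# Sketch of variant generation from a structured name representation.
# Field and function names are illustrative, not the paper's code.

def create_initial(token: str) -> str:
    """Reduce a token to its initial, e.g. 'Michael' -> 'M'."""
    return token[0].upper()

def generate_variants(structured: dict) -> set:
    first, last = structured["first"], structured["last"]
    return {
        f"{last}, {first}",                                 # <last>, <first>
        f"{create_initial(first)} {last}",                  # createInitial(<first>) <last>
        f"{create_initial(first)}{create_initial(last)}",   # initials only
    }

print(generate_variants({"first": "Michael", "last": "Jordan"}))
# expected elements: 'Jordan, Michael', 'M Jordan', 'MJ' (set order may vary)
```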
24
+
25
+ Declarative frameworks are proposed in [@Arasu:2009; @campos2015entity] to allow developers to manually specify rules that parse entity names into a structured representation. To avoid such low-level manual effort, [@katiyar-cardie-2018-nested; @finkel2009nested] used fully supervised methods for identifying nested entities embedded in flat named entities. Unfortunately, labeled data are rarely available to leverage these methods in the real-world. To mitigate the need for training data, [@bhutani2018exploiting; @qian2018lustre] proposed an active learning system, LUSTRE, to semi-automatically learn rules for mapping entity names to their structured representations. By using regex-based extractors and a list of comprehensive dictionaries that capture crucial domain vocabularies, LUSTRE can generate rules that achieve SoTA results. However, for more complex and realistic scenarios, dictionaries may not be available and regex-based extractors alone are not expressive enough. Moreover, as shown in Section [3](#sec:experiments){reference-type="ref" reference="sec:experiments"}, LUSTRE cannot handle long entities such as machine logs.
26
+
27
+ In this paper, we present a framework that learns high-quality BERT-CRF models for parsing entity names into structured representations in low-resource settings, namely, when no labeled data is available. The proposed framework is essentially an active learning-based approach that learns from human interactions. We believe that comprehensible user interfaces are essential for active learning-based approaches, especially for labeling tasks that require non-trivial human labels (e.g., sequence labels in our approach). Therefore, we developed a system named PARTNER [@qian2020partner] that implements this framework. We designed the interface of PARTNER similar to that of LUSTRE, but we also made major modifications so that it is more user friendly. Interested readers can find a video demo of PARTNER at <http://ibm.biz/PARTNER>. Our main contributions include:
28
+
29
+ ::: list
30
+ $\bullet$
31
+
32
+ A hybrid framework combining active learning and weak supervision to effectively learn BERT-CRF-based models with low human effort.
33
+
34
+ A full-fledged system, with intuitive UI, that implements the framework.
35
+
36
+ Comprehensive experimental results showing that the framework learns high-quality models from merely a dozen or so labeled examples.
37
+ :::
38
+
39
+ **Related work.** Our problem is related to both flat and nested named entity recognition (NER). However, as discussed in [@finkel2009nested], NER focuses on identifying the outermost flat entities and completely ignores their internal structured representations. [@katiyar-cardie-2018-nested; @ju-etal-2018-neural; @finkel2009nested; @dinarelli-rosset-2012-tree] identify nested entities within some context using fully supervised methods that require large amounts of labeled data, whereas our goal is to learn from very few labels (e.g., $< 15$) in a contextless fashion. Active learning [@settles2009active] and weak supervision have been widely adopted for solving many entity-centric problems, such as entity resolution [@kasai-etal-2019-low; @qian2019systemer; @qian2017active; @ertutorial-cikm19], NER [@lison2020named; @shen2018deep; @he2017unified; @nadeau2007semi], and entity linking [@chen2011el]. While the power of the combination of the two techniques has been demonstrated in other domains (e.g., computer vision [@brust2020active]), to the best of our knowledge, the two approaches are usually applied in isolation in prior entity-related work.
40
+
41
+ Recently, data programming approaches (e.g., [@Snorkel; @safranchik2020weakly]) use labeling functions/rules to generate weak labels to train machine learning models in low-resource scenarios. Data programming approaches like Snorkel usually assume that labeling functions are manually provided by users, indicating that their target users must have programming skills in order to provide such labeling functions. In contrast, our goal is both to minimize human effort (i.e., issue fewer labeling requests) and to lower the skill level required of users (no programming skills are needed).
42
+
43
+ # Method
44
+
45
+ We are given a set $E=\{\mathcal{E}_1, \dots, \mathcal{E}_m\}$ of isolated entity mentions (name strings) of a particular type, where $\mathcal{E}_i$ is a sequence $\mathcal{E}_i=(t_i^1,\dots,t_i^n)$ of tokens. Assume that the input set $E$ of entity names contains a set $C=\{\mathcal{C}_1,\dots,\mathcal{C}_k\}$ of semantic components (i.e., labels such as $\langle$`first`$\rangle$, $\langle$`middle`$\rangle$ in person names). Our goal is to learn a labeling model $\mathcal{M}: \mathcal{E}_i=(t_i^1,\dots,t_i^n) \rightarrow (y_1,\dots,y_n)$, where $y_k\in C$. The labeling model $\mathcal{M}$ is a BERT-CRF based model (see Fig. [1](#fig:architecture){reference-type="ref" reference="fig:architecture"}) with several key modifications, which we elaborate next.
46
+
47
+ <figure id="fig:architecture" data-latex-placement="ht">
48
+ <img src="architecture-bertonly.png" style="width:48.0%" />
49
+ <figcaption>BERT-CRF based model</figcaption>
50
+ </figure>
51
+
52
+ **Tokenization & vectorization**. An input entity name is tokenized with BERT's wordpiece tokenizer, which may result in sub-words for out-of-vocabulary tokens, e.g., "*starwars*\" $\rightarrow$ {"*star*\", "*##war*\", "*##s*\"}. In this case, we combine these sub-words' embeddings (from BERT) into one vector using element-wise addition (see Fig. [1](#fig:architecture){reference-type="ref" reference="fig:architecture"}). We then feed the sequence of token embeddings to a multi-layer perceptron (MLP), the goal of which is to condense the BERT embeddings to smaller embeddings (e.g., of dimension 50), so that they are somewhat comparable to the size of the *structure vectors* (to be discussed next), which are crucial for our active learning and weak supervision approach. It is not hard to see that the pre-trained BERT model can be replaced with any other seq2seq model with pretrained static word embeddings, such as a BiLSTM + fastText [@bojanowski2016enriching].
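A minimal sketch of the sub-word pooling and condensing step described above, using PyTorch; the dimensions and dummy inputs are illustrative, not the authors' released code:

```python
import torch
import torch.nn as nn

# Sketch (not the authors' code): pool wordpiece embeddings back to one vector
# per original token by element-wise addition, then condense with a small MLP.
hidden_dim, condensed_dim = 768, 50           # 50-dim condensed embeddings, as in the text
subword_emb = torch.randn(5, hidden_dim)      # e.g. ["star", "##war", "##s", "Jordan", ","] from BERT
word_ids = torch.tensor([0, 0, 0, 1, 2])      # sub-word index -> original-token index

num_tokens = int(word_ids.max()) + 1
token_emb = torch.zeros(num_tokens, hidden_dim).index_add_(0, word_ids, subword_emb)

condense = nn.Sequential(nn.Linear(hidden_dim, condensed_dim), nn.ReLU())
condensed = condense(token_emb)               # to be concatenated with structure vectors
print(condensed.shape)                        # torch.Size([3, 50])
```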
53
+
54
+ **Structure vectors**. We predefined a set of boolean predicates, each of which verifies whether a token satisfies a specific syntactic pattern. In our experiments, we defined a list of 15 predicates, which can easily be extended, as shown below:
55
+
56
+ ` hasAllCapsTokens()`\
57
+ `hasAllLowerTokens()`\
58
+ `hasAllAlphbeticalToken()`\
59
+ `hasPunctuationOnly()`\
60
+ `isAlphanumToken()`\
61
+ `containsNumber()`\
62
+ `containsPunctuation()`\
63
+ `isFirstLetterCapitalized()`\
64
+ `isTwoDigitNumber()`\
65
+ `isFourDigitNumber()`\
66
+ `isSingleDigitNumber()`\
67
+ `isInteger()`\
68
+ `isNumericToken()`\
69
+ `appearAtBegining()`\
70
+ `appearAtEnd()`\
71
+
72
+ Each token is then converted to a boolean vector using the predefined boolean predicates, and is concatenated with the corresponding condensed token embedding emitted from the first MLP (see Fig [1](#fig:architecture){reference-type="ref" reference="fig:architecture"}). Intuitively, condensed token embeddings can capture semantic information and structure vectors can capture structural information.
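A minimal sketch of how such a structure vector might be computed; the predicate implementations below are illustrative guesses at the intent of a few of the listed predicates, not the paper's definitions:

```python
import re

# Illustrative token-level predicates (a subset of the 15 listed above);
# the exact definitions used in the paper may differ.
PREDICATES = [
    lambda t: t.isupper(),                        # hasAllCapsTokens
    lambda t: t.islower(),                        # hasAllLowerTokens
    lambda t: t.isalpha(),                        # hasAllAlphbeticalToken
    lambda t: bool(re.fullmatch(r"\W+", t)),      # hasPunctuationOnly
    lambda t: any(c.isdigit() for c in t),        # containsNumber
    lambda t: t[:1].isupper(),                    # isFirstLetterCapitalized
    lambda t: bool(re.fullmatch(r"\d{4}", t)),    # isFourDigitNumber
]

def structure_vector(token: str) -> list:
    """Boolean structure vector for a single token."""
    return [int(p(token)) for p in PREDICATES]

print(structure_vector("Inc."))   # e.g. [0, 0, 0, 0, 0, 1, 0]
print(structure_vector("2020"))   # e.g. [0, 0, 0, 0, 1, 0, 1]
```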
73
+
74
+ **CRF layer**. Each concatenated vector is fed to another MLP, which condenses it into a vector of size $|C|$ (i.e., the number of label classes). Finally, the CRF layer uses the Viterbi algorithm to find the most likely sequence of labels from the emission vectors (i.e., the embeddings from the last MLP layer) and the learned transition matrix.
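For concreteness, a generic textbook-style Viterbi decoder over emission scores and a transition matrix (what a CRF layer computes at inference time); this is a standalone sketch, not the paper's implementation:

```python
import numpy as np

def viterbi_decode(emissions: np.ndarray, transitions: np.ndarray) -> list:
    """Most likely label sequence given emission scores (n_tokens x n_labels)
    and a transition matrix (n_labels x n_labels), both in log space."""
    n, k = emissions.shape
    score = emissions[0].copy()                 # best score ending in each label at step 0
    backptr = np.zeros((n, k), dtype=int)
    for t in range(1, n):
        cand = score[:, None] + transitions + emissions[t][None, :]
        backptr[t] = cand.argmax(axis=0)        # best previous label for each current label
        score = cand.max(axis=0)
    best = [int(score.argmax())]
    for t in range(n - 1, 0, -1):               # trace the best path backwards
        best.append(int(backptr[t, best[-1]]))
    return best[::-1]

# Toy example: 3 tokens, 2 labels (e.g. <name>, <suffix>).
emissions = np.log(np.array([[0.9, 0.1], [0.8, 0.2], [0.2, 0.8]]))
transitions = np.log(np.array([[0.7, 0.3], [0.4, 0.6]]))
print(viterbi_decode(emissions, transitions))   # [0, 0, 1]
```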
75
+
76
+ Recall that each token is associated with a binary structure vector that carries its "structure\" information. Consider the following company names:
77
+
78
+ ::: list
79
+ $\bullet$
80
+
81
+ "Apple Inc.\" = {"*Apple*\", "*Inc.*\"}
82
+
83
+ "Microsoft Corp.\" = {"*Microsoft*\", "*Corp.*\"}
84
+
85
+ "Coca Cola Co.\" = {"*Coca*\", "*Cola*\", "*Co.*\"}
86
+ :::
87
+
88
+ Although textually dissimilar, they are structurally identical. Concretely, "*Apple*\", "*Microsoft*\", "*Coca*\", and "*Cola*\" all contain only alphabetical letters with the first one capitalized; the tokens "*Inc.*\", "*Corp.*\", and "*Co.*\" all consist of alphabetical letters with the first letter capitalized and all end with a dot. Therefore, "*Apple Inc.*\" and "*Microsoft Corp.*\" have the same sequence of structure vectors. Moreover, for consecutive tokens with identical structure vectors, we combine them into one, and hence "*Coca Cola*\" shares the same structure vectors with the other two. Thus, if one of the three is labeled as $\langle$`name`$\rangle$$\langle$`suffix`$\rangle$, we can apply the same sequence of labels to the other two examples as weak labels without actual human annotation.
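A small sketch of this weak-labeling idea, assuming a per-token `structure_vector` function like the one sketched earlier; all names are illustrative:

```python
from itertools import groupby

def collapsed_groups(tokens, structure_vector):
    """Group consecutive tokens whose structure vectors are identical."""
    vecs = [tuple(structure_vector(t)) for t in tokens]
    return [(v, len(list(g))) for v, g in groupby(vecs)]

def structure_signature(tokens, structure_vector):
    return tuple(v for v, _ in collapsed_groups(tokens, structure_vector))

def propagate_weak_labels(labeled_tokens, group_labels, unlabeled, structure_vector):
    """group_labels holds one label per collapsed group (e.g. ['name', 'suffix']).
    Unlabeled entities with the same signature get the labels expanded to their tokens."""
    sig = structure_signature(labeled_tokens, structure_vector)
    weak = {}
    for tokens in unlabeled:
        groups = collapsed_groups(tokens, structure_vector)
        if tuple(v for v, _ in groups) == sig:
            weak[tuple(tokens)] = [lab for lab, (_, n) in zip(group_labels, groups) for _ in range(n)]
    return weak

# Toy structure predicates: (capitalized alphabetical token, ends with a dot)
toy_sv = lambda t: (int(t[:1].isupper() and t.replace(".", "").isalpha()), int(t.endswith(".")))

weak = propagate_weak_labels(
    ["Apple", "Inc."], ["name", "suffix"],
    [["Microsoft", "Corp."], ["Coca", "Cola", "Co."], ["IBM"]],
    toy_sv,
)
print(weak)
# {('Microsoft', 'Corp.'): ['name', 'suffix'], ('Coca', 'Cola', 'Co.'): ['name', 'name', 'suffix']}
```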
89
+
90
+ To some extent, the structure vector-based weak supervision approach adopted in our framework is similar to the labeling functions/rules adopted in data programming approaches (e.g., [@Snorkel]). In our framework, predefined boolean predicates can be viewed as token-level labeling functions, which are later automatically combined as entity-level labeling functions (together with condensed BERT embeddings) used by the second MLP in our architecture (see Figure [1](#fig:architecture){reference-type="ref" reference="fig:architecture"}). Moreover, in our framework, the labeling functions are transparent to the user, thus no programming skills are needed.
91
+
92
+ The model learning process has multiple iterations, each of which starts with requesting the user to label the entity with the highest *informative score* (to be defined shortly). Based on the user-labeled entity, a set of $k$ other entities with an identical sequence of structure vectors is automatically labeled and used to incrementally update the model being learned. Then, unlabeled entities are annotated by the refined model and ranked according to the probability scores produced by the CRF layer. Subsequently, both the top-$p$ high-confidence and the bottom-$q$ low-confidence machine-labeled entities are sent to the user for verification (i.e., correct or incorrect). We also update the unlabeled entity set by removing user-labeled entities and weakly labeled entities. We repeat the process until either the user's labeling budget is exhausted or most (e.g., $\ge$ 90%) of the low-confidence labeled entities are correct.
93
+
94
+ **Informative Score.** The informativeness of an entity is measured according to its *representativeness* and *uncertainty*. Let $\mathcal{S}(\mathcal{E}_i)$ denote the sequence of structure vectors of entity $\mathcal{E}_i$; then we define the representativeness of $\mathcal{E}_i$ with respect to the current set $E^{u}$ of unlabeled entities as follows:
95
+
96
+ $\textup{Rep}(\mathcal{E}_i)=|~\{\mathcal{E}_k \in E^{u}~|~ \mathcal{S}(\mathcal{E}_k)=\mathcal{S}(\mathcal{E}_i)\}~|$
97
+
98
+ Intuitively, the representativeness of an entity is the total number of entities in the unlabeled data that have the same sequence of structure vectors. The uncertainty score of an entity $\mathcal{E}_i$ is defined as:
99
+
100
+ $\displaystyle \textup{Uncertain}(\mathcal{E}_i)=\frac{1}{Pr(\mathcal{M}(\mathcal{E}_i)) / |\mathcal{E}_i|}$
101
+
102
+ where $Pr(\mathcal{M}(\mathcal{E}_i))$ is the probability score of the most likely sequence of labels for $\mathcal{E}_i$ produced by the final CRF layer, and $|\mathcal{E}_i|$ is the number of tokens in $\mathcal{E}_i$ (the probability is divided by this term to normalize it with respect to the length of the entity). Then, the informative score of an entity $\mathcal{E}_i$ is:
103
+
104
+ $\textup{Info}(\mathcal{E}_i) = \textup{Rep}(\mathcal{E}_i) \times \textup{Uncertain}(\mathcal{E}_i).$
105
+
106
+ Thus, informative examples are the ones that are structurally highly representative and for which the current model is highly uncertain.
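Putting the pieces together, a short sketch of ranking unlabeled entities by this informative score; the `signature` and `best_path_probability` helpers are assumed (hypothetical names), and this is not the authors' code:

```python
from collections import Counter

def informative_ranking(unlabeled, model, signature):
    """Rank unlabeled entities by Rep(E) * Uncertain(E) as defined above."""
    sig_counts = Counter(signature(e) for e in unlabeled)     # Rep: entities sharing a signature

    def info(entity):
        rep = sig_counts[signature(entity)]
        pr = model.best_path_probability(entity)              # Pr of the Viterbi label sequence (assumed API)
        uncertain = 1.0 / (pr / len(entity))                  # normalised by entity length
        return rep * uncertain

    return sorted(unlabeled, key=info, reverse=True)          # most informative first
```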
2103.00180/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-02-25T08:02:23.548Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36" etag="XSzXCvp9TFW_lHhMq3hK" version="14.4.3" type="google"><diagram id="ig5g03QVZA9EMWDSS2_o" name="Page-1">5VlRc5s4EP41fswNAhvbj3GStnOXTNPx3Fz7KIMKagVyhYhxf/2tQBiQcEwa7PZ6T5YWaRHft99qJU+8m6R4K/A2fuAhYRPXCYuJdztxXYQWc/hRln1l8ZeoMkSChnpQY1jT70QbHW3NaUiyzkDJOZN02zUGPE1JIDs2LATfdYd95qz71i2OiGVYB5jZ1n9oKOPKupg5jf0doVFcvxk5+kmC68HakMU45LuWybubeDeCc1m1kuKGMAVejUs1782Rp4eFCZLKIRM2V+ztwwfv8d3Xv/P8z8U3+iXMrpBXuXnCLNdffMsTTNOJ8uYz8LzaCGhFqvVXyneMhIBY9UVyX8MkSaEGxDJhYEDQxIxGKbQDWB4RYHgiQlIA9lo/SGgYqukrQTL6HW9KVw70t5ymsqRttprMbpWvXPKsCg3l+jNPpY4U5EM/k4J/PdADwK70R8EbSXEULnQgAaKX8IRIsYcheoKH9BQduO5U87hrwmCpTXErAuphWAdedPDccAMNTc9LqLKYWlHOeKQwNdhqMeN/y1WAlZhdVRhewwDkb4sSp/p5TfF6n0mSWPzC+kBxii0I460yBoznsNzVLqaSrLc4UMYdJIFuGJhcjcBMnRpqYlybGOT3MIM890zUuMeoyaSFZI1fnrDrQPK2Mu7xhrBHiHRJuVLIhkvJkx7pSG6gzHPJaEpuDknQOQ/07tzAvoa0hb17SVFMLeTt0E3Da7URlDGLs4wGz0doN5ugA3Bl3nshbC1YZj2w1DZBGJb0qeu+Dyv9hkeVIVuC8I1UNfO6LjKei4DoWe0twnDknnIksYiItByV1B0++8fZnFlsvt9kRDxhJYjsf7DpoJlBQI++EOqJJPdcApuPLzDB8zQkoQb+vyc3d94lyXMM8IfKzdStZ6bJI3IDtPG+NUzH7vAFTzuVIjQqj6NqeWEFzl2xJYImRK/1d5eyCXpfmeJcUspobksXRLbWXS5kzCOeYnbXWA2xNmPuuapBSji/ECn3Gk+FdJdHUlD5UU3/Y6Z7n1pPbgvtuezsj+aE/lrmdI6Ary3F+Awsy34aX5sklieSxEjani6e1fbpdV0iFyytXPCAgxgKVjDeEyxSmkb2afPIUaQbkSeOH0Mj6VVSn7rmrt0j9b6y2D+b1J3Xb9unoQN4xL6lbdX9VOtZdRp1l71a3l0Gh0u5kszLpXyhssBQopXIh5YFU6MI9NCZygJjwWj5fOowU40x/jypo76FaV9QYYl/rQJihBziGaSjuZ1D+oL3fNWCfRh7cQoZ72g9QPzVVebPUr91XegbLgar/5SjM5/BgaiLbh2djaPZR45sHb/NZrE0WR5rsxhYZ44WLvY5r/4TCM57DKe/4t3NCPnaXfpd3IfeQv9AwoZu8zdRRVzzZ5t39y8=</diagram></mxfile>
2103.00180/main_diagram/main_diagram.pdf ADDED
Binary file (10.8 kB).
 
2103.00180/paper_text/intro_method.md ADDED
@@ -0,0 +1,122 @@
1
+ # Introduction
2
+
3
+ Artificial Intelligence for Science [@stevens2020ai] is concerned with the use of AI methods to accelerate our understanding of the natural world, and to assist the application of this understanding to the development of areas such as engineering, medicine, healthcare, agriculture, the environment, and so on. While ambitious plans exist for completely automated AI-based robot scientists [@kitano2016artificial], the main use of AI for Science remains semi-automated, with a scientist-in-the-loop. An example of such a collaborative system is in Fig. [\[fig:ml_a\]](#fig:ml_a){reference-type="ref" reference="fig:ml_a"}. For such systems to work effectively, we need at least the following: (1) We have to be able to tell the machine what we know, in a suitably precise form; and (2) The machine has to be able to tell us what it has found, in a suitably understandable form. While the remarkable recent successes of deep neural networks on a wide variety of tasks make a substantial case for their use in model construction, it is not immediately obvious how either (1) or (2) should be done with deep neural networks. In this paper, we examine ways of achieving (1). Understanding models constructed by deep neural networks is an area of intense research activity, and good summaries exist elsewhere [@lipton2016he; @arrieta2019xplainable]. To motivate the utility of providing domain-knowledge to a deep network, we reproduce a result from [@dash2020incorporating] in Fig. [\[fig:ml_b\]](#fig:ml_b){reference-type="ref" reference="fig:ml_b"}, which shows that predictive performance can increase significantly, even with a simplified encoding of domain-knowledge.
4
+
5
+ <figure data-latex-placement="!htb">
6
+
7
+ <figcaption>An example of AI for Science. In (a), the human-in-the-loop is a biologist. The plot in (b), from <span class="citation" data-cites="dash2020incorporating"></span>, shows gains in the predictive accuracy of a graph neural network (GNN) with the inclusion of domain-knowledge, using a simplified technique called vertex-enrichment. The results shown are over 70 datasets.</figcaption>
8
+ </figure>
9
+
10
+ It is unsurprising that a recent report on AI for Science [@stevens2020ai] identifies the incorporation of domain-knowledge as one of the 3 Grand Challenges in developing AI systems:
11
+
12
+ ::: displayquote
13
+ "Off-the-shelf \[ML and AI\] practice treats \[each of these\] datasets in the same way and ignores domain knowledge that extends far beyond the raw data... Improving our ability to systematically incorporate diverse forms of domain knowledge can impact every aspect of AI."
14
+ :::
15
+
16
+ In this survey, we restrict our attention to studies on the incorporation of domain knowledge into neural networks with one or more hidden layers (we will sometimes also use the term deep neural network, or DNN). Before we proceed further, we clarify that our focus here is more specific than that of research that looks at the development of hybrid systems combining neural and logical systems (see for example, [@garcez2012neural]); and different from the use of neural network techniques for either emulating logical inference or representing logical concepts. We refer the reader to [@besold2017neural] for reviews of some of these other strands of work. These reviews are nevertheless relevant to some of the material in this paper since they identify some key challenges in integrating neural-based learning with symbolic knowledge representation and logical reasoning. More directly related to this paper is the work on "informed machine learning", reviewed in [@von2019informed]. We share with this work the interest in prior knowledge as an important source of information that can augment existing data. However, the goals of that paper are more ambitious than here. It aims to identify categories of prior knowledge, using as dimensions: the source of the knowledge, its representation, and its point of use in a machine-learning algorithm. In this paper, we are only concerned with some of these categories. Specifically, in terms of the categories in [@von2019informed], we are interested in implicit or explicit sources of domain-knowledge, represented either as logical or numeric constraints, and used at the model-construction stage by DNNs.
17
+
18
+ In this survey, we consider the representation of domain knowledge for DNNs in two broad categories: (1) as logical constraints; and (2) as numerical constraints.[^1] Under each of these, we consider sub-categories: (1a) Propositional logic, including canonical normal forms; (1b) Predicate logic, including binary relations, and more generally, $n$-ary relations; (2a) Loss functions, including regularisation terms; (2b) Constraints on weights, including priors and transfer-learning.
19
+
20
+ The rest of the paper is organised as follows: Section [2](#sec:logic){reference-type="ref" reference="sec:logic"} describes the inclusion of domain-knowledge as logical constraints. Section [3](#sec:numeric){reference-type="ref" reference="sec:numeric"} describes the inclusion of domain-knowledge as numeric constraints. Section [4](#sec:challenge){reference-type="ref" reference="sec:challenge"} outlines some major challenges related to the inclusion of domain-knowledge in DNNs, with our perspectives on the relevance of the use of domain-knowledge to aspects of Responsible AI, including ethics, fairness, and explainability of DNNs.
21
+
22
+ Although not all DNNs require data to be a set of feature-vectors, this form of data representation is long-standing and still sufficiently prevalent. In logical terms, we categorise feature-based representations as being encodings in a propositional logic. The reader might point out, correctly, that feature-values need not be Boolean; however, we can represent non-Boolean features by Boolean-valued propositions (for example, a real-valued feature $f$ with value $4.2$ would be represented by a corresponding Boolean feature $f'$ that has the value $1$ if $f=4.2$ and $0$ otherwise). With the caveat of this rephrasing, it has of course been possible to provide domain-knowledge to neural networks by employing domain-specific features devised by an expert. However, we focus here on ways in which domain-knowledge encoded as rules in propositional logic has been used to constrain the structure or parameters of models constructed by a network.
23
+
24
+ Here, domain-knowledge is encoded as a set of propositional rules. These rules in turn constrain the structure of the neural network. Weight-learning then proceeds as normal, using the structure. The result could be thought of as learning weighted forms of the antecedents present in the rules. The oldest and most popular work along this line is the Knowledge-Based Artificial Neural Network (KBANN) [@towell1990refinement], which incorporates knowledge into neural networks. In KBANN, the domain knowledge is represented as a set of hierarchically structured propositional rules that directly determines a fixed topological structure of a neural network [@towell1994knowledge]. KBANN was successful in many real-world applications, but its representational power was bounded by the pre-existing set of rules, which restricted it to refining these existing rules rather than discovering new ones. A similar study is KBCNN [@fu1993knowledge], which first identifies and links domain attributes and concepts consistent with initial domain knowledge. Further, KBCNN introduced additional hidden units into the network and, most importantly, allowed decoding of the learned rules from the network in symbolic form. However, neither KBANN nor KBCNN was appropriate for learning new rules because of the way the initial structure was constructed from the initial domain knowledge base.
25
+
26
+ Some of the limitations described above could be overcome with the proposal of a hybrid system by Fletcher and Obradovic [@fletcher1993combining]. The system was able to learn a neural network structure that could construct new rules from an initial set of rules. Here, the domain knowledge is transformed into an initial network through an extended version of KBANN's symbolic knowledge encoding. It performed incremental hidden unit generation thereby allowing construction or extension of initial rule-base. In a similar manner, there was a proposal for using Cascade ARTMAP [@Tan1997] which could not only construct a neural network structure from rules but also perform explicit cascading of rules and multistep inferencing. It was found that the rules extracted from Cascade ARTMAP are more accurate and much cleaner than the rules extracted from KBANN [@towell1993extracting].
27
+
28
+ In the late 1990s, Garcez and Zaverucha proposed a massively parallel computational model called CIL$^2$P based on a feedforward neural network that integrates inductive learning from examples and domain knowledge, expressed as a propositional logic program [@AvilaGarcez1999]. A translation algorithm generates a neural network. Unlike KBANN, the approach uses the notion of "bipolar semi-linear" neurons. This allows the proof of a form of correctness, showing the existence of a neural-network structure that can compute the logical consequences of the domain-knowledge. The output of such a network, when combined into subsequent processing, naturally incorporates the intended interpretation of the domain predicates. The authors extend this to the use of first-order logic programs: we consider this in a later section.
29
+
30
+ A recent proposal focuses on embedding symbolic knowledge expressed as logical rules [@Xie2019]. It considers two representation languages: Conjunctive Normal Form (CNF) and decision-Deterministic Decomposable Negation Normal Form (d-DNNF), which can naturally be represented as graph structures. The graph structures can be provided to a graph neural network (GNN) to learn an embedding suitable for further task-specific implementations.
31
+
32
+ Somewhat in a similar vein to the work by [@AvilaGarcez1999], the work reported in [@Xu2018] considers a set of propositional statements representing domain constraints. A deep network is then trained to find satisfying assignments for the constraints. Again, once such a network is constructed, it can clearly be used in subsequent processing, capturing the effect of the domain constraints. The network is trained using a semantic loss that we describe in a later section.
33
+
34
+ We now describe methods that use domain-knowledge described in first-order logic, which clearly gives greater flexibility and representational power than the use of propositional logic. Although there is no conceptual need to do so, we nevertheless first consider the encoding of domain-knowledge as binary relations, and later consider the general case. The reasons for this will be apparent below.
35
+
36
+ An influential form of representing domain-knowledge is the *knowledge graph*: a labelled graph with vertices representing entities and edges representing relations between entities. In essence, this represents a binary relation. We refer the reader to [@hogan2020knowledge] for a comprehensive survey of this form of representation for domain-knowledge.
37
+
38
+ Incorporation of the information in a knowledge-graph into deep neural models--termed "knowledge-infused learning"--is described in [@kursuncu2019nowledge; @sheth2019shades]. This aims to incorporate binary relations contained in application-independent sources (like DBPedia, Yago, WikiData) and application-specific sources (like SNOMED-CT, DataMed). The work examines techniques for incorporating relations at various layers of deep-networks (the authors categorise these as "shallow", "semi-deep" and "deep" infusion). In the case of shallow infusion, both the external knowledge and the method of knowledge infusion is shallow, utilising syntactic and lexical knowledge in the form of word embedding models. In semi-deep infusion, external knowledge is involved through attention mechanisms or learnable knowledge constraints acting as a sentinel to guide model learning, and deep infusion employs a stratified representation of knowledge representing different levels of abstractions in different layers of a deep learning model, to transfer knowledge that aligns with the corresponding layer in the layered learning process.
39
+
40
+ Knowledge graphs can be encoded directly for use by a graph neural network (GNN). The computational machinery available in GNN then aggregates and combines the information available in the knowledge graph. The final collected information from this computation could be used for further predictions. Some recent works are in [@10.1145/3292500.3330855; @10.1145/3308558.3313417], where a GNN is used for estimation of node importance in the knowledge-graph. The idea of encoding a knowledge graph directly for a GNN is also used in [@chen2019knowledgebased] to enrich the information provided in the knowledge-graph.
41
+
42
+ The pre-eminent form of symbolic machine learning based on the use of relations in first-order logic is Inductive Logic Programming (ILP) [@muggleton1991inductive], which has an explicit role for domain-knowledge being incorporated into learning. In ILP, domain-knowledge is represented in first-order logic: we refer the reader to surveys, both old and new, for a description of the field [@mugg1994ilp; @muggleton2012ilp; @cropper2020turning].
43
+
44
+ The simplest use of ILP to incorporate $n$-ary relations in domain knowledge into a neural network relies on techniques that automatically "flatten" the domain-knowledge into a domain-specific propositional representation. Techniques for automatic construction of Boolean-valued features from relational domain-knowledge have a long history in the field of ILP, often called *propositionalisation*, originating with LINUS [@lavravc1991learning]. This involves the construction of features that identify the conditions under which they take on the value $1$ (or $0$). For example, given (amongst other things) the definition of benzene rings and of fused rings, an ILP-based propositionalisation may construct the Boolean-valued feature that has the value $1$ if a molecule has 3 fused benzene rings, and $0$ otherwise. The values of such Boolean-valued features allow us to represent a data instance (like a molecule) as a Boolean-valued feature-vector, which can then be provided to a neural network. There is a long history of propositionalisation: see [@Kramer2001] for a review of some of the early uses of this technique, and [@lavrac2020prop; @vig2017investi] who examine the links between propositionalisation and modern-day use of embeddings in neural-networks.
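As a toy illustration of propositionalisation (the relations and thresholds below are invented purely for illustration), relational facts about an instance can be flattened into a Boolean feature-vector that a standard network can consume:

```python
# Toy propositionalisation sketch (hypothetical relations, not from any cited system):
# relational facts about a data instance become Boolean features.
def propositionalise(molecule: dict) -> list:
    features = [
        molecule.get("num_benzene_rings", 0) >= 3,   # "has at least 3 benzene rings"
        molecule.get("has_fused_rings", False),      # "contains fused rings"
        molecule.get("num_atoms", 0) > 20,           # "is a large molecule"
    ]
    return [int(f) for f in features]

print(propositionalise({"num_benzene_rings": 3, "has_fused_rings": True, "num_atoms": 24}))
# [1, 1, 1]
```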
45
+
46
+ A direct application of propositionalisation, demonstrating its utility for deep networks has been its use in Deep Relational Machines  [@Lodhi2013]. A DRM is a deep fully-connected neural network with Boolean-valued inputs obtained from propositionalisation by an ILP engine. In [@dash2018large] Boolean-valued features from an ILP engine are sampled from a large space of possible features. The sampling technique is refined further in [@dash2019discrete].
47
+
48
+ The idea of propositionalisation also forms the foundation for a method known as 'Bottom Clause Propositionalisation (BCP)' to propositionalise literals of a most-specific clause, or "bottom-clause". Given a data instance, the bottom-clause is the most-specific first-order clause that entails the data instance, given some domain-knowledge. Loosely speaking, the most-specific clause can be thought of "enriching" the data instance with all domain relations that are true, given the data instance. The construction of such most-specific clauses and their subsequent use in ILP was introduced in [@muggleton1995inverse]. CILP++ [@francca2014fast] uses bottom-clauses for data instances to construct feature-vectors for neural networks. This is an extension to CIL$^2$P. Here the neural network has recurrent connections.
49
+
50
+ Propositionalisation has conceptual and practical limitations. Conceptually, there is no variable sharing between two or more first-order features [@dash2018large]. Practically, the space of possible features can be extremely large: this has meant that the feature-selection has usually been done separately from the construction of the neural network. There are some recent reports on incorporating first-order logic that do not rely on propositionalisation. In [@Li2020] it is proposed to augment a language model that uses a deep net architecture with additional statements in first-order logic. Thus, given domain-knowledge encoded as first-order relations, connections are introduced into the network, based on the logical constraints enforced by the domain-relations. The approach is related somewhat to the work in [@DBLP:journals/jair/SourekAZSK18] that does not explicitly consider the incorporation of domain-knowledge but does constrain a deep neural network's structure based on the relational structure within data instances.
51
+
52
+ A work that does not employ either propositionalisation or network augmentation considers a combination of symbolic knowledge represented in first-order logic with matrix factorization techniques [@rocktaschel-etal-2015-injecting]. This exploits dependencies between textual patterns to generalise to new relations. In another study, drawing on the representation of $n$-ary relations as hyperedges, the technique of *vertex enrichment* is proposed in [@dash2020incorporating]. This provides a simplified way to incorporate symbolic domain-knowledge into standard graph neural networks (GNNs) [@dash2020incorporating]. Vertex enrichment incorporates first-order background relations as additional features into the features associated with the nodes of a graph provided to a GNN. The results reported in the paper show significant improvements in the predictive accuracy of GNNs across a large number of datasets.
53
+
54
+ We note that newer areas are emerging that use representations for domain-knowledge that go beyond first-order logic relations. This includes probabilistic first-order logic, as a way of including uncertain domain-knowledge [@manhaeve2018eepproblog]. One interesting way this is being used is to constrain the training of "neural predicates", which represent probabilistic relations that are implemented by neural networks.
55
+
56
+ We now discuss some forms of numeric constraints often used as ways of incorporating domain-knowledge into a neural network. Some of these methods are standalone, and some are coupled with methods incorporating logical constraints to provide a more robust way of integrating domain-knowledge into a network.
57
+
58
+ A fairly standard way of incorporating domain-knowledge into a deep network is by introducing additional loss terms into the utility (loss) function that the network optimises. This function takes on a general form like the following: $$\mathcal{L}_{total} = \alpha\mathcal{L}_{task} + \beta\mathcal{L}_{in} + \gamma\mathcal{L}_{out} + \lambda\mathcal{L}_{model},$$ where $\mathcal{L}_{total}$ is the total loss for the deep network, based on which training will be carried out, $\mathcal{L}_{task}$ denotes the standard task-specific loss (e.g. cross-entropy for classification, mean-squared-error for regression, and so on), $\mathcal{L}_{in}$ is the loss representing some constraints on the inputs, $\mathcal{L}_{out}$ is the loss representing constraints on outputs, and $\mathcal{L}_{model}$ is the constraint on the function that is to be learned by the deep network. The parameters $\alpha, \beta, \gamma, \lambda$ are constants signifying some form of weights for each of the above loss terms, which can be learned or tuned during model construction.
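A minimal sketch of such a composite loss in PyTorch; the three constraint terms are user-supplied placeholders standing in for whatever domain-knowledge is available, and the weights follow the equation above:

```python
import torch
import torch.nn.functional as F

def total_loss(logits, targets, inputs, model,
               input_constraint, output_constraint, model_constraint,
               alpha=1.0, beta=0.1, gamma=0.1, lam=0.01):
    """L_total = alpha*L_task + beta*L_in + gamma*L_out + lambda*L_model.
    The constraint terms are caller-supplied callables encoding domain-knowledge."""
    l_task = F.cross_entropy(logits, targets)   # standard task-specific loss
    l_in = input_constraint(inputs)             # e.g. penalise physically implausible inputs
    l_out = output_constraint(logits)           # e.g. penalise constraint-violating predictions
    l_model = model_constraint(model)           # e.g. a norm penalty on selected weights
    return alpha * l_task + beta * l_in + gamma * l_out + lam * l_model
```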
60
+
61
+ A recent work based on a loss function is [@Xu2018]. Here the authors propose a semantic loss that signifies how well the outputs of the deep network match some given constraints encoded as propositional rules. The general intuition behind this idea is that the semantic loss is proportional to the negative logarithm of the probability of generating a state that satisfies the constraint when sampling values according to some probability distribution. This loss function falls under the category of $\mathcal{L}_{out}$. This kind of loss function is particularly useful for semi-supervised learning: such losses behave like self-information, are not constructed using explicit labels, and can thus utilize unlabelled data.
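For intuition, a sketch of this loss for the commonly used "exactly-one" constraint over independent Boolean outputs (a simplified reading of the idea, not the cited implementation):

```python
import torch

def semantic_loss_exactly_one(probs: torch.Tensor) -> torch.Tensor:
    """-log P(sampled Boolean outputs satisfy 'exactly one is true'),
    where each output is sampled independently with probability probs[i]."""
    # P(exactly one true) = sum_i p_i * prod_{j != i} (1 - p_j)
    one_minus = 1.0 - probs
    prod_all = torch.prod(one_minus)
    sat = torch.sum(probs * prod_all / one_minus.clamp_min(1e-12))
    return -torch.log(sat.clamp_min(1e-12))

print(semantic_loss_exactly_one(torch.tensor([0.9, 0.05, 0.05])))  # small loss: nearly one-hot
print(semantic_loss_exactly_one(torch.tensor([0.5, 0.5, 0.5])))    # larger loss: far from one-hot
```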
62
+
63
+ [@Hu2016HarnessingDN] proposed a framework to incorporate first-order logic rules with the help of an iterative distillation procedure that transfers the structured information of logic rules into the weights of neural networks. This is done via a modification to the knowledge-distillation loss proposed by Hinton et al. [@hinton2015istilling]. The authors show that taking this loss-based route of integrating rule-based domain-knowledge allows the flexibility of choosing a deep network architecture suitable for the intended task.
64
+
65
+ In [@Fischer2019DL2TA], the authors construct a system for training a neural network with domain-knowledge encoded as logical constraints. Here the available constraints are transferred to a loss function. Specifically, each individual logic operation (such as negation, conjunction, disjunction, equality, etc.) is translated to a loss term. The final formulation results in an optimisation problem. The authors extract constraints on inputs that capture certain kinds of convex sets and use them as optimisation constraints to make the optimisation tractable. In the developed system, it is also possible to pose queries on the model to find inputs that satisfy a set of constraints.
66
+
67
+ In the Bayesian framework, explicit information about a machine learning model and the data can be expressed succinctly in the form: $$\text{posterior} \propto \text{prior} \cdot \text{sample-likelihood}$$ A very common way of incorporating domain-knowledge into a machine learning system (including deep networks) is by encoding it as a 'prior' term in the above Bayes equation. The domain-knowledge here could be about the problem, the neural network structure, or the neural network parameters, that is, some form of a probability distribution over these. The priors on networks and network weights represent our expectations about networks before receiving any data, and correspond to penalty terms or regularisers. Buntine and Weigend [@Buntine1991BayesianB] extensively study how Bayesian theory can be highly relevant to the problem of training feedforward neural networks. One of the main focuses of this study was the principle of choosing an appropriate network structure and size based on prior domain-knowledge about the problem, and of selecting a prior on the weight parameters.
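As a standard illustration of this correspondence (a textbook fact, not specific to any one cited work), a zero-mean Gaussian prior on the weights $w$ turns the negative log-posterior into a familiar weight-decay objective: $$-\log p(w \mid D) = -\log p(D \mid w) + \frac{\lambda}{2}\lVert w\rVert_2^2 + \text{const},$$ so maximising the posterior is equivalent to minimising the usual training loss with an $L_2$ regulariser.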
68
+
69
+ Seminal work by [@hintonnealthesis] on Bayesian learning in neural networks showed how domain-knowledge could help build a prior probability distribution over neural network parameters. They showed how with the approach of Bayesian learning, networks are self-regularised to not over-fit even when complexity of the neural network is increased to infinity. This study has served as foundation to various works on regularisation approaches such as posterior regularisation in the case of neural networks. In a similar spirit, [@Krupka2007IncorporatingPK] showed how prior domain knowledge could be used to define 'meta-features' that can aid in defining the prior distribution of weights. These meta-features are additional information about each of the features in the available data. For instance, for an image recognition task, the meta-feature could be the relative position of a pixel $(x, y)$ in the image. This meta information can be used to construct a prior over the weights for the original features.
70
+
71
+ Transfer learning is a very common technique utilised when there is little data in the target domain pertaining to the prediction task, but plenty of data in the domain of a task similar to the target domain. Transfer learning from a related (source) domain to a target domain results in a significant boost in performance and saves substantial computational effort. From the Bayesian perspective, transfer learning allows the construction of the prior over the weights of a neural network for the target domain based on the posterior constructed in the source domain. Transfer learning is not limited by the kind of task (such as classification, regression, etc.) but rather by the domain itself. This is a result of the multi-layered information learnt by neural networks, i.e., the final layer, which most often corresponds to the task at hand, can be replaced and fine-tuned separately [@YosinskiCBL14]. Large language models are a very successful example of this, where the models are initially learnt on a huge corpus of data and fine-tuned for numerous tasks. [@zhuang2020comprehensive] provides an in-depth review of some of the mechanisms and strategies of transfer learning. Transfer learning need not be restricted to deep networks: in a recent study, [@liu2018mproving] proposes a model that transfers knowledge from a neural network to a decision tree using a knowledge distillation framework. The symbolic knowledge encoded in the decision tree could then be utilised for a variety of tasks.
72
+
73
+ ::: table*
+ | **Work** | **Logical Constraint** | **Numerical Constraint** | **DNN Method** |
+ |:---|:---|:---|:---|
+ | KBANN [@towell1994knowledge] | Propositional logic | - | MLP |
+ | Cascade-ARTMAP [@Tan1997] | Propositional logic | - | ARTMAP |
+ | CIL$^2$P [@AvilaGarcez1999] | Propositional logic | - | RNN |
+ | LENSR [@Xie2019] | Canonical Normal Form | Loss function | GNN |
+ | CILP++ [@francca2014fast] | First-order logic | - | MLP |
+ | DRM [@Lodhi2013] | First-order logic | - | MLP |
+ | VEGNN [@dash2020incorporating] | First-order logic | - | GNN |
+ | Semantic Loss [@Xu2018] | First-order logic | Loss function | CNN |
+ | HDNNLR [@Hu2016HarnessingDN] | First-order logic | Loss function | CNN, RNN |
+ | DL2 [@Fischer2019DL2TA] | - | Loss function | CNN |
+ | ILBKRME [@rocktaschel-etal-2015-injecting] | - | Loss function, Regularisation | MLP |
+ | IPKFL [@Krupka2007IncorporatingPK] | - | Prior | CNN |
+ :::
89
+
90
+ A natural consequence of incorporating prior knowledge (some knowledge on the prior distribution) is regularisation in learning. This allows the inductive bias to be exploited based on prior knowledge, further allowing better predictive performance. One of the most common regularisations is a penalty-based regularisation for model complexity. Examples include $L_1$- or $L_2$-based regularisation terms in the utility or loss function of a neural network. The optimiser minimises the loss along with the regularisation term, resulting in a less complex model in terms of parameters [@kukavcka2017regularization]. [@10.4018/JITR.2018100109] show that domain-based regularisation in the loss function can also help in constructing deep networks with a smaller amount of data.
91
+
92
+ Over the years, regularising embeddings has constituted another major direction of research. [@Fu1995] was one of the earliest works in this domain, proposing a strategy to establish constraints by designating each node in a Hopfield Net to represent a concept and edges to represent their relationships, and learning these nets by finding the solution that satisfies the greatest number of these constraints. [@rocktaschel-etal-2014-low] was perhaps the first method of regularising embeddings from declarative knowledge encoded in first-order logic. [@rocktaschel-etal-2015-injecting] extended this to regularisation by the addition of differentiable loss terms to the objective, based on propositionalisation of each first-order predicate. [@Li2020] develop a method to constrain individual neural layers using soft logic based on massively available declarative rules in ConceptNet. [@Hamilton2018EmbeddingLQ] incorporate first-order logic into low-dimensional spaces by embedding graph nodes and representing logical operators as learned geometric relations in the space. [@Demeester2016LiftedRI] proposed an ordering of the embedding space based on rules mined from WordNet and found that it yields better use of prior knowledge and better generalisation capabilities with these relational embeddings. [@silvestri2020injecting] probe the embeddings of a neural network while progressively adding domain knowledge and show strong results encouraging the same. In [@takeishi2018knowledge], a knowledge-based distant regularisation framework was proposed, in which distance information derived from domain-knowledge encoded in a knowledge graph was utilised. It defines prior distributions of model parameters using knowledge graph embeddings. The authors show that this results in an optimisation problem for a regularised factor analysis method.
93
+
94
+ We summarise our discussion on domain-knowledge as constraints in Table [\[tab:summary\]](#tab:summary){reference-type="ref" reference="tab:summary"}. We now outline some challenges in incorporating domain-knowledge encoded as logical or numerical constraints into a deep network. We first outline some immediate practical challenges concerning the logical constraints:
95
+
96
+ - There is no standard framework for translating logical constraints to neural networks. While there are simplification methods which first construct a representation of the logical constraint that a standard deep network can consume, this process has its limitations as described in the relevant section above.
97
+
98
+ - Logic is not differentiable. This does not allow standard training of a deep network using gradient-based methods in an end-to-end fashion. Propagating gradients via logic has now been looked at in [@evans2018delILP], but the solution is intractable and does not allow day-to-day use.
99
+
100
+ - Neural networks are directed acyclic graphs (DAGs). However, logical formulae can introduce cyclic dependencies, which need a separate form of translation.
101
+
102
+ There are also practical challenges concerning the numerical constraints:
103
+
104
+ - We have seen that numerical constraints are often provided through modifications to a loss function. Constructing such a term in the loss function is not straightforward.
105
+
106
+ - The process of introducing a loss term often results in a difficult optimisation problem (sometimes constrained) to be solved. This may require additional mathematical tools for a solution that can be implemented practically.
107
+
108
+ Incorporating domain-knowledge into learning is highlighted in [@stevens2020ai] as one of the Grand Challenges facing the foundations of AI and ML. The principal difficulties raised in that report are these:
109
+
110
+ - "Can the constructed deep network model be trusted?" This question involves long-standing discussions on explainability and interpretability of deep models. It also includes the question of whether data used for constructing the deep model contains sufficient information without introducing spurious correlations or bias that would invalidate the model itself.
111
+
112
+ - "Why does the AI model work for a problem?" To address this question, there has to be some mapping from the internal representation of the model to a domain-specific concept. In [@info11020122], the authors identify that mapping the deep learning components, including input features, hidden units and layers, and output predictions, to domain-knowledge could lead to an understandable model.
113
+
114
+ The issues raised above go beyond just the "how" questions related to the incorporation of domain-knowledge into deep networks. They provide pointers to why the use of domain-knowledge may extend beyond its utility for prediction. Domain-knowledge can also play a role in aspects like explanation and fairness. We mention some of the challenges that result.
115
+
116
+ One important requirement of machine-constructed models in workflows with humans-in-the-loop is that the models are human-understandable. Domain-knowledge can be used in two different ways to assist this. First, it can constrain the kinds of models that are deemed understandable. Secondly, it can provide concepts that are meaningful for use in a model. Most of the work in this review has been focussed on improving predictive performance. However, the role of domain-knowledge in constructing explanations for deep network models is also being explored (see, for example, [@srinivasan2019logical]), although that work only generates *post hoc* explanations that are locally consistent. Explanatory deep network models that identify true causal connections based on concepts provided as domain-knowledge remain elusive.
117
+
118
+ Domain-knowledge can also be used to correct biases built into a deep network, either declaratively, through the use of constraints, or through the use of loss functions that include "ethical penalty" terms. Demonstrations of the use of domain-knowledge-driven, ethics-sensitive machine learning have been available in the literature for some time [@Anderson2005MedEthExTA]. Can these carry over to the construction of deep network models? This remains to be investigated.
119
+
120
+ Finally, the rapid progress in the area of language models raises the possibility of providing domain-knowledge in forms other than logical or numerical. While the precision of these formal representations may continue to be needed for the construction of scientific assistants, their role in representing commonsense knowledge is less evident. Day-to-day machine assistants that can incorporate informal knowledge of the world will be needed. Progress in this is being made (see for example, <https://allenai.org/aristo>), but there is much more that needs to be done to make the language models required accessible to everyday machinery.
121
+
122
+ [^1]: We use the term "constraints" here in the sense that the domain-knowledge constrains either the structure or parameters (or both) of a DNN.
2103.03501/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2103.03501/paper_text/intro_method.md ADDED
@@ -0,0 +1,271 @@
1
+ # Introduction
2
+
3
+ \vspace{-2mm}
4
+ Many computer vision applications require the estimation of a model from a set of observations .
5
+ In outlier-free settings, fitting a geometric model to a dataset can be performed relatively easily by, for example, solving a least squares problem. However, in the presence of outliers in the data, a robust estimator must be employed to ensure the stable performance of any algorithm.
6
+ As an example, consider SLAM , which is now a fundamental building block in several robotic or autonomous driving pipelines. It requires multiple estimations of the fundamental/ essential matrices between the consecutive views captured along the camera trajectory. In many circumstances, erroneous correspondences between the frames could lead to incorrect camera pose estimation. Consequently, if the outliers are not removed, the whole tracking trajectory could be severely affected. Therefore, it is desirable to design robust fitting algorithms that are highly accurate and able to achieve real-time performance. This is a challenging task, as solving robust fitting optimally has been shown to be NP-hard .
7
+
8
+ [t]
9
+ \centering
10
+ \includegraphics[width=1\linewidth]{tex/figures/coverPic.pdf}
11
+
12
+ \caption{Illustration of the solutions found by our unsupervised learning method (right) and a globally optimal algorithm (left). The number shows the specific index of points in the point set. The admissible heuristic in A* method brings the search into some fruitless subparts (green line) before discovering optimal solution (red line). Our agent learns to remove outliers by traversing from the initial state to the goal state in the minimal number of steps (the states are numbered based on the index of the removed point). Observe that both methods terminate at the same solution (i.e., both remove the same set of outliers).}
13
+
14
+ \vspace{-5mm}
15
+
16
+ In addition to popular methods such as Random Sample Consensus (RANSAC) and a number of randomized or deterministic variants , the advent of deep learning in recent years has inspired research in learning-based approaches for robust estimation . The main idea behind these techniques is to exploit the learning capabilities of deep Convolutional Neural Networks (CNNs) to directly regress the robust estimates , or quickly identify the outliers.
17
+ These approaches have demonstrated their superior performance on many datasets, and hence, developing learning-based robust estimators can be a promising research direction.
18
+ However, most learning techniques mentioned above are supervised, hence they typically require a large amount of labelled data. This potential bottleneck could be resolved by either generating ground truth data automatically by using synthetic data, or by using conventional methods (e.g. RANSAC) to generate ground truth. However, these "quick fixes" have their own drawbacks.
19
+
20
+ Specifically, a network fully trained on synthetic data may not be able to generalize well to real world scenarios since it has not been exposed to real examples during training.
21
+
22
+ Similarly, ground truth obtained from classic conventional methods is not guaranteed to be the gold standard as the obtained solutions may be incorrect. One could, on the other hand, employ some global consensus maximization methods to generate the ground truth, but this would be at the cost of an exceptionally slow training process. Moreover, some methods are problem-specific, and it is non-trivial to extend them to other robust fitting tasks.
23
+
24
+ We address these problems and present a novel unsupervised learning framework for robust fitting.
25
+
26
+ Inspired by the success of Reinforcement Learning (RL) in several unsupervised tasks , we cast our robust fitting problem as a special case of goal oriented learning. Such a transformation is achieved thanks to the underlying tree structure of consensus maximization .
27
+
28
+ Moreover, we also propose a novel architecture that efficiently captures the instantaneous state of the data during transition.
29
+ Fig. shows an example of a 2D line fitting problem, where we plot the trajectory of A* (a globally optimal algorithm), and the path traversed by our agent from the initial state to the goal state. Observe that both remove the same set of outliers, which demonstrates the learning capability of our network to effectively explore the environment. Furthermore, in contrast to the implementation of A*, which explores redundant bases before reaching the optimal, our network can quickly identify the shortest path to reach the goal state, resulting in significantly faster run times. (see Section. for more detail.)
30
+ To the best of our knowledge, our work is the first to learn a deep architecture model in a reinforcement learning paradigm for consensus maximization in computer vision.
31
+
32
+ \vspace{-4mm}
33
+ \paragraph{Contributions} The main contributions of our paper can be summarized as follows:
34
+ \vspace{-2mm}
35
+
36
+ - We propose a novel unsupervised learning framework for robust estimation. By exploiting the special structure (see Section. ) of the problem under the consensus maximization formulation, we incorporate robust model fitting into the well-known goal-oriented reinforcement learning framework, resulting in an efficient learning mechanism without any supervision.
37
+ \vspace{-2mm}
38
+ - We develop a new state embedding scheme based on a graph convolutional network, and an efficient back-bone network that allows our agent to effectively explore the action space to achieve the goal state. (see Section. and Section. )
39
+
40
+ \vspace{-3mm}
41
+
42
+ # Method
43
+
44
+ \vspace{-2mm}
45
+ While there exist different ways to formulate robust estimates, our work uses the popular consensus set maximization formulation .
46
+ The objective is to find an estimate $\btheta^* \in \bbR^d$ that is consistent with as many of the observations $\cX=\{\bx_{i}\}_{i=1}^N$ as possible, i.e., finding the maximum number of inliers up to a predefined threshold $\epsilon$:
47
+ \vspace{-3mm}
48
+
49
+ \begin{aligned}
+ & \max_{\btheta \in \bbR^d, \cI \subseteq \cX} & & |\cI| \\
+ & \text{subject to} & & r(\bx_{i}|\btheta) \leq \epsilon, \;\; \forall \bx_{i} \in \cI,
+ \end{aligned}
53
+
54
+ where $r(\bx_{i}|\btheta)$ is the residual of $\bx_{i}$ w.r.t model parameter $\btheta$.
55
+ The solution $(\cI^*, \btheta^*)$ of provides the optimal inlier set $\cI^*$ that is consistent with the estimate $\btheta^*$.
56
+ Similar to other consensus maximization approaches, we also focus on quasi-convex residuals having the form ,
57
+ \vspace{-2mm}
58
+
59
+ r(\btheta) = \frac{p(\btheta)}{q(\btheta)},
60
+ \vspace{-2mm}
61
+
62
+ where $p(\btheta) \ge 0$ is a convex function and $q(\btheta) > 0$ is a linear function. Objectives of several vision problems possess such quasi-convexity .
63
+
64
+ In this paper, to assist visualization of the mathematical concepts, most examples and analyses will be based on the linear fitting problems whose residual functions are in fact convex and can be written as follows: given $\bx_i = (\ba_i; b_i)$ with $\ba_i \in \bbR^d$ and $b_i \in \bbR$,
65
+
66
+ r(\btheta) = |\ba^T_{i}\btheta - b_{i}|.
67
+ \vspace{-2mm}
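For concreteness, a minimal NumPy sketch of the quantity being maximized, using the linear residual above (the function and variable names are ours, not the paper's):

```python
import numpy as np

def consensus_size(A: np.ndarray, b: np.ndarray, theta: np.ndarray, eps: float) -> int:
    """Number of observations x_i = (a_i, b_i) whose residual |a_i^T theta - b_i|
    is within the inlier threshold eps, i.e. the objective of consensus maximization."""
    residuals = np.abs(A @ theta - b)
    return int(np.sum(residuals <= eps))

# Toy 2D line-fitting example: points roughly on b = 2*a + 1, with one outlier.
A = np.array([[0.0, 1.0], [1.0, 1.0], [2.0, 1.0], [3.0, 1.0]])
b = np.array([1.0, 3.0, 5.0, 20.0])                               # last point is an outlier
print(consensus_size(A, b, theta=np.array([2.0, 1.0]), eps=0.2))  # -> 3
```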
68
+
69
+ \vspace{-2mm}
70
+
71
+ \vspace{-2mm}
72
+ Our proposed unsupervised learning approach for robust estimation is based upon the well-known goal-oriented reinforcement learning (RL) framework , which is briefly outlined in this subsection. This framework consists of an agent $A$ that aims to navigate in an environment to reach a pre-defined goal in the smallest number of steps to maximize a total reward. Each time step is associated with a state $s^{(t)}$. The agent can select an action $a^{(t)}_i \in \cA^{(t)}$, where $\cA^{(t)}$ is the set of actions that are available at time $t$.
73
+ Based on the action $a$ taken by the agent, the environment will transition to a new state $s^{(t + 1)}$ and return a reward $r_{t}(a)$, where the reward function $r_t(.)$ depends on the particular application. The goal of the framework is for the agent to reach the pre-defined goal that maximizes the cumulative reward (also known as return),
74
+ \vspace{-5mm}
75
+
76
+ R = \sum_{t=t_0}^{\infty} \beta^{t - t_0} r_t,
77
+ \vspace{-2mm}
78
+
79
+ obtained from the initial state to the final state, where $\beta$ is the discount factor and $\beta^{t-t_0}$ weighs the importance of the reward received at time step $t$.
+ Commonly, deep Q-learning is used in RL frameworks for the agent to learn the optimal actions. In particular, Q-learning is a model-free RL method which learns the quality of actions, allowing the agent to take an appropriate action in a particular circumstance. Deep Q-learning is the fundamental model used in our work, and will be further outlined in Section.
81
+
82
+ \vspace{-3mm}
83
+
84
+ \vspace{-2mm}
85
+ While RL has shown its strength in several applications, applying it to robust estimation is by no means a trivial task. The main challenges lie in the definition of a state, reward function, goal specification and designing of an agent that can learn to efficiently explore the environment in an optimal way. In this section, we introduce a novel framework that enables the use of RL for our robust fitting problem.
86
+
87
+ \vspace{-2mm}
88
+
89
+ \vspace{-2mm}
90
+
91
+ \centering
92
+
93
+ \includegraphics[width = 0.6\columnwidth]{tex/figures/minimax.pdf}
94
+ \caption{Illustration of a minimax fitting problem for a set of points in 2D. Blue dots represent measurements with largest residual, which form the basis set.}
95
+
96
+ \vspace{-5mm}
97
+
98
+ Given a set of measurements (observations) $\cX = \{\bx_i\}_{i=1}^N$, let us first consider the minimax fitting problem that returns the estimate which minimizes the maximum residual:
99
+ \vspace{-2mm}
100
+
101
+ f(\cX)=\min_{\btheta \in \bbR^d, \gamma \in R} \gamma, \;\; \text{s.t.} \; r(\bx_i|\btheta) \le \gamma \;\; \forall i, \bx_i \in \cX.
102
+
103
+ \vspace{-2mm}
104
+
105
+ It can be proven that if the residual function $r(\bx_i|\btheta)$ is quasi-convex, the above problem is also quasi-convex, hence it can be solved efficiently up to global optimality using any off-the-shelf solver . Fig. shows an example of the minimax fit of a set of points in 2D. This problem is the core sub-problem in our learning framework.
106
+
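A minimal sketch of this minimax sub-problem for the linear residual, posed as a small linear program; SciPy's LP solver is used purely for illustration, as the paper does not prescribe a particular solver:

```python
import numpy as np
from scipy.optimize import linprog

def minimax_fit(A: np.ndarray, b: np.ndarray):
    """Chebyshev (minimax) fit: min_{theta, gamma} gamma  s.t.  |A @ theta - b| <= gamma.
    For the linear residual this is an LP and is solvable to global optimality."""
    n, d = A.shape
    c = np.r_[np.zeros(d), 1.0]                       # minimize gamma
    A_ub = np.block([[A, -np.ones((n, 1))],           #  A theta - b <= gamma
                     [-A, -np.ones((n, 1))]])         # -(A theta - b) <= gamma
    b_ub = np.r_[b, -b]
    bounds = [(None, None)] * d + [(0.0, None)]       # theta free, gamma >= 0
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method="highs")
    return res.x[:d], res.x[-1]                       # (theta*, gamma* = f(S))
```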
107
+ Observe that if the optimal solution $\gamma^*$ obtained from solving the above problem is not greater than the inlier threshold, i.e., $\gamma^* \le \epsilon$, then the solution $\btheta^*$ obtained from also solves the robust fitting problem (there are no outliers). Otherwise, the optimal consensus must be a subset $\cI^*$ such that $f(\cI^*) \le \epsilon$. Therefore, the goal of our agent is to gradually remove a subset of outliers to reach the subset $\cI^*$. Obviously, in order to maximize $\cI^*$, the number of outliers removed by our agent must be minimized.
108
+ \vspace{-5mm}
109
+ \paragraph{State.} Under the RL framework, let us consider associating each subset $\cS \subseteq \cX$ with a state $s_{\cS}$ (the detailed construction of states for our network will be discussed in the following sections), and set $s_{\cX}$ to the initial state at which our agent will start the exploration process, i.e., $s^{(t=0)} = s_{\cX}$. From the above discussion, at a particular state $s_{\cS}$, if the action taken by our agent is to remove one data point $\bx_j \in \cS$ such that $f(\cS \setminus {\bx_j}) < f(\cS)$, then the task of the agent is to find the state $s_{\cI}$ such that $f(\cI) \le \epsilon$ in the smallest number of steps (i.e., to minimize the number of outliers removed). Refer to Fig. for a visualization of a state and its associated actions for a 2D fitting problem.
110
+ \vspace{-5mm}
111
+ \paragraph{Goal and Reward Function.} We therefore define our goal state, based on the RL framework discussed in the previous section, as a state $s_{\cS}$ such that $f(\cS) \le \epsilon$. Also, we can now define a reward function $e(.)$ associated with a state $s_{\cS}$ to be
112
+ \vspace{-2mm}
113
+
114
+ \vspace{-2mm}
115
+ e(s_{\cS}) =
+ \begin{cases}
+ 0 & \text{if} \;\; f(\cS) \le \epsilon \\
+ -1 & \text{otherwise.}
+ \end{cases}
119
+
120
+ \vspace{-2mm}
121
+
122
+ As can be observed, maximizing the total reward under this reward function corresponds to moving from the initial state to the goal state in the minimum number of steps.
123
+ \vspace{-2mm}
124
+
125
+ \vspace{-2mm}
126
+ As previously discussed, the available actions associated with a particular state $s_{\cS}$ correspond to the removal of points $\bx_j \in \cS$ such that $f(\cS \setminus \{\bx_j\}) < f(\cS)$. One could test all the points in $\cS$ to generate the action set. However, such exhaustive testing turns out to be unnecessary for our problem.
127
+
128
+ To reduce the number of available actions at each state, we exploit a special property of our application as follows.
129
+
130
+ Assume $\btheta_{\cS}$ is the solution of for a set $\cS$. Let us consider the set $\cB_{\cS}$ containing points having the largest residuals,
131
+ \vspace{-2mm}
132
+
133
+ \cB_{\cS} = \{\bx_j \in \cS | r(\bx_j | \btheta_{\cS})= f(\cS) \}
134
+ \vspace{-2mm}
135
+
136
+ Following the terminology in ,
137
+
138
+ we also call $\cB_{\cS}$ a basis of the set $\cS$. In the example shown in Figure , the basis for a 2D-line fitting problems w.r.t. to the current estimate are the points plotted in blue.
139
+
140
+ Given $\cB_{\cS}$, one can prove (see ) that,
141
+ \vspace{-2mm}
142
+
143
+ f(\cS \setminus \bx_j) < f(\cS), \;\; \forall \bx_j \in \cB_{\cS}.
144
+ \vspace{-2mm}
145
+
146
+ Intuitively, removing a point belonging to the basis set guarantees a reduction of the minimax fit value for the remaining set (see Figure ). This suggests that the actions associated with a state $s_{\cS}$ correspond to removing a point in $\cB_{\cS}$ and conducting a minimax fit on the remaining points. Figure visually illustrates how the actions can be generated from a particular state (a code sketch follows).
147
+
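A short sketch of how the basis set and the admissible actions could be derived from a minimax solution; it reuses the hypothetical `minimax_fit` helper from the sketch above:

```python
import numpy as np

def basis_set(A, b, theta, gamma, tol=1e-8):
    """Indices of points attaining the maximum residual f(S) = gamma, i.e. the basis B_S."""
    residuals = np.abs(A @ theta - b)
    return np.flatnonzero(residuals >= gamma - tol)

def available_actions(A, b, theta, gamma):
    """Each action removes one basis point and re-fits the rest; by the property above,
    every such removal strictly decreases the minimax value f(S)."""
    actions = []
    for j in basis_set(A, b, theta, gamma):
        keep = np.delete(np.arange(len(b)), j)
        actions.append((j, minimax_fit(A[keep], b[keep])))   # minimax_fit: sketch above
    return actions
```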
148
+ [t]
149
+ \centering
150
+ \includegraphics[width=0.8\linewidth]{tex/figures/stateDesign.pdf}
151
+
152
+ \caption{Visualization of states and actions. The initial state corresponds to the minimax fit of the original point set. The actions associated with a particular state consist of removing a point in the basis set and conducting a minimax fit for the remaining points.}
153
+
154
+ \vspace{-3mm}
155
+
156
+ Moreover, for quasi-convex residuals in $d$ dimensions, the maximum size of $\cB_{\cS}$ is proven to be $|\cB_{\cS}| \le d+1$ (see ).
157
+
158
+ Therefore, at a particular state, the maximum number of available actions in our cases is $d+1$. This property significantly reduces the action space for our learning framework, and is one of the key factors that leads to the efficacy of our learning scheme.
159
+
160
+ \vspace{-2mm}
161
+
162
+ \vspace{-1mm}
163
+
164
+ In order to use the states above as the inputs to our network, they first need to be properly encoded.
165
+
166
+ Based on the above discussion, the crucial information for a state $s_{\cS}$ consists of the point set $\cS$ together with its basis $\cB_{\cS}$ (which also stores the information about the available actions associated with $s_{\cS}$). To enrich the information for $s_{\cS}$, our state encoding also comprises the set $\cV_{\cS} = \cX \setminus \cS$. Clearly, $\cV_{\cS}$ encodes the agent's state-traversal history before reaching the state $s_{\cS}$. Given $\cS$, $\cB_{\cS}$ and $\cV_{\cS}$, we construct a matrix $\bS_{\cS}$ to feed into our network.
167
+
168
+ To encode $\cB_{\cS}$ and $\cV_{\cS}$, we define two binary vectors $\bb_{\cS} \in \{-1, 1\}^N$ and $\bv_{\cS} \in \{-1, 1\}^N$, respectively, as follows (we use the notation $\bx[i]$ to denote the $i$-th component of a vector $\bx$),
169
+
170
+ \bb_{\cS}[i] =
+ \begin{cases}
+ 1 & \text{if} \; \bx_i \in \cB_{\cS} \\
+ -1 & \text{otherwise},
+ \end{cases}
+ \quad \text{and} \quad
+ \bv_{\cS}[i] =
+ \begin{cases}
+ 1 & \text{if} \; \bx_i \in \cV_{\cS} \\
+ -1 & \text{otherwise}.
+ \end{cases}
180
+
181
+ Therefore, each state $s_{\cS}$ can now be encoded by the matrix $\bS_{\cS}$,
182
+ \vspace{-2mm}
183
+
184
+ \bS_{\cS} = \left[\bH \;\; \bb_{\cS} \;\; \bv_{\cS} \right],
185
+ \vspace{-2mm}
186
+
187
+ where $\bH$ is the matrix that collects all the data points in the input set $\cX$. More specifically, we set the $i$-th row of the matrix $\bH$ to $h(\bx_i)$, where $h(\cdot)$ is any mapping that can well represent the information of a given input $\bx_i$. For example, in linear fitting, $h\left(\bx_i=(\ba_i, b_i)\right)$ can simply be chosen to be $h(\bx_i)=[\ba_i^T \; b_i]$ (i.e., $h$ concatenates $\ba_i$ and $b_i$ to make a row vector).
188
+
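A minimal NumPy sketch of this state encoding (names are ours):

```python
import numpy as np

def encode_state(H: np.ndarray, basis_idx, removed_idx) -> np.ndarray:
    """Build S_S = [H  b_S  v_S]: per-point features H (one row h(x_i) per point),
    a +/-1 column flagging membership in the basis B_S, and a +/-1 column flagging
    points already removed by the agent (the set V_S)."""
    n = H.shape[0]
    b_col = -np.ones((n, 1)); b_col[list(basis_idx)] = 1.0
    v_col = -np.ones((n, 1)); v_col[list(removed_idx)] = 1.0
    return np.hstack([H, b_col, v_col])
```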
189
+ Note that since our state involves point sets, one expects our network to be permutation invariant w.r.t. the input $\cS$. In other words, changing positions of the rows in the matrix $\bS_{\cS}$ does not affect the output of our network. Such permutation invariance can be achieved by using the graph CNN architecture, described in the following section.
190
+
191
+ \vspace{-2mm}
192
+
193
+ \vspace{-1mm}
194
+
195
+ \includegraphics[width=0.85\linewidth]{tex/figures/architecture.pdf}
196
+
197
+ \caption{Illustration of our proposed framework. Top row shows the (unrolled) operations (we use an instance of 2D line fitting as an example). Given a set of measurements, minimax is performed to obtain the initial state. Then, the state is encoded into a graph representation which is fed to the agent to predict the expected Q value (returns) of choosing each point in basis to eliminate. The agent then performs an action, receives a reward and moves to the next state. This process iterates until the agent reaches the goal state. Bottom row depicts the design of our network (agent). }
198
+
199
+ Similar to other RL problems, our agent needs to learn to traverse from the initial state to the goal state in such a way that the total rewards is maximized (in the smallest number of steps). This section describes our agent design, which is also illustrated in Figure .
200
+
201
+ The network takes as input a particular state (encoding using the method described in Section ) and outputs the predicted rewards for the actions associated with the input state.
202
+ We use $\Theta$ to represent the network parameters and denote by $\hat{Q}(s^{(t)}, a|\Theta)$ the action-value function that returns the optimal reward if the action $a$ is taken given the current state $s^{(t)}$. In other words, the selected action given a current state $s^{(t)}$ is the action that maximizes $\hat{Q}$, i.e.,
203
+ a = \argmax_{a \in \cA(\cB_{\cS})} \hat{Q}(s^{(t)}, a|\Theta),
204
+
205
+ where we use $\cA(\cB_{\cS})$ to denote the set of actions associated with the removals of points in the basis set $\cB_{\cS}$ as described in Section .
206
+
207
+ In order to achieve permutation invariance over the input, we design the first stage of our network (shown in the Deep Feature Extraction (DFE) block in Figure ) to be a series of Edge Convolution (EdgeConv) layers , which were originally inspired by PointNet . We employ the EdgeConv layer because, unlike PointNet, it has the ability to capture local geometric structure by taking into account the nearest neighbors of each single input (interested readers are referred to for more details), hence more information can be extracted to improve the learning capability.
+ The main role of this DFE block is to capture the relationship between every single input data point and its local geometric structure. The global set feature is then obtained from the Global Feature block shown in Figure . The repeated concatenation of the global feature with the individual input features is then fed into a multi-layer perceptron (MLP) to obtain the expected rewards. We then apply a mask to extract only the rewards for points in $\cB_{\cS}$.
209
+
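A simplified, permutation-invariant PyTorch sketch of such an agent; it substitutes a plain shared per-point MLP for the EdgeConv-based DFE block purely to keep the example short, so it is a stand-in rather than the paper's exact architecture:

```python
import torch
import torch.nn as nn

class PointQNet(nn.Module):
    """Permutation-invariant scorer: a shared per-point MLP, a max-pooled global set
    feature, and a per-point head producing the expected return of removing each point."""
    def __init__(self, in_dim: int, hidden: int = 64):
        super().__init__()
        self.local = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(),
                                   nn.Linear(hidden, hidden), nn.ReLU())
        self.head = nn.Sequential(nn.Linear(2 * hidden, hidden), nn.ReLU(),
                                  nn.Linear(hidden, 1))

    def forward(self, s):                       # s: (B, N, in_dim) state matrices S_S
        f = self.local(s)                       # per-point features (B, N, hidden)
        g = f.max(dim=1, keepdim=True).values   # global set feature (B, 1, hidden)
        g = g.expand_as(f)                      # repeat and concatenate with local features
        return self.head(torch.cat([f, g], dim=-1)).squeeze(-1)   # (B, N) Q-values
```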
210
+ \vspace{-2mm}
211
+
212
+ \vspace{-1mm}
213
+ Based on the components discussed above, this section introduces our general learning algorithm, which is summarized in Algorithm . Our learning framework relies on the popular deep Q-learning approach that has been used extensively in several other RL applications. The training is repeated over multiple episodes. At the start of each episode, a set of measurements $\cX$ containing $N$ data points (with $N$ fixed throughout the training process) is randomly sampled from the training set. Note that the outlier rate in $\cX$ is randomly chosen in the range from $1 \%$ to $40 \%$ for each episode.
+ A minimax fit is then performed on $\cX$ to obtain the initial state $s_{\cX}$. Starting from $s_{\cX}$, our agent explores the search space by passing through multiple states until reaching the goal state. During the training process, the action taken at each state is sampled based on the popular $\varepsilon$-greedy policy .
+ After taking an action, the agent receives the reward computed based on and moves to the next state. The network parameters are then updated based on the well-known Bellman equation,
216
+
217
+ \hat{Q}(s^{(t)}, a|\Theta) = e(s^{t}) + \gamma \max_{a \in \cA(\cB_{\cS})} \hat{Q}(s^{(t+1)}, a|\Theta).
218
+
219
+ Therefore, the network parameters are updated by minimizing the temporal difference error $\delta$ defined by
220
+
221
+ \delta = \rho(e(s^{t}) + \gamma \max_{a \in \cA(\cB_{\cS})} \hat{Q}(s^{(t+1)}, a|\Theta) - \hat{Q}(s^{(t)}, a|\Theta)),
222
+
223
+ where we choose $\rho$ to be the Huber loss . When the agent reaches the goal state (i.e., $f(\cS) \le \epsilon$), another set of measurements $\cX$ is taken and a new episode is started. As we use the popular PyTorch framework to implement our network, the optimization of to update the network parameters can be performed by off-the-shelf gradient-based solvers. More information about the choice of training parameters can be found in the supplementary material.
224
+
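A minimal PyTorch sketch of one such update on a sampled replay batch, using the Huber (smooth-L1) loss on the temporal-difference error; the batch layout, the masking of invalid actions and the Q-network interface are assumptions made for illustration, and terminal-state handling is omitted for brevity:

```python
import torch
import torch.nn.functional as F

def td_update(q_net, optimizer, batch, gamma=0.999):
    """One deep Q-learning step: regress Q(s_t, a_t) towards r_t + gamma * max_a Q(s_{t+1}, a),
    where the max runs only over valid actions (points in the basis set of the next state)."""
    state, action, reward, next_state, next_mask = batch    # assumed batch layout

    q_sa = q_net(state).gather(1, action.unsqueeze(1)).squeeze(1)   # Q(s_t, a_t)

    with torch.no_grad():                                    # bootstrapped target
        q_next = q_net(next_state).masked_fill(~next_mask, float("-inf"))
        target = reward + gamma * q_next.max(dim=1).values

    loss = F.smooth_l1_loss(q_sa, target)                    # Huber loss on the TD error
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```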
225
+ [t]
226
+ \caption{Main algorithm.}
227
+ [1]
228
+ \State Initialize experience relay memory $\cM$
229
+ \For {episode e = 1 to L}
230
+ \State Take a set of putative measurements $\cX=\{\bx_{i}\}_{i=1}^N$
231
+ \State Obtain maximum residual $f(\cS)$ and basis $\cB_{\cS}$ by solving
232
+ \State Initialize first state $s^{(t=0)}$
233
+ \While{($f(\cS) > \epsilon$)}
234
+ \State
235
+
236
+ \hspace{9mm} a_t=
+ \begin{cases}
+ \text{random action } a \in \cA(\cB_{\cS}) & \text{w.p. } \varepsilon \\
+ \argmax_{a \in \cA(\cB_{\cS})}\hat{Q}(s^{(t)}, a|\Theta) & \text{otherwise}
+ \end{cases}
240
+
241
+ \State Get reward $e(s^{(t)})$ and move to the next state $s^{(t+1)}$
242
+ \State Add tuple $(s^{t}, a^t, s^{t+1}, e(s^{t}))$ to $\cM$
243
+ \State Sample random batch from $\cM$
244
+ \State Update network parameter $\Theta$
245
+ \EndWhile
246
+ \EndFor
247
+
248
+ \vspace{-2mm}
249
+
250
+ \vspace{-1mm}
251
+ Recall from previous sections that the solution returned by our network is a set $\cS^*$ such that $f(\cS^*) \le \epsilon$. Since the proposed algorithm is sub-optimal, the consensus size obtained from $\cS^*$ could be less than the optimal solution $\cI^*$, i.e., $|\cS^*| \le |\cI^*|$, where $\cI^*$ is the solution of Problem .
252
+
253
+ To partially overcome this, we propose a simple heuristic in order to gradually improve our obtained solution, which is summarized in Algorithm .
254
+ Intuitively, starting from the initial solution $\cS^{t_1 = 0} = \cS^*$ (note that we use $t_1$ to avoid confusion with the state index $t$ used in the previous sections), we test all points $\bx_j$ in the current outlier set $\hat{\cS}^{t_1} = \cX \setminus \cS^{t_1}$ and add the points that lead to a consensus size improvement, i.e., $f(\cS^{t_1} \cup \bx_j) \le \epsilon$. This process is repeated until no more points can be added.
255
+
256
+ [t]
257
+ \caption{Local Tree Refinement}
258
+ [1]
259
+ \Require Input data $\cX$, initial solution $\cS^{0}$
260
+ \State $t \gets 0$, $improved \gets \text{True}$
261
+ \While{improved }
262
+ \State $\hat{\cS}^{(t)}\gets \cX \setminus \cS^{(t)}$, $improved \gets \text{False}$
263
+ \For{$\bx_j \in \hat{\cS}^{(t)}$ }
264
+ \If{$f(\cS^{(t)} \cup \bx_j) \le \epsilon$}
265
+ \State $\cS^{(t)} \gets \cS^{(t)} \cup \bx_j$; $improved \gets \text{True}$.
266
+ \EndIf
267
+ \EndFor
268
+ \State $t \gets t + 1$.
269
+ \EndWhile
270
+
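A compact Python sketch of this refinement loop; `minimax_value` is a hypothetical helper that returns $f(\cdot)$ for a subset of point indices (e.g. the $\gamma$ produced by the minimax-fit sketch earlier):

```python
def local_refinement(num_points: int, inlier_idx, eps: float, minimax_value):
    """Greedily re-insert removed points while the minimax value of the enlarged
    set stays within the inlier threshold eps (Local Tree Refinement)."""
    inliers = set(inlier_idx)
    improved = True
    while improved:
        improved = False
        for j in set(range(num_points)) - inliers:
            if minimax_value(sorted(inliers | {j})) <= eps:
                inliers.add(j)
                improved = True
    return sorted(inliers)
```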
271
+ \vspace{-1mm}
2106.02740/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-11-09T23:06:00.966Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" version="15.5.8" etag="gLjlchsVxHGFtOpLVPjj" type="device"><diagram id="lS_SEI_11_Gij45JSAlH">7V1dc6O4Ev01eYwKSejrcZJM7j7s1EzdVO3sPqWwIQ4VbHwxmST7669kIz4ENvJYJJDEqZ21ZSzDOa2WuvsIn+HL5fN/smB9/y0No+QMeeHzGb46Q0gQT/6rGl52DdRju4ZFFoe7Jlg13MT/RkVj8bnFYxxGm8aBeZomebxuNs7T1Sqa5422IMvSp+Zhd2nS/NZ1sIhaDTfzIGm3/ozD/H7XyvVlqfY/onhxr78ZesU7y0AfXDRs7oMwfao14a9n+DJL03z3bPl8GSUKO43L7nPXe94tTyyLVrnNB9DuA7+C5LG4tuK88hd9sYssfVwXh0VZHj13QRzM9OFe+xRgeWHSIKJ0GeXZizyk6AgzvPtIYQxQd/FUQQuRALg47L6OrPALVgtGF2X31UXLJ8V1d2OA+zFYp/Eq334FuTgjV2f4Itisd5Z1Fz9HsqOL+3wpv+AKqveSeLGSz+cSgSiTDSXLnnwRBpt79Ynti3i5NTX9/6t4uZCnm8Qz+W+QJHEU3uZREm3ijWzYRPPHLM7lxV3/+P719uqvm9vLYBllAdj8WqiveYjy+X3R8ybP0ofoMk3SbHsNePeQ79zFSVJrv9j+yfYszYM8TtWZn3PVxV7S6+Sig+RCjwDiUY8giiFhXJAG1758lyHB5RMKfQ+TNvMMYMJ8n2HKfdUd6rACDCiHzPyOU2zC77cJ+QHpbxR5T/dxHt2sg7l650l6PMMcTFM5nhonTFAAOa1w5E0mGBBctGCuMUEB9/SbCmjeJmLPIacQQfqJkKNrrZ7OH2cWbMzSx1UYhX/OyoZg/qB83Cr8/pgn8Soq2sMge/guu1EDTg4p4JFmI9q2qiPv0lVeY+1u+ziGZ/9K/Z3Is3af3AOYE0F9AeWcJPmw96ZyHG1HoccZgwKfzh49gr0XiX0YZdghhZvdygGSTjIoFjhskyG2D9NrOxiB2AMUe6waZX5z3mPSmhjxS/Jwewhy0DcEpb/0Gr2cTiIbD4lhnElXupujnqJN3mbv+voaXV4OwB6FQP5HSvo62Ol3kJIZIOpjzAE9fBRTFRXqzxnUwkd6NldwT2KqEqMggnvqzxERBGC5MqtgmgQPOkp8R4s36fiZdOZ6GS2mwQN8dzwwBCBBZaDhT4QIiyh/akTIwJGQCmt/GkRYpBqmRgQHgghcATUNIt5ffM8RwBRV0QWaBhEW8f3UiCCAUWqCNHYeLCL1qfEgAJQxycSCCGgRbU+MCIEAYbhKAU+EiHHE1U6JIEAwWkE9kbl6HHG1UyIEwKwG9iR4QO8vrIYeAozjaoqYRjyHLALrbJdeLeDuYeNw4dDbPuQ7iywI46iqdaxSlaQdpJhhlwmkPqD1B2sR5AOBfVGrP3YTxKqcuYPioabDjh/vlflxmpfCcoT4FTvNxS5lwCuqUDuAW/RgCDCEtRFG2/wg2Uk5etxUd21K/gqXeB4kfwazKPmRbuKi7DBL8zxdSvj0AV+Kan+emqwVRZHl80KpYEC8uku3z+I5UNWJKLzcVSzjTfpltUiqUlWdboO9y8vr66+0ZQYF1W4WCT4ltSHTpBSD2krOZ+0qCAfE46R2SJtRAUi95u+5cIkWAf0QQ86sgmwfZvVpkQSbzZm7SpQHdN1Ilw79tmSGAb/D2WEqB6wDuG3C9lX4RWmtKuusw5sHWa7fTmU3uu06TvQxUbiIbor+0iy/TxfpKki+Vq0FmOq4Y6GsAaVVW3WYdFsWJUEe/2p23wVc8Q0/lE6oYkp3U9KEmj1s0sdsHhUfqtBv9XNuKqSMfiRwiyhv9bNlsbxoO2It8gBDC8JKa37R59S2bgQpwB2rLTk0HVi3RQz+qQjbP+ToYXoRAXKG8MpHM/whPvDqswNsc+8BObPUOkBtO5DmIY9j1TEOdIIjqbQfuX7rJ4MLtJcMuTgW7eVZY7r3Omb4qvUkwC0i/w8n/trDp/aVEIO6+EuIFmOQ4G71FwV1YYrgDvRf2CJn8K4EYIdHm1x9yVVxNdi8phoWeRDUpVuoHc9yD3QMN2Qox/Q4PYk6iyTD+5B9HeaMIaXaaznIXheIPO5aTYkt8grjk3r1wIuBj9mIJh08jhr9kTKuHpBlMMhGOsnjSZbiD+PNISDeqIx6kmX2HpAJIH7NqNGY8J5kOb0HbyHDrlEZ9SRL5YdBFmoLxlg99TuMfwUD1B+VUU+y2t2TZJBxDeUjddX+JMvafYATwOCYzNqfpBa8D2Uhw/WROmtdanhXgEMMGB+VWY8jWHSMMgNcX8c4UH6HISJEEHDCRuo8LMLFyQiMjs4/MdqnKiKSOo8RkzuTiMZGXAesWASVk5EV9bCiEG4qT5gADS1Rh5iIgbIy3hQQNfRDpWjlJC4sYs93rCDqn6ehuiFHk0ACGiS0XRr0gMf1pxqlKQiagiEXPs4imp2QYqi3iIEMDQlriyo44B2SCizkcsAB3haBrdY+1FDtkkHM5VR/G2S5fLqO0rX8NLq+TFcS88dtlej2Z5o9RNntt2B1e5EE84dbiPiz/A+sVwuDAu3MZJd/OAQcYgyMjQ7UL1vqMq3uYqwSvjqYMohFcDsA6K8Pt9eUDIm2cSsxasc8zRzohchRwW1ZNE3Sx7DfrRj+4o7Po/m8y8PMOPGJM0B9IhdBTY8hbbLDaftqL0OH10AAO/AaWnP/KtAS9dcFbemkDbFHGSk4gbxmoQd0nQPjbRPQjkPYuQfN11F2QmOjs1qi+EhUD0Olbqv0LMvPpSTE8E/upJ6kHVVLf/4repHW3aJcMrFdxp4dljWaK9plHIZbQrNoE/9baEKVj2rLJ9XQKu70CJl+XZyB+q5Eff1FKRaxXbbayFF3Rr93YHrS7fHmWhY6MSLUnLTOoaHMTu/uNtHJNB9V+j3Ng86COQ9xlweVUbtPQltPeVgf7HMMzOHHGOi40dnAvvKoIu+456bDiBPPgJu2o++BsT6q1nsa1hEMScS6sBaU4YC6wRQb229xR0A2MKYWEbDj4OBnuhwkPOiBGhpQex3bZTrWCtSBaJzY1HMPLajkNWYvfxdgbF/8U39x9dx49aJfPcf537Xn/9SeVx9RL14aOB9ccu1B+XVWXMjcrk6MlZHtEovI0Lym5zSK0cjs1t2Ci9pUmg+aQkEqtCa1NB54jPGM3RQwdmQK1HAMQ5JvkyM4RP7YSSFm5DIFUiyyC/PH7FdZ5hrCU6/CIjSewtBDBjnE3G9nHej2ufM9LEv4g5faYUUgaX3CPvd+67wqI9udwW+bnEWC5dPkGowIRybXO224MjlxnMntOy9nJmcj
n5jy1OPOKQkEajk8wQ9363AiOnU//SdFg1Nkke35dNyfawWXjtsm6fWZO/gQuQOLXN2n9/lcNrr0PidnLj/TVXYGZJ8Zaa59jLsHDOd9dHLc5l4A6yydR0q81nsngD0b/zvqAcW3n1g0bKKl7whT1x/SDiOg3n6+rX9B55h7KbzZjUzauNfv/wO3u9FVZeYvTZTbKo3JEOQdDPEOhrgLhlA/Q28plINuMCa+MQo6bq1FOgS4pmv5LYgtMl+DlBuPlIC6gho25XNI/2qL5aRzEtQWGZ+3rew6Atn0GV2V3cFAtsjZvL2g2RXQhuPAHT/SSrp/VdDJBGqRfHk3WGND3IR5G+uulYoLKTOzSDm8g3nQNwRNmHSas875OTfno+6kcJqaKSQRD/2zDjUTRzNMXamZUEuThztE+MMKmphFxPq6SZKGGnquNvXE87O2IHpHjf4pcdIm0UL1b+7SU0K2gM5qBI84QuYcULH3vniE/Kaa2kcH+/WZ0a+jTM7ucpqXUHz14TPtPDtX+RxuEcSPIp/z0UfDsVZrOxqYuiMh31c6Q5aD4Xcs71Tt06ttKnnbjLWxKCl/w+1Yos3NKf5wm0j0ztAatT/STX5+k6qFH03UrTpnmXy2UM/+G83TLIyyNvuH95dkO6xdbS/hZ+b2kn0LIJvdI/zwysgDiODmUDunbmbN5m0LzvXmPKfbR7hF1uZz7CLaUj/Kla53+n4wqhfRRyb5j12yUONuC1q3vve8zHR68/jTFyvtDNZNnkXBsu5UajZI//eY5sWwPt/dOviLPADy9fPWevT72hPdbPE/1g+53ufWdkSD7nPjffvcZATV3IB6PsxGN2yMBjeeajK/SfK2qwzDUxFkeBRrVY7BKhmqLm6esOM6N2/nFrWr8W7i1cNHcxLkjZzEudap6illCB9xqqLqY/gIX5jpUehkNUNo02Vg8weRHLkMgrh5/j3nZV4vcuti2vneXfJ789GcC+txLkPttEeGxxokVDpVMPUxnEt56zazTnC0N+HNjgZMc4h27vQ6kTS5CUauyp83+WDeQPR4A4KMzIkjb9C0P+N2gE58gV4i1fNiWdRKi51gNKPNpQ1qMn1lXQaE4IISAhnyzY2k5pLSosYoX2apwr2yA3X/xW9pqPD9+n8=</diagram></mxfile>
2106.02740/main_diagram/main_diagram.pdf ADDED
Binary file (71.8 kB). View file
 
2106.02740/paper_text/intro_method.md ADDED
@@ -0,0 +1,27 @@
1
+ # Introduction
2
+
3
+ As the world population grows and becomes increasingly urbanized, waste production is estimated to reach 2.6 billion tonnes a year in 2030, an increase from its current level of around 2.1 billion tonnes [\[28\]](#page-8-1). Efficient recycling strategies are critical to reduce the devastating environmental effects of rising waste production. Materials Recovery Facilities (MRFs) are at the center of the recycling process. These facilities are where the collected recyclable waste is sorted into separate bales of plastic, paper, metal and glass (and other sub-categories). Even though the MRFs utilize a large number of machines alongside manual labor [\[24\]](#page-8-2), the recycling rates as well as the profit margins stay at undesirably low levels (e.g. less than 35% of the recyclable waste actually got recycled in the United States in 2018 [\[2\]](#page-8-0)). Another crucial aspect of manual waste sorting is the safety of the workers, who risk their health daily by being exposed to dangerous and unsanitary objects (*e.g.* knives, medical needles). At the same time, the extremely cluttered nature of the waste stream makes automated waste detection (*i.e.* detection of waste objects that should be removed from the conveyor belt) very challenging to achieve.
4
+
5
+ Recent advances in object classification and segmentation provide a great opportunity to make the recycling process more efficient, more profitable and safer for the workers. Unfortunately, the research community is lacking the high-quality in-the-wild datasets to train and evaluate the classification and segmentation algorithms for industrial waste sorting. While several companies do development in the automated waste sorting space (e.g. [\[49,](#page-9-0) [50,](#page-9-1) [57\]](#page-9-2)), they keep their data private, and the few existing open-source datasets [\[38,](#page-9-3) [48,](#page-9-4) [54,](#page-9-5) [60\]](#page-9-6) are very limited in the amount of data and/or are generated in uncluttered environments, not representing the complexity of the domain (see Figure [1\)](#page-1-0).
6
+
7
+ In this paper, we propose the largest openly available in-the-wild waste detection dataset ZeroWaste that is specifically designed for evaluating label-efficient industrial
8
+
9
+ <sup>\*</sup>dbash@bu.edu
10
+
11
+ <span id="page-1-1"></span><span id="page-1-0"></span>![](_page_1_Figure_0.jpeg)
12
+
13
+ Figure 1. Left: examples of the existing waste detection and classification datasets (top to bottom): Trash Annotation in Context (TACO) [\[48\]](#page-9-4), Labeled Waste in the Wild [\[54\]](#page-9-5), ReSortIT [\[31\]](#page-9-7) datasets. Right: footage of the waste sorting process at a real Materials Recovery Facilities (MRF). The domain shift between the simplified datasets with solid background and little to no clutter and the real images of the conveyor belt from the MRF, as well as the object-centric nature of the existing datasets, makes it impossible to use models trained on these datasets for automated detection on real waste processing plants. In this paper, we propose a new ZeroWaste dataset collected from a real waste sorting plant. Our dataset includes a set of densely annotated frames for training and evaluation of the detection and segmentation models, as well as a large number of unlabeled frames for semi- and self-supervised learning methods. We also include frames of the conveyor belt before and after manual collection of foreground objects to facilitate research on weakly supervised detection and segmentation. Please see Figure [2](#page-2-0) for the illustration of our ZeroWaste dataset.
14
+
15
+ waste detection. ZeroWaste is a dataset that is fundamentally different from the popular detection and segmentation benchmarks: high level of clutter, presence of highly deformable and translucent objects, as well as a fine-grained difference between the object classes – all these aspects pose a unique challenge for the automated vision. In addition to that, due to the ever-changing nature of the stream, content and visual qualities of the stream are often MRFspecific and highly depend on the season, therefore the detection algorithm must be label-efficient and able to learn and adapt to the changes in the stream with only a few labeled examples. We envision that our open-access dataset will allow researchers to develop more robust and dataefficient algorithms for object detection and other related problems beyond human-centric domains. We summarize our contributions as follows:
16
+
17
+ 1. We propose the first fully-annotated ZeroWaste-*f* dataset for industrial waste object detection. The ZeroWaste-*f* dataset presents a challenging real-life computer vision problem of detecting highly deformable objects in severely cluttered scenes. In addition to the fully annotated frames from the ZeroWaste-*f* set, we include the unlabeled ZeroWaste-*s* set for semi-supervised learning. We also propose a version of our ZeroWaste data augmented with objects from the TACO [\[48\]](#page-9-4) dataset, ZeroWaste-*Aug*, to combat class imbalance. We show that the introduction of object augmentation improves the overall segmentation quality.
18
+
19
+ 2. We introduce a novel before-after data collection setup and propose the ZeroWaste-*w* dataset for binary classification of frames before and after the collection of target objects. This binary classification setup allows much cheaper data annotation and catalyzes further development of weakly supervised segmentation and detection methods. Our experimental results show that meaningful foreground segmentation can be achieved using ZeroWaste-*w*; however, more efficient weakly-supervised methods are needed to reach the segmenta-
20
+
21
+ <span id="page-2-1"></span><span id="page-2-0"></span>![](_page_2_Figure_0.jpeg)
22
+
23
+ Figure 2. Left: example of an image from ZeroWaste-*f* dataset. Right: the corresponding ground truth instance segmentation. At the end of this conveyor belt, only paper objects must remain. Therefore, we annotated the removable objects of four material types as foreground: soft plastic, rigid plastic, cardboard and metal. The background includes the conveyor belt and paper objects. Severe clutter and occlusions, high variability of the foreground object shapes and textures, as well as severe deformations of objects usually not present in other segmentation datasets, make this domain very challenging for object detection. More examples of our annotated data can be found in Section [7.3](#page-11-0) of the Appendix (*best viewed in color*).
24
+
25
+ tion quality achieved by fully-supervised methods.
26
+
27
+ 3. We implement the fully-supervised detection and segmentation baselines for the ZeroWaste-*f* dataset and semi- and weakly-supervised baselines for ZeroWaste-*s* and ZeroWaste-*w* datasets. Our results show that popular detection and segmentation methods, such as Mask-RCNN, TridentNet and DeepLabV3+, struggle to generalize to our data, which indicates a challenging nature of our in-the-wild dataset and suggests that new and more robust methods must be developed to solve the problem efficiently and be applied in the real waste sorting plants.
2108.02180/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2108.02180/paper_text/intro_method.md ADDED
@@ -0,0 +1,152 @@
 
1
+ # Introduction
2
+
3
+ Visual Storytelling (VST) [18, 29] – the task of generating a story based on a sequence of images – goes beyond a basic understanding
4
+
5
+ of visual scenes and can be applied in many real-world scenarios, e.g., to support the visually impaired. Moreover, VST reflects on the creative ability of intelligent systems. Although similar in concept to other cognitive tasks such as image captioning and visual question answering, VST differs as it requires to reason over a sequence of images while simultaneously ensuring coherence across multiple generated sentences. To achieve this, VST methods need to address two major challenges: the first is visual and relates to grounding the story's text to the images. The second is linguistic and relates to the quality of the story. Both challenges can be described in terms of coherency: the story should be coherent by itself, and consistent with the images.
6
+
7
+ Prior research on VST started to address the aforementioned challenges. Early works expand captioning [\[5,](#page-8-3) [40,](#page-8-4) [44\]](#page-8-5), focusing sentence generation mainly on the current image. This limits the ability to create a narrative that includes consistent visual information. Prior work also makes limited use of temporal dependence and history, e.g., sentences that have already been generated are not used. Consequently, the output is prone to linguistic errors such as repetitiveness [\[26\]](#page-8-6). To mitigate these issues, later works strive to generate more meaningful stories via adversarial and reinforcement learning [\[17,](#page-8-7) [42\]](#page-8-8), which remain delicate to train.
8
+
9
+ Importantly, images are not independent. For example, if the first image in a sequence shows a protest, the model may want to focus on signs in later images. Conversely, if the last image shows a ring on a finger, then the model should pay attention to wedding-related objects and activities in the preceding images. This is important for VST because sentences are created per image but are part of a story. Hence, objects that the model is focusing on in one image should be conditioned on the selection in other images.
10
+
11
+ To do this we develop a novel model which (1) implicitly reasons over objects, activities, and their temporal dependencies in each image; and which (2) improves the coherency of the narrative. To reason over objects and activities in each image, i.e., to understand their dependencies and their temporal ordering, we introduce ordered image attention (OIA). As illustrated in Fig. [1,](#page-0-0) for each image, OIA accumulates representation information from objects detected within the corresponding image into an attended image representation. Importantly, accumulation factors depend on whether the image precedes or succeeds the image for which we are currently generating the sentence, which permits to establish an order. The attended image representations are subsequently summarized into a context embedding via an Image-Sentence Attention (ISA) unit, before being used for sentence decoding.
12
+
13
+ In addition, to alleviate common linguistic mistakes like repetitiveness and to promote coherence in the story, we incorporate information from the story generated up to the current sentence into the sentence generation decoder. Specifically, the decoding strategy decays the probability of a word if it has already been used in the story. The decoder also maintains a separate prior over the output probability distribution, independent from the language generation unit. This prior is based on counts of the words that were already predicted in the story. Both the prior, and the Recurrent Neural Net (RNN) decoder output are combined to predict the next word in the sentence.
14
+
15
+ Empirical results on the challenging VIST dataset demonstrate that the proposed method generates stories with an improved narrative quality. The method outperforms prior state-of-the-art by 1% on the METEOR score. Examples of stories generated by the approach are shown in Fig. [1.](#page-0-0) We also present a user study demonstrating the advantage of the model in terms of coherency (64.20% vs. 28.70%).
16
+
17
+ # Method
18
+
19
+ The goal of visual storytelling is to generate a story, composed of N ordered sentences $\{y_s|1\leq s\leq N\}$ , given an ordered sequence of images $I=\{I_s|1\leq s\leq N\}$ . Each sentence $y_s=(y_{s,0},\ldots,y_{s,t},\ldots)$ is composed of words $y_{s,t}\in\mathcal{Y}$ from vocabulary $\mathcal{Y}$ .
20
+
21
+ The order in which the images are given is essential as it defines the plot line of the story. The story should be focused, *i.e.*, each sentence should be related to the remainder of the story. Importantly, the sentences should form a coherent body of text describing the set of images, and not only a set of related information. For instance, the story "The church was beautiful. The bride and groom walk down the aisle. The cake was amazing." is less coherent than: "We went to the church for the wedding today. The bride and groom were excited for the day. Both cut the cake together."
22
+
23
+ **Overview:** To address this challenge, we develop the model illustrated in Fig. 2. It infers conditional probabilities $p'(y_{s,t}|y_{s,t-1},c_s)$ for the t-th word $y_{s,t} \in \mathcal{Y}$ in sentence $y_s$ given the previous word $y_{s,t-1}$ and the context embedding $c_s$ for sentence s. The context embedding $c_s$ summarizes region representations $r_{i,k}$ of all K object regions across all N images $I_i$ ( $i \in [1, N], k \in [1, K]$ ) via Ordered Image Attention (OIA) (Sec. 3.2) and Image-Sentence Attention (ISA) (Sec. 3.2). Specifically, when generating sentence s, OIA computes an attended image representation $a_i^s$ for every image $I_i$ by attending to the K region representations $r_{i,k}$ (Sec. 3.2). These attended image representations $a_i^s$ are subsequently summarized into the context embedding $c_s$ via an image-sentence attention (Sec. 3.2).
24
+
25
+ Below we first discuss computation of the attended image representation $a_i^s$ (Sec. 3.2), before detailing computation of the context embedding $c_s$ (Sec. 3.2) and computation of the conditional probabilities $p'(y_{s,t}|y_{s,t-1},c_s)$ (Sec. 3.3).
26
+
27
+ Ordered Image Attention (OIA) is designed to 1) form a structure across ordered images and to 2) select the relevant objects per image. For this we model preceding and subsequent interactions separately using different attention factors. We calibrate each factor's importance with trainable scalars, which forms a graph of dependencies between the images. For each sequence of N images, the model infers a total of $N^2$ attention maps, one per image for each sentence. We detail this module next.
28
+
29
+ 3.1.1 Attention Belief. For each image $I_i = \{r_{i,1}, \dots r_{i,K}\}$ we consider a set of K regions, represented by their feature vectors $r_{i,k} \in \mathbb{R}^d$ , where d is the objects' embedding dimension. Suppose we are currently generating sentence $y_s$ $(1 \le s \le N)$ . To do this we first
30
+
31
+ <span id="page-3-3"></span>![](_page_3_Figure_2.jpeg)
32
+
33
+ Figure 3: Illustration of Ordered Image Attention. Each node represents an image attention belief. For each sentence, we connect all the images with the sentence-corresponding image. The relative position to this image determines whether the connection is modeled with the $\Psi_{bwd}$ factor (for preceding images) or the $\Psi_{fwd}$ factor (for subsequent images; see Eqs. [8-10]). We infer the attention belief by collecting interactions and local object information within the image (see Eqs. [2-4]). We use scalars to calibrate the importance of each factor. In total, we generate 25 attention maps, one per image for every sentence.
34
+
35
+ compute an attended image representation $a_i^s$ as follows
36
+
37
+ $$a_i^s = \sum_{k=1}^K b_{i,k}^s r_{i,k},\tag{1}$$
38
+
39
+ where $b_{i,k}^s \ge 0$ is the attention belief highlighting the importance of the k-th object in the i-th image when generating the s-th sentence. Importantly, for every image $I_i$ we require $b_{i,k}^s$ to be a valid probability distribution, i.e., we also enforce $\sum_{k=1}^K b_{i,k}^s = 1 \ \forall s, i.$
40
+
41
+ The object attention belief $b_{i,k}^s$ is dependent on all the input data, *i.e.*, other objects and images. To avoid complex computation, we factorize the belief $b_{i,k}^s$ into two pairwise dependencies that preserve the order, and a local term. For the pairwise terms we use $\mu_{j \to i}^{\text{bwd}}$ , which is a message from a preceding image $I_j$ , or $\mu_{j \to i}^{\text{fwd}}$ , which is a message from a subsequent image $I_j$ . We also use $\mu_{i \to i}$ for self-messages. Additionally, we include a local factor $\Psi_i(r_{i,k})$ that considers the object representation. Unlike the messages mentioned before, the local factor does not rely on interactions with other objects. We aggregate all the messages along with the local factor as illustrated in Fig. 3. For normalization we employ a softmax. Formally we compute the attention belief $b_{i,k}^s$ by distinguishing three cases. If i = s we have
42
+
43
+ <span id="page-3-1"></span>
44
+ $$b_{i,k}^{s} \propto \exp(\alpha_{i}^{s} \Psi_{i}(r_{i,k}) + \alpha_{i,i}^{s} \mu_{i \to i}(r_{i,k}) + \sum_{j < i} \alpha_{i,j}^{s} \mu_{j \to i}^{\text{bwd}}(r_{i,k}) + \sum_{j > i} \alpha_{i,j}^{s} \mu_{j \to i}^{\text{fwd}}(r_{i,k})).$$
45
+ (2)
46
+
47
+ If i < s we use
48
+
49
+ <span id="page-3-4"></span>
50
+ $$b_{i,k}^{s} \propto \exp(\alpha_{i}^{s} \Psi_{i}(r_{i,k}) + \alpha_{i,s}^{s} \mu_{s \to i}^{\text{bwd}}(r_{i,k})).$$
51
+
52
+ $$(3)$$
53
+
54
+ If i > s we obtain
55
+
56
+ <span id="page-3-2"></span>
57
+ $$b_{i,k}^{s} \propto \exp(\alpha_{i}^{s} \Psi_{i}(r_{i,k}) + \alpha_{i,s}^{s} \mu_{s \to i}^{\text{fwd}}(r_{i,k})).$$
58
+
59
+ $$(4)$$
60
+
61
+ In all three cases $\alpha_i^s$ , $\alpha_{i,i}^s$ , $\alpha_{i,j}^s \in \mathbb{R}$ are scalars used to calibrate the importance of different messages for a given sentence. These scalars form a dependency structure between images for each of the generated sentence indices. Intuitively, when we generate the first sentence, the attention belief might depend more on subsequent images, to correctly identify the story event, *e.g.*, a wedding, a parade, *etc.* Thus, the scalars will promote interaction with later images. An analysis of these scalars is provided in the appendix. Next, we define the different types of messages.
62
+
63
+ 3.1.2 Pairwise Messages and Factors. A message aggregates interaction scores from an image to an object. The three messages $\mu_{j \to i}^{\text{bwd}}$ , $\mu_{j \to i}^{\text{fwd}}$ and $\mu_{i \to i}(r_{i,k})$ are computed as follows:
64
+
65
+ $$\mu_{j \to i}^{\text{bwd}}(r_{i,k}) = \sum_{k'=1}^{K} \Psi_{\text{bwd}}(r_{i,k}, r_{j,k'}), \tag{5}$$
66
+
67
+ $$\mu_{j \to i}^{\text{fwd}}(r_{i,k}) = \sum_{k'=1}^{K} \Psi_{\text{fwd}}(r_{i,k}, r_{j,k'}), \text{ and}$$
68
+ (6)
69
+
70
+ $$\mu_{i \to i}(r_{i,k}) = \sum_{k'=1}^{K} \Psi_{i,i}(r_{i,k}, r_{i,k'}). \tag{7}$$
71
+
72
+ Importantly, these messages collect three different types of order-dependent interaction factors: (1) A backward image interaction, namely $\Psi_{\rm bwd}(r_{i,k},r_{j,k'})$ . This interaction models relations to the preceding j-th image in the sequence. (2) A forward image interaction, namely $\Psi_{\rm fwd}(r_{i,k},r_{j,k'})$ . This interaction models relations to the subsequent j-th image in the sequence. (3) The self interaction factor, namely $\Psi_{i,i}(r_{i,k},r_{i,k'})$ , which takes into account interactions between objects within the image. We formally define the different factors next.
73
+
74
+ **Interaction factors:** A commonly used practice to capture interactions across attention mechanisms is to first embed the elements into a joint Euclidean space followed by a dot-product [10, 32, 33, 38]. While we follow the same practice, we define three types of interaction factors to preserve the order. Consider two objects, $r_{i,k} \in I_i$ from the sentence-corresponding image and $r_{j,k'} \in I_j$ from the interacting image. We describe three types of interactions: for interactions with subsequent images (i.e., j > i) we use
75
+
76
+ <span id="page-3-0"></span>
77
+ $$\Psi_{\text{fwd}}(r_{i,k}, r_{j,k'}) = \left(\frac{L_{\text{fwd}}r_{i,k}}{\|L_{\text{fwd}}r_{i,k}\|_{2}}\right)^{\top} \left(\frac{R_{\text{fwd}}r_{j,k'}}{\|R_{\text{fwd}}r_{j,k'}\|_{2}}\right). \tag{8}$$
78
+
79
+ For interactions with preceding images (i.e., j < i) we use
80
+
81
+ $$\Psi_{\text{bwd}}(r_{i,k}, r_{j,k'}) = \left(\frac{L_{\text{bwd}}r_{i,k}}{\|L_{\text{bwd}}r_{i,k}\|_2}\right)^{\top} \left(\frac{R_{\text{bwd}}r_{j,k'}}{\|R_{\text{bwd}}r_{j,k'}\|_2}\right). \tag{9}$$
82
+
83
+ <span id="page-4-3"></span>![](_page_4_Figure_2.jpeg)
84
+
85
+ Figure 4: Illustration of ISA. The attention selects the attended image representation per sentence. We model interactions between attended images of the same sentence to compute each image's importance. Note, each node represents a sentence attention belief over the attended images.
86
+
87
+ For interactions within the image (i.e., j = i) we have
88
+
89
+ <span id="page-4-2"></span>
90
+ $$\Psi_{i,i}(r_{i,k}, r_{i,k'}) = \left(\frac{L_{i,i}r_{i,k}}{\|L_{i,i}r_{i,k}\|_2}\right)^{\mathsf{T}} \left(\frac{R_{i,i}r_{i,k'}}{\|R_{i,i}r_{i,k'}\|_2}\right). \tag{10}$$
91
+
92
+ Note, $L_{\mathrm{fwd}}$ , $R_{\mathrm{fwd}}$ , $L_{\mathrm{bwd}}$ , $R_{\mathrm{bwd}}$ , $L_{i,i}$ , $R_{i,i} \in \mathbb{R}^{d \times d}$ are trainable shared weights across the entire image sequence. Also, the object from the sentence-corresponding image will always be on the left side of the factor equation. Thus, the factor embeddings preserve the order. **Local factor:** Differently from the previous interactions, the following factor captures how important an object is based solely on the object representation. Given an object $r_{i,k} \in I_i$ , we define the local factor as
93
+
94
+ $$\Psi_i(r_{i,k}) = v^{\top} \operatorname{ReLU}(Vr_{i,k}), \tag{11}$$
95
+
96
+ where $v \in \mathbb{R}^d$ , $V \in \mathbb{R}^{d \times d}$ are trainable weights.
97
+
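A small PyTorch sketch of the order-preserving pairwise factor and the resulting message (shapes and names are illustrative; one $(L, R)$ pair is shared per direction, as stated above):

```python
import torch
import torch.nn.functional as F

def pairwise_factor(r_i: torch.Tensor, r_j: torch.Tensor, L: torch.Tensor, R: torch.Tensor):
    """Factor of Eqs. (8)-(10): project both sets of objects, L2-normalize, and take
    dot products.  r_i: (K, d) objects of the sentence-corresponding image (left side),
    r_j: (K', d) objects of the interacting image.  Returns a (K, K') score matrix."""
    left = F.normalize(r_i @ L.T, dim=-1)
    right = F.normalize(r_j @ R.T, dim=-1)
    return left @ right.T

def message(r_i, r_j, L, R):
    """Message mu_{j->i} of Eqs. (5)-(7): for every object k of image i, sum its
    interaction scores with all objects of image j."""
    return pairwise_factor(r_i, r_j, L, R).sum(dim=-1)

# Toy shapes: K = 4 objects per image, embedding dimension d = 8.
d = 8
r_i, r_j = torch.randn(4, d), torch.randn(4, d)
L_fwd, R_fwd = torch.randn(d, d), torch.randn(d, d)   # stand-ins for trained weights
mu_fwd = message(r_i, r_j, L_fwd, R_fwd)              # shape (4,), feeds Eq. (2)
```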
98
+ In a next step we summarize the attended image representations $a_i^s$ produced by OIA to compute the context embedding $c_s$ for the sentence s that we wish to generate. For this we use the Image-Sentence Attention (ISA) unit. It picks the relevant image context for generating the specific sentence. Formally we obtain the context embedding via
99
+
100
+ $$c_s = \sum_{i=1}^{N} \hat{b}_{s,i} a_i^s, \tag{12}$$
101
+
102
+ where attention factors
103
+
104
+ $$\hat{b}_{s,i} \propto \exp\left(\hat{\alpha}_s \hat{\Psi}_i(a_i^s) + \hat{\alpha}_{s,s} \hat{\mu}_{s \to s}(a_i^s)\right),\tag{13}$$
105
+
106
+ and where $\hat{\alpha}_s$ , $\hat{\alpha}_{s,s} \in \mathbb{R}$ are scalars. To avoid spurious correlations between sentences, we consider only self interactions and a local factor. This is illustrated in Fig. 4. The self-message of the attended image representation $a_s^s$ is
107
+
108
+ $$\hat{\mu}_{s \to s}(a_i^s) = \sum_{j=1}^N \hat{\Psi}(a_i^s, a_j^s). \tag{14}$$
109
+
110
+ Finally, the self and local factors are defined with a different set of weights following Eq. (10) and Eq. (11) respectively.
111
+
112
+ The goal at each timestep of decoding is to compute the conditional probability $p(y_{s,t}|y_{s,t-1},c_s)$ where $y_{s,t} \in \mathcal{Y}$ is the t-th word in sentence $y_s$ , $\mathcal{Y}$ is the vocabulary and $c_s$ is the context embedding detailed in Sec. 3.2. For this we use a GRU recurrent unit, tasked with generating probabilities over the vocabulary conditioned on
113
+
114
+ the context embedding $c_s$ and the previously generated token $y_{s,t-1}$ : $p(y_{s,t} = w|y_{s,t-1}, c_s) \propto$
115
+
116
+ $$\exp(\beta_{s,t} \cdot g_w(y_{s,t-1}, h_{s,t-1}, c_s) + (1 - \beta_{s,t}) \cdot f_w(\phi_{s,t})), \tag{15}$$
117
+
118
+ where $g_w$ is the output of a GRU unit for the word w. We set the GRU hidden dimension to d. $h_{s,t-1} \in \mathbb{R}^d$ is the hidden state at timestep t-1 for sentence s. $f: \mathbb{R}^{|\mathcal{Y}|} \to \mathbb{R}^{|\mathcal{Y}|}$ is a learned prior over the vocabulary based on a bag-of-words prior histogram $\phi_{s,t}$ , which we describe in the next paragraph. The purpose of f is to reduce text repetitions. $f_w$ denotes the value of f for a word w. We also incorporate a calibration gate $\beta_{s,t}: \mathbb{R}^d \to [0,1]$ for functions f and g using
119
+
120
+ <span id="page-4-5"></span>
121
+ $$\beta_{s,t} = \sigma \left( v_{\beta}^{\mathsf{T}} \tanh(G_g h_{s,t} + G_f W_1(\phi_{s,t})) \right). \tag{16}$$
122
+
123
+ Here, $G_g \in \mathbb{R}^{d \times d}$ and $G_f \in \mathbb{R}^{d \times \gamma}$ are trained projections of the GRU hidden state and the bottleneck layer respectively, $v_\beta \in \mathbb{R}^d$ are learned weights and $\sigma$ is the sigmoid function. $W_1$ is obtained from the prior as discussed next.
124
+
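+ A small sketch of one decoding step (Eqs. (15)-(16)) is given below. The wiring of the GRU input (concatenating the previous-token embedding with $c_s$) and all dimensions are assumptions made only to keep the example runnable; they are not the paper's exact configuration.
+
+ ```python
+ # Hedged sketch of the gated decoding step: GRU logits are blended with the
+ # bag-of-words prior via the calibration gate beta (Eqs. 15-17).
+ import torch
+
+ d, vocab, gamma = 32, 100, 8
+ embed = torch.nn.Embedding(vocab, d)
+ gru = torch.nn.GRUCell(2 * d, d)                 # input: [prev-token emb ; context c_s]
+ out_proj = torch.nn.Linear(d, vocab)             # g: GRU logits over the vocabulary
+ W1 = torch.nn.Linear(vocab, gamma, bias=False)   # BOW-prior bottleneck (Eq. 17)
+ W2 = torch.nn.Linear(gamma, vocab, bias=False)
+ G_g = torch.nn.Linear(d, d, bias=False)
+ G_f = torch.nn.Linear(gamma, d, bias=False)
+ v_beta = torch.nn.Parameter(torch.randn(d))
+
+ def decode_step(y_prev, h_prev, c_s, phi):
+     h = gru(torch.cat([embed(y_prev), c_s], dim=-1), h_prev)         # GRU update
+     g = out_proj(h)                                                   # g_w in Eq. (15)
+     f = W2(W1(phi))                                                   # prior f, Eq. (17)
+     beta = torch.sigmoid(torch.tanh(G_g(h) + G_f(W1(phi))) @ v_beta)  # Eq. (16)
+     logits = beta.unsqueeze(-1) * g + (1 - beta).unsqueeze(-1) * f    # Eq. (15)
+     return torch.softmax(logits, dim=-1), h
+
+ h0 = torch.zeros(1, d)
+ probs, h1 = decode_step(torch.tensor([3]), h0, torch.randn(1, d), torch.rand(1, vocab))
+ print(probs.shape)  # torch.Size([1, 100])
+ ```
+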
125
+ <span id="page-4-4"></span>**Bag-of-words (BOW) prior:** Remembering history during story-telling helps to stay on topic and advance the story in the desired direction. Although quite intuitive, mimicking this ability is not trivial. *E.g.*, most approaches for VST generate all the sentences in parallel. Converting the parallel sentence generation into a sequential one implies a major computational overhead during training.
126
+
127
+ To address this, we propose a simple yet effective learnable framework that does not require sequential training while still exploiting information found in prior sentences. The history is represented via a bag-of-words histogram $\phi_{s,t}$ , which includes all words that have been used until timestep t for the s-th sentence. During training, we initialize $\phi_{s,t=0}$ with the ground truth history counts found in the previous s-1 sentences. We update the statistics at each timestep with the predicted word, producing the next state of the counter $\phi_{s,t+1}$. At inference we generate sentences sequentially and update $\phi_{s,t}$ with the predicted words. $\phi_{s,t}$ is fed through a shallow bottleneck network to obtain the prior f, composed of two layers $W_1 \in \mathbb{R}^{\gamma \times |\mathcal{Y}|}$ and $W_2 \in \mathbb{R}^{|\mathcal{Y}| \times \gamma}$ without activation, where $\gamma$ is the bottleneck dimension:
128
+
129
+ $$f(\phi_{s,t}) = W_2(W_1(\phi_{s,t})). \tag{17}$$
130
+
131
+ Also note the use of $W_1(\phi_{s,t})$ in the gate (Eq. (16)).
132
+
133
+ **Intra-repetition regularization:** To regularize intra-repetitions, we decay the probability of previously used words during sentence generation. A critical aspect of this approach is to exclude words that appear frequently in the language (e.g., was, were, am). For this we pre-process the training set to calculate the average story frequency $\rho(w)$ of a word w via $\rho(w) = \frac{\# \text{ appearances of word } w}{\# \text{ stories } w \text{ was used}}$ . The final count for word w at timestep t is calculated as $\phi'_{s,t}(w) = \max[0, (\phi_{s,t}(w) - \rho(w) + 1)]$ . Intuitively, a word will not be penalized before it is used more than the prior belief average $\rho(w)$ . The final probability for word w being used is given by
134
+
135
+ <span id="page-4-6"></span>
136
+ $$p'(y_{s,t} = w | y_{s,t-1}, c_s) = \frac{p(y_{s,t} = w | y_{s,t-1}, c_s)}{\pi \cdot \phi'_{s,t}(w) + 1},$$
137
+ (18)
138
+
139
+ where $\pi \ge 0$ is a constant hyper-parameter. A penalty of 2 proved to work best on the validation set.
140
+
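+ As a toy illustration of Eq. (18), the snippet below applies the penalty to made-up counts; the corpus statistics $\rho$ and the histogram values are invented for the example and are not taken from the training set.
+
+ ```python
+ # Hedged sketch of the intra-repetition penalty (Eq. 18) on toy counts.
+ from collections import Counter
+
+ rho = {"was": 4.0, "beach": 1.2, "dog": 1.1}        # assumed avg. per-story frequency
+ phi = Counter({"was": 3, "beach": 2})               # words used so far in this story
+ pi = 2.0                                            # penalty hyper-parameter
+
+ def penalized(p, word):
+     phi_prime = max(0.0, phi[word] - rho.get(word, 1.0) + 1.0)
+     return p / (pi * phi_prime + 1.0)               # Eq. (18), before renormalization
+
+ print(penalized(0.10, "was"))    # frequent function word: not penalized (phi' = 0)
+ print(penalized(0.10, "beach"))  # repeated content word: probability decays
+ ```
+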
141
+ <span id="page-5-0"></span>Table 1: Quantitative results on the VIST dataset for METEOR, BLEU-1...4, ROUGE-L and CIDEr. The primary metric is METEOR. The 'Img Feat' column describes the pretrained image features. All models utilize a ResNet [13] backbone except CS&T, which employs an Inception v3 model [36]. FC and Spatial refer to features extracted from the penultimate layer and the preceding one, respectively. F-RCNN features are based on [1].
142
+
143
+ | Method | M | B-1 | B-2 | B-3 | B-4 | R | C | Img Feat |
144
+ |-----------------|------------------|------------------|------------------|------------------|------------------|------------------|----------|----------|
145
+ | CS&T[11] | 34.4 | 60.1 | 36.5 | 21.1 | 12.7 | 29.2 | 7.1 | FC |
146
+ | AREL[42] | 35.0 | 63.8 | 39.1 | 23.2 | 14.1 | 29.5 | 9.4 | FC |
147
+ | KS[45] | 35.2 | 66.4 | 39.2 | 23.1 | 12.8 | 29.9 | 12.1 | FC |
148
+ | HSRL[17] | 35.2 | - | - | - | 12.3 | 29.5 | 8.4 | Spatial |
149
+ | StoryAnchor[49] | 35.5 | 65.1 | 40.0 | 23.4 | 14.0 | 30.0 | 9.9 | FC |
150
+ | SGVST[41] | 35.8 | 65.1 | 40.1 | 23.8 | 14.7 | 29.9 | 9.8 | F-RCNN |
151
+ | SGEmb[15] | 35.6 | 62.2 | 38.7 | 23.5 | 14.8 | 30.2 | 8.6 | F-RCNN |
152
+ | Ours | <b>36.8</b> ±0.1 | <b>68.4</b> ±0.7 | <b>42.7</b> ±0.3 | <b>25.2</b> ±0.2 | <b>15.3</b> ±0.2 | <b>30.2</b> ±0.1 | 10.1±0.2 | F-RCNN |
2110.03753/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2021-11-15T22:28:38.286Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/15.7.3 Chrome/91.0.4472.164 Electron/13.6.1 Safari/537.36" version="15.7.3" etag="cVRt3GnFrzZuoiK9y_tW" type="device"><diagram id="FvTzPOkBsGUhPqUCxIak">7Z1bc+K4Esc/DVVzXijrZsuPk+y5vGzVqZqHM29bTHASagmeIuQy++mPHCyCJQGOttVqkmS3ZgYHDPx/UkvdarUm4vLu+d/r2c/b39t5s5zwYv48Eb9NONdFZf7sLvzaXpC12l64WS/m20vs9cK3xV9Nf7Horz4s5s394Imbtl1uFj+HF6/a1aq52gyuzdbr9mn4tOt2OXzXn7Obxrvw7Wq29K/+bzHf3PZfSxWv1//TLG5u7Tuzov/N3cw+ub/F/e1s3j5tL708R/xzIi7XbbvZ/uvu+bJZdtpZXbY3+teB3+4+2LpZbca8gG9f8DhbPvTfbcKl+X/2x6S6eJzwy4dJ9dv2Uv+BN7+sCpvm2bzHxe3mbmkuMPPP+826/bO5bJft2lxZtSvzzIvrxXLpXJotFzcr8/DKfMrGXL94bNabhdH3a/+Lu8V83r3NxdPtYtN8+zm76t7zyTQmc23dPqzmTfcFCvNoPru/fXnQfYDrdrXpm0ttHvZfzty8eT4oENvJbppr0941m/Uv85T+BUIV075x9o1VVX3rfXpFX/Z8b/eoi/7arG9sN7t7v/Iw/+iRhPGIA3h+fOKxeKSDpyzw8MhjeB4MnscPjofXZUY8KoDHoTBUI6TXHqBjYu1zmnBxrbr/DkI1r9178vYHSPFqqLcofb2ZCgjO9N8XvDwtuHmBGaUPNc49sWf3P7dD9/XiuRP9mOC9rAM+EMalLIdaau1rKUNaSoDWW70vMbmdA2URUycV0+n7+qq5uvL6vvnND62kKoAaJ6sc0yqtUPuKhgQF0LNG1HOuGj2XIT01/9H10VR6Vnh62nvgNNDw4DTpDF73k3qE8oRmiEKzHEIfkvMwgDRCI1oIFnItP4zQmKYj5CR+FKFVgSh0yN0770mEUTSnLR7hoJ3ZLCIgKKbNTeuAEWmhmLY1rRNGo4Wi2tAxjthq/rUL0k92vuieiMOAzfa1zdwJ19+3D+ur/lL/hpvZ+qZxvsgI0fYkCQVN7LV1s5xtFo/DTxHSqX+H/7YL88bHJhDaEXv7lfrXvert3Ur4/cUNpW2l8G71gm731cfRHOMGJqc5vgvg0BSFh0BF0vTjp5LrabH3w1Ox5WM8UlC2dYCtoMZW6KkqStPNypKVqpZDOCWb1jWriqpi5q/dBOfNnbjwsAss7GP84+TYOTXsptfJuiqYFrqWRT2MC0tdGexlwQtRV7XWMhJ7oLdXyWy35YoH2g67A9KSGOnAUMxjjbc8eStAnGOiALA4bebFPk5FDac+OR0ajVMhzqyCS8tpcfIAzpIaTp9BFWtsy4OeegKcYyIdsDhFAGdFDafpnbLe/ezywnbjaixbObxv5TpXzn0BQY+JwKTvt+RG1c4M781eC3dchAFd1SfuCwh6TGQo/fSJXo+up7yujP9jRkfzR+n2PDZlyvRIXptfF8paqbdzV9OyrrrE0c7UVZV2uOuplpKVQncuW1knawXo4aygXSc56zrc3VWsX3yiu3v3BQSNHukKTq/Jzcfq46Cj7bo6DjqdXRdjwl7REf/5Ym0uLNouS/OpuX9JAG3Xi7/a1WZmX5QnmyhrroBImpQRITpSylHWvAHBiYmeN7dAMp5xKVwkTeJ4DzAwzVHSRI/3AAPTTCVNEnkHMDCX70XSBBOysyPpxwoxx4akSShkZ0cB0THHgKQ7Bs6ppWPa+qTbCs6opWPadDnG4x0f2vAk3d7saKzDOt37sQ7rnpCJdSg/rSc2SUthLiVK2IwOML7UUnuUAEvbUjxb2pYc49Unph2IXApqIWpVYiRyKZEtkUvCJoSANQRqS5NKYaR2BSxCutQuCZs8EoWeB9BTW70IjOixuV7ydNoYIF/YbJIovoFlSEFtMVr5W3pjk7/k6TwyQL6wSSRRfGWArybGNwAlNhtM1ojZYBI2dySKrwrwrYnx7frvfhoXGwKKzQ7rbHWe7DAJmy4C1rHJDcxsmFYw5BObVOBwR0wWk7DZI2ATMnod3skeczomTPKYGSQGyWOZcscUgbhbaBQgOYs7ZAyiU8lOGIOEqWSKQDwuNHunNrtT/Bj36EFA58osUxyU+xjMgbArNcpi2A+dKZ67vjF6A6Ufy03nrin0rVehwBq1sVzoY2Bj4yzCXzlJF2dRsHG0YHlNjzPpfRysLpwOGpvg61UDTEcRNloWQZHa2odHMTYq5t0oXcxEwcbEIihSmxe74qsiti9yMXViIKqQ03SOD2z4KwIltXiH14/qyA7ZFfcxbq003k1nm3Xh+La6mBaFrErdVTSQMh1i2EDXOMSDGRI1xLtckd3IKadax1FW7MQoDAgSNnIVAZLavNYF2ZlKKJCeCYcDaWus5wNJbfwMrB7FBhW8DpnO2Sxho0cRHKlFEAJLdwKKozsOA3LkHsfvXx7+4bGkf0zEy8P+A9vG+bcKFMoDriHCiRGlH8r5/uVxDJVzVVuyjGpD7rPadgt7HFMgl3sbRobJ2GbebC6Qr+2LdsS2jdYMcjtUXs04mmaQu5byaibQNIPcdJRXM11jaQa5ZyivZjWaZpBbfrJqpgoszaoxvhxJzZRd0bDtDE8zyBITeTVjaJr5Psq5asbRNIMs5JBXs9ABXWk0O1s/wNOsRtPsbP0AVzNVoGkW8gOkPe3287zOl3pejt1kHptU0YAq5HHIz9NUj9BReLGaKuTbyM+jiF/pCKXy0Ql5UfLzHO99Ogcy0BDoaEh/jfjhjpoPZBaI1UPs/qB3dPqVsMvfOeSE9P5onH0l6oytM+QYdvb47o9vH9kwc61OMgkeCw3BJOR4bofNxUdmIiTLxyTk2G4nmh+byQjblYxJ2nMl0c+iF6VyC27Zmgo4p9GnPVYSXU9W13n1TFqsL8/Mjw19fWV9f4ypStIyfHlmfo6cJceTs/5A7l/ltNoSUWZU9y/3OejZjIOdhnxAmVGNRtKC8LRlVogyJy31nuvE9Hw2OGmx9lznpeeztWn9LgqtE9WmpnW7KLROVNuJfviZDhz/bmfXZPajeMOZ3fn05loHruXRzo3gdqPUYzy+9CjHN3+kGhbOgaRlEYmSO2VllVY4VYBZMcb5hCVbB8hSK/YtXsowHyr/XDI9KP/s4BndgQuHeo1GHXaXYCx1To0676o5H6r1XEo5rPUM09d3M5wUnDk2ZzvcDjhTK1PjxUeqWLstT9wIkiV6GSk73g5YUitWs1ssOTQLesOZosdvBMkStnLUGJY8wJJaEQwXQWmrkr6ZZXnAKU/BErZ+1BiWgfKM9gvSYanL6W
t5t1q7U2UZi1YO7+tUJvLuC0katsZUbK8lN5rqan/a6h3yrGFIe6U43ftCkoYtQTWGdGjeRK9P62m1X3/Z6XvGaRoUYI7lrqZsr/6yU4C5841RCjCzAj2AFbTsBGdcx/p7WTn9Eqi/e/eFJI0e3wrOrcnNx+rjpKMtuzpOOqFlZ2PiXe/v3E6RMSOAsaSZF2SP7XQ1x1zKYowT0zz7sdjD9o+46M1Y0lyN82eBaouSJnScPQtcG5U0G+T8WSCu1TOWNJWE7LxIuoFC1HEhab4J2XmRpzmq/U+6FeBs2jmunU+6X+Bs2jmqPedj/Ny0p0jtfO1BbX07yJEJcih3ZSI2H0shLiDyMS51FrzUMnmUcPDG5mgpnitHy7LNCjsQr2TkTq5RJULalhK50rY4bAoIYEOgtiZpbHH6TC7XIqTM5OJj4hap0fMQemqLFu54Hp3cJU9liUHihU0iicMbWHxk5I61Uu7G59h8L3kqcQwSL2zmSBxeGcJL7rQO7c7GI0dpWR+IMqTAC5suEoc3cFbzzvMmg1exYfbWsI5MdFKYrLIlhXHYJBHArk1uYGbDbALH+sKgx8wS42PiWlnmZPS6vZM45th4kLwxWQ3zxnKljQkKobfgaEBwLnfYIkTnkZ2wCCnzyOzB2/Sm8dTmeYofQx89GOhsiWV2noqXQhgMxWhqoMWwNw4nfLvl1jdvnRR4zptA33gVDK1raoO60MfIxkZdhLuKkjDqImCDam8/aZrcXg6mvUp9scNwoOifu7IGiRI2gBaBktpaSABlbKgscKuE0RQBGyyLQEltnuzrX5aRvdIY2Cl378WmCb0h2NhYBE1qcZAATRnZMc1LzRCspKyq7pR3XQx93lJ0k2zjD2kuuLYlV1NAhg2Cvf3UaXrRLln5IymLPwm+ODkuQ+KEDWzF4KQ22fVxdmYTDqdn0AFxStiAVAxOaiOqv/gXOZ4GOmZCV9TW3c9Iklp8wV/nAyPpDcuQJLlH8vuXhzEnyVM7uwD6xHkuDvqNCGcxMelHe75/eRwD5lwFl74JQxUcckcW7gnP/hQvtH3E1+2IkRsv29ke8BmQrcKTDXKfU27ZNJ5skFuVMstW2jaAIBvkbqPcsik82SA3DOWWrUSTTY3x80jKppjwRlI82cY4VeciW4Unm+/BnK9sGk82yGIQmWUrQ6d4JZLtbL2EgGwKT7az9RICspV4soW8BGnPy/48a747MM23odrDw3dP2iekIQj5DsnXMyPBFBQKzzFkfk9JFr9Rvo9z8VFJ2JfsUk39aUWIg128fAMH83Ddtpv9ALT5xre/t/Ome8b/AQ==</diagram></mxfile>
2110.03753/main_diagram/main_diagram.pdf ADDED
Binary file (51.8 kB). View file
 
2110.03753/paper_text/intro_method.md ADDED
@@ -0,0 +1,55 @@
1
+ # Introduction
2
+
3
+ Graphs are permutation invariant, combinatorial structures used to represent relational data, with wide applications ranging from drug discovery, social network analysis, image analysis to bioinformatics (Duvenaud et al., 2015; Fan et al., 2019; Shi et al., 2019; Wu et al., 2020). In recent years, Graph Neural Networks (GNNs) have rapidly surpassed traditional methods like heuristically defined features and graph kernels to become the dominant approach for graph ML tasks.
4
+
5
+ Message Passing Neural Networks (MPNNs) (Gilmer et al., 2017) are the most common type of GNNs owing to their intuitiveness, effectiveness and efficiency. They follow a recursive aggregation mechanism where each node aggregates information from its immediate neighbors repeatedly. However, unlike simple multi-layer feedforward networks (MLPs) which are universal approximators of continuous functions (Hornik et al., 1989), MPNNs cannot approximate all permutation-invariant graph functions (Maron et al., 2019b). In fact, their expressiveness is upper bounded by the first order Weisfeiler-Leman (1-WL) isomorphism test (Xu et al., 2018). Importantly, researchers have shown that such 1-WL equivalent GNNs are not expressive, or powerful, enough to capture basic structural concepts, i.e., counting motifs such as cycles or triangles (Zhengdao et al., 2020; Arvind et al., 2020) that are shown to be informative for bio- and chemo-informatics (Elton et al., 2019).
6
+
7
+ The weakness of MPNNs urges researchers to design more expressive GNNs, which are able to discriminate graphs from an isomorphism test perspective; Chen et al. (2019) prove the equivalence between such tests and universal permutation invariant function approximation, which theoretically justifies it. As k-WL is strictly more expressive than 1-WL, many works (Morris et al., 2019; 2020b) try to incorporate k-WL in the design of more powerful GNNs, while others approach k-WL
8
+
9
+ ![](_page_1_Figure_1.jpeg)
10
+
11
+ Figure 1: **Shown**: one GNN-AK<sup>+</sup> layer. For each layer, GNN-AK<sup>+</sup> first extracts n (# nodes) rooted subgraphs, and convolves all subgraphs with a base GNN as kernel, producing multiple rich subgraph-node embeddings of the form $\mathsf{Emb}(i \mid \mathsf{Sub}[j])$ (node i's embedding when applying a GNN kernel on subgraph j). From these, we extract and concatenate three encodings for a given node j: (i) centroid $\mathsf{Emb}(j \mid \mathsf{Sub}[j])$ , (ii) subgraph $\sum_i \mathsf{Emb}(i \mid \mathsf{Sub}[j])$ , and (iii) context $\sum_i \mathsf{Emb}(j \mid \mathsf{Sub}[i])$ . GNN-AK<sup>+</sup> repeats the process for L layers, then sums all resulting node embeddings to compute the final graph embedding. As a weaker version, GNN-AK only contains encodings (i) and (ii).
12
+
13
+ expressiveness indirectly from matrix invariant operations (Maron et al., 2019a;b; Keriven & Peyré, 2019) and matrix language perspectives (Balcilar et al., 2021). However, they require O(k)-order tensors to achieve k-WL expressiveness, and thus are not scalable or feasible for application on large, practical graphs. Besides, the bias-variance tradeoff between complexity and generalization (Neal et al., 2018) and the fact that almost all graphs (i.e. $O(2^{\binom{n}{2}})$ graphs on n vertices, Babai et al. (1980)) can be distinguished by 1-WL challenge the necessity of developing such extremely expressive models. In a complementary line of work, Loukas (2020a) sheds light on developing more powerful GNNs while maintaining linear scalability, finding that MPNNs can be universal approximators provided that nodes are sufficiently distinguishable. Relatedly, several works propose to add features to make nodes more distinguishable, such as identifiers (Loukas, 2020a), subgraph counts (Bouritsas et al., 2020), distance encoding (Li et al., 2020), and random features (Sato et al., 2021; Abboud et al., 2021). However, these methods either focus on handcrafted features which lose the premise of automatic learning, or create permutation sensitive features that hurt generalization.
14
+
15
+ **Present Work.** Our work stands between the two regimes of extremely expressive but unscalable korder GNNs, and the limited expressiveness yet high scalability of MPNNs. Specifically, we propose a general framework that serves as a "wrapper" to uplift any GNN. We observe that MPNNs' local neighbor aggregation follows a star pattern, where the representation of a node is characterized by applying an injective aggregator function as an encoder to the star subgraph (comprised of the central node and edges to neighbors). We propose a design which naturally generalizes from encoding the star to encoding a more flexibly defined subgraph, and we replace the standard injective aggregator with a GNN: in short, we characterize the new representation of a node by using a GNN to encode a locally induced encompassing subgraph, as shown in Fig.1. This uplifts GNN as a base model in effect by applying it on each *subgraph* instead of the whole input graph. This generalization is close to Convolutional Neural Networks (CNN) in computer vision: like the CNN that convolves image patches with a kernel to compute new pixel embeddings, our designed wrapper convolves subgraphs with a GNN to generate new node embeddings. Hence, we name our approach GNN-AK (GNN As Kernel). We show theoretically that GNN-AK is strictly more powerful than 1&2-WL with any MPNN as base model, and is not less powerful than 3-WL with PPGN (Maron et al., 2019a) used. We also give sufficient conditions under which GNN-AK can successfully distinguish two non-isomorphic graphs. Given this increase in expressive power, we discuss careful implementation strategies for GNN-AK, which allow us to carefully leverage multiple modalities of information from subgraph encoding, and resulting in an empirically more expressive version GNN-AK<sup>+</sup>. As a result, GNN-AK and GNN-AK+ induce a constant factor overhead in memory. To amplify our method's practicality, we further develop a subgraph sampling strategy inspired by Dropout (Srivastava et al., 2014) to drastically reduce this overhead $(1-3 \times \text{ in practice})$ without hurting performance. We conduct extensive experiments on 4 simulation datasets and 5 well-known real-world graph classification & regression benchmarks (Dwivedi et al., 2020; Hu et al., 2020), to show significant and consistent practical benefits of our approach across different MPNNs and datasets. Specifically, GNN-AK<sup>+</sup> sets new state-of-the-art performance on ZINC, CIFAR10, and PATTERN – for example, on ZINC we see a relative error reduction of 60.3%, 50.5%, and 39.4% for base model being
16
+
17
+ GCN (Kipf & Welling, 2017), GIN (Xu et al., 2018), and (a variant of) PNA (Corso et al., 2020) respectively. To summarize, our contributions are listed as follows:
18
+
19
+ - A General GNN-AK Framework. We propose GNN-AK (and enhanced GNN-AK<sup>+</sup>), a general framework which uplifts any GNN by encoding local subgraph structure with a GNN.
20
+ - Theoretical Findings. We show that GNN-AK's expressiveness is strictly better than 1&2-WL, and is not less powerful than 3-WL. We analyze sufficient conditions for successful discrimination.
21
+ - Effective and Efficient Realization. We present effective implementations for GNN-AK and GNN-AK<sup>+</sup> to fully exploit all node embeddings within a subgraph. We design efficient online subgraph sampling to mitigate memory and runtime overhead while maintaining performance.
22
+ - Experimental Results. We show strong empirical results, demonstrating both expressivity improvements as well as practical performance gains where we achieve new state-of-the-art performance on several graph-level benchmarks.
23
+
24
+ Our implementation is easy-to-use, and directly accepts any GNN from PyG (Fey & Lenssen, 2019) for plug-and-play use. See code at https://github.com/GNNAsKernel/GNNAsKernel.
25
+
26
+ # Method
27
+
28
+ We first introduce our setting and formalisms. Let $G = (\mathcal{V}, \mathcal{E})$ be a graph with node features $\mathbf{x}_i \in \mathbb{R}^d$ , $\forall i \in \mathcal{V}$ . We consider graph-level problems where the goal is to classify/regress a target $y_G$ by learning a graph-level representation $\mathbf{h}_G$ . Let $\mathcal{N}_k(v)$ be the set of nodes in the k-hop egonet rooted at node v. $\mathcal{N}(v) = \mathcal{N}_1(v) \setminus v$ denotes the immediate neighbors of node v. For $\mathcal{S} \subseteq \mathcal{V}$ , let $G[\mathcal{S}]$ be the induced subgraph: $G[\mathcal{S}] = (\mathcal{S}, \{(i,j) \in \mathcal{E} | i \in \mathcal{S}, j \in \mathcal{S}\})$ . Then $G[\mathcal{N}_k(v)]$ denotes the k-hop egonet rooted at node v. We also define $Star(v) = (\mathcal{N}_1(v), \{(v,j) \in \mathcal{E} | j \in \mathcal{N}(v)\})$ to be the star-like subgraph around v. We use $\{\cdot\}$ to denote a multiset, i.e., a set that allows repetition.
29
+
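+ The following short NetworkX sketch illustrates the notation on a toy graph: the k-hop egonet $G[\mathcal{N}_k(v)]$ as an induced subgraph, and the star subgraph $Star(v)$; the helper names are ours, not part of any released code.
+
+ ```python
+ # Toy illustration of the k-hop egonet G[N_k(v)] and the star subgraph Star(v).
+ import networkx as nx
+
+ G = nx.cycle_graph(6)                             # toy graph
+
+ def k_hop_egonet(G, v, k):
+     nodes = nx.single_source_shortest_path_length(G, v, cutoff=k).keys()
+     return G.subgraph(nodes)                      # induced subgraph G[N_k(v)]
+
+ def star(G, v):
+     S = nx.Graph()
+     S.add_node(v)
+     S.add_edges_from((v, u) for u in G.neighbors(v))
+     return S                                      # Star(v): center plus edges to neighbors
+
+ print(sorted(k_hop_egonet(G, 0, 2).nodes()))      # [0, 1, 2, 4, 5]
+ print(sorted(star(G, 0).nodes()))                 # [0, 1, 5]
+ ```
+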
30
+ Before presenting GNN-AK, we highlight the insights in designing GNN-AK and driving the expressiveness boost. **Insight 1: Generalizing star to subgraph.** In MPNNs, every node aggregates information from its immediate neighbors following a star pattern. Consequently, MPNNs fail to distinguish any non-isomorphic regular graphs where all stars are the same, since all nodes have the same degree. Even simply generalizing star to the induced, 1-hop egonet considers connections among neighbors, enabling distinguishing regular graphs. **Insight 2: Divide and conquer.** When two graphs are non-isomorphic, there exists a subgraph where this difference is captured (see Figure 3). Although a fixed-expressiveness GNN may not distinguish the two original graphs, it may distinguish the two *smaller* subgraphs, given that the required expressiveness for successful discrimination is proportional to graph size (Loukas, 2020b). As such, GNN-AK divides the harder problem of encoding the whole graph to smaller and easier problems of encoding its subgraphs, and "conquers" the encoding with the base GNN.
33
+
34
+ We first take a close look at MPNNs, identifying their limitations and expressiveness bottleneck. MPNNs repeatedly update each node's embedding by aggregating embeddings from their neighbors a fixed number of times (layers) and computing a graph-level embedding $\mathbf{h}_G$ by global pooling. Let $\mathbf{h}_v^{(l)}$ denote the l-th layer embedding of node v. Then, MPNNs compute $\mathbf{h}_G$ by
35
+
36
+ $$\mathbf{h}_{v}^{(l+1)} = \phi^{(l)} \left( \mathbf{h}_{v}^{(l)}, f^{(l)} \left( \left\{ \mathbf{h}_{u}^{(l)} | u \in \mathcal{N}(v) \right\} \right) \right) \quad l = 0, ..., L - 1 \; ; \quad \mathbf{h}_{G} \quad = \text{POOL}(\left\{ \mathbf{h}_{v}^{(L)} | v \in \mathcal{V} \right\}) \quad (1)$$
37
+
38
+ where $\mathbf{h}_i^{(0)} = \mathbf{x}_i$ is the original features, L is the number of layers, and $\phi^{(l)}$ and $f^{(l)}$ are the l-th layer update and aggregation functions. $\phi^{(l)}$ , $f^{(l)}$ and POOL vary among different MPNNs and influence their expressiveness and performance. MPNNs achieve maximum expressiveness (1-WL) when all three functions are injective (Xu et al., 2018).
39
+
40
+ MPNNs' expressiveness upper bound follows from its close relation to the 1-WL isomorphism test (Morris et al., 2019). Similar to MPNNs which repeatedly aggregate self and neighbor representations, at t-th iteration, for each node v, 1-WL test aggregates the node's own label (or color) $c_v^{(t)}$ and its neighbors' labels $\left\{c_u^{(t)}|u\in\mathcal{N}(v)\right\}$ , and hashes this multi-set of labels $\left\{c_v^{(t)},\left\{c_u^{(t)}|u\in\mathcal{N}(v)\right\}\right\}$ into a new, compressed label $c_v^{(t+1)}$ . 1-WL outputs the set of all node labels $\left\{c_v^{(T)}|v\in\mathcal{V}\right\}$ as G's fingerprint, and decides two graphs to be non-isomorphic as soon as their fingerprints differ.
41
+
42
+ The hash process in 1-WL outputs a new label $c_v^{(t+1)}$ that uniquely characterizes the star graph Star(v) around v, i.e. two nodes u,v are assigned different compressed labels only if Star(u) and Star(v) differ. Hence, it is easy to see that when two non-isomorphic unlabeled (i.e., all nodes have the same label) d-regular graphs have the same number of nodes, 1-WL cannot distinguish them. This failure limits the expressiveness of 1-WL, but also identifies its bottleneck: the star is not distinguishing enough. Instead, we propose to generalize the star Star(v) to subgraphs, such as the egonet $G[\mathcal{N}_1(v)]$ and more generally k-hop egonet $G[\mathcal{N}_k(v)]$ . This results in an improved version of 1-WL which we call Subgraph-1-WL. Formally,
43
+
44
+ **Definition 3.1** (Subgraph-1-WL). Subgraph-1-WL generalizes the 1-WL graph isomorphism test algorithm by replacing color refinement (at iteration t) by $c_v^{(t+1)} = \text{HASH}(Star^{(t)}(v))$ with $c_v^{(t+1)} = \text{HASH}(G^{(t)}[\mathcal{N}_k(v)]), \forall v \in \mathcal{V}$ where $\text{HASH}(\cdot)$ is an injective function on graphs.
45
+
46
+ Note that an injective hash function for star graphs is equivalent to that for multi-sets, which is easy to derive (Zaheer et al., 2017). In contrast, Subgraph-1-WL must hash a general subgraph , where an injective hash function for graphs is non-trivial (as hard as graph isomorphism). Thus, we derive a variant called Subgraph-1-WL\* by using a weaker choice for HASH( $\cdot$ ) – specifically, 1-WL. Effectively, we *nest* 1-WL inside Subgraph-1-WL. Formally,
47
+
48
+ **Definition 3.2 (Subgraph-1-WL\*).** Subgraph-1-WL\* is a less expressive variant of Subgraph-1-WL where $c_v^{(t+1)} = 1\text{-WL}(G^{(t)}[\mathcal{N}_k(v)])$ .
49
+
50
+ We further transfer Subgraph-1-WL to neural networks, resulting in GNN-AK whose expressiveness is upper bounded by Subgraph-1-WL. The natural transformation with maximum expressiveness is to replace the hash function with a universal subgraph encoder of $G[\mathcal{N}_k(v)]$ , which is non-trivial as it implies solving the challenging graph isomorphism problem in the worst case. Analogous to using 1-WL as a weaker choice for HASH(·) inside Subgraph-1-WL\*, we can use any GNN (most practically, an MPNN) as an encoder for subgraph $G[\mathcal{N}_k(v)]$ . Let $G^{(l)}[\mathcal{N}_k(v)] = G[\mathcal{N}_k(v)|\mathbf{H}^{(l)}]$ be the attributed subgraph with hidden features $\mathbf{H}^{(l)}$ at the l-th layer. Then, GNN-AK computes $\mathbf{h}_G$ by
51
+
52
+ $$\mathbf{h}_{v}^{(l+1)} = \text{GNN}^{(l)} \left( G^{(l)}[\mathcal{N}_{k}(v)] \right) \quad l = 0, ..., L - 1 \quad ; \quad \mathbf{h}_{G} = \text{POOL}(\{\mathbf{h}_{v}^{(L)} | v \in \mathcal{V}\})$$
53
+ (2)
54
+
55
+ Notice that GNN-AK acts as a "wrapper" for any base GNN (mainly MPNN). This uplifts its expressiveness as well as practical performance as we demonstrate in the following sections.
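+ Below is a hedged, minimal sketch of one GNN-AK layer in the sense of Eq. (2): every node is re-embedded by running a small base GNN on its k-hop egonet. The base GNN here is a single ReLU layer with sum aggregation, and only the centroid encoding $\mathsf{Emb}(v \mid \mathsf{Sub}[v])$ is kept; GNN-AK<sup>+</sup> would additionally use the subgraph and context encodings. All names and shapes are illustrative assumptions.
+
+ ```python
+ # Hedged sketch of one GNN-AK layer (Eq. 2): apply a tiny base GNN to each node's egonet.
+ import networkx as nx
+ import numpy as np
+
+ def base_gnn(sub, x, W):
+     # one ReLU message-passing layer on the subgraph (sum aggregation)
+     h = {}
+     for v in sub.nodes():
+         agg = x[v] + sum((x[u] for u in sub.neighbors(v)), np.zeros_like(x[v]))
+         h[v] = np.maximum(0.0, W @ agg)
+     return h
+
+ def gnn_ak_layer(G, x, W, k=1):
+     new_x = {}
+     for v in G.nodes():
+         nodes = nx.single_source_shortest_path_length(G, v, cutoff=k).keys()
+         sub = G.subgraph(nodes)                  # the egonet G[N_k(v)]
+         new_x[v] = base_gnn(sub, x, W)[v]        # centroid encoding Emb(v | Sub[v])
+     return new_x
+
+ G = nx.cycle_graph(6)
+ d = 4
+ x = {v: np.random.randn(d) for v in G.nodes()}
+ W = np.random.randn(d, d)
+ x1 = gnn_ak_layer(G, x, W, k=1)                  # one GNN-AK layer (Eq. 2, left)
+ h_G = sum(x1.values())                           # graph readout POOL (Eq. 2, right)
+ print(h_G.shape)                                 # (4,)
+ ```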
2203.13560/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-11-16T01:53:49.690Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" etag="OJ5IbS-GgWtXnJfoQwtF" version="15.7.3" type="google"><diagram id="4O_1LjlPmK35i5Lr9BFW" name="Page-1">7V1Zc9s4Ev41qtndKql4H4+242Nq4zizzmQyjxQJSUwoUkNRlpxfvwAvkQB4SAJAKTZT5YggBFJ9fGg0upsj9Wa5u4+d1eIx8kAwUiRvN1I/jBRFlgwb/odaXrMWXTWzhnnse3mnfcOz/xMU38xbN74H1rWOSRQFib+qN7pRGAI3qbU5cRxt691mUVC/68qZA6Lh2XUCsvUv30sWWaulmPv2B+DPF8Wd5eIHL52ic/5L1gvHi7aVJvV2pN7EUZRkn5a7GxAg4hV0yb5313C1fLAYhEmfL6hfl9by++/fzD/tZ+0v/+6/Hx5exrKqZOO8OMEm/8n54yavBQ3iaBN6AA0jjdTr7cJPwPPKcdHVLeQ6bFskywCeyfBjHCVO4kchPB3bqP/MD4KbKIjidDAV6LImpe1RmFTaZ+kB29dJHP0AlStSepRXCjagm61A7C9BAmL0PH44zx+RJE1OrRcQJ2BXacpJdQ8iOEz8CrvkV1XVyr6Sy62imBNTz5q2e0GQ1axpUZGB/ItOLnnzcuw9c+CHnD+H8ErSunkFPCi++WkUJ4toHoVOcLtvva5zc9/nYxStcrJ+B0nymuuis0miOocR4+6cpR8g0tw7sbOMQjjcNdj5ybe8D/r8N7rFRM/PPuzyO6Ynr12cQr+jlU8xCKCcvdTVlEbz/KufIx/eouSvZmsTqXLIdW5Len3AdbSJXZCPUVUybFi9EIimgRInnoOEGCgViPLXHS8jqjyERAzGRbkA19cCfrXj+CabHQNx5luBT3vNXvvJJgNSyM3TIBlDYM8B1sylIa3hWmA6YwOgRp2gtkZgZ9Gjip2axAk8NeMssBPSM379ViAhOqmgJDrdw2R6VuBkga1SFVuryKrXoFUpzj4XE2QX3maK0kLAnFyZHrQRWhej+qWoHKj6rDSWZjcZAaTM9bQmVcY/G2ThpbPmOG++QoMnTuC7++vw0xz9f10MA59qWjQWbevNCm/y8QZ0I2TpOm79Ab5AQYBAIn0CW/j3f9HSCcm7ryt3zwaqDw6biRvCttpzYVoF7d4V+uhupqAbq6aZhn2clg2O+2Oe6t3TJgn8EOTtnhP/eEJWX5LqyETS641K2to6O/UHM3x6oKCZTkEz1eCEZqp6Fmh2PJqofdGENZicRnZylgbgB4h/Q3q1itYZPLGcq4Hs6cCkzdW2YaqOwWWuVixj4Mn6PBY6x07Wx6uF1lctmNvXp/GrcS70m+fCbS5JaDKcIrdM61RIzDrrlRO2TLQpT9HQ1kRFx2pHji9sVt0xmFX3rekPf0OTLQ2OaLMtPzgyzwKOjocVoy+sWFxs96s4dl4rHVbIJl83m/aqjAmAjHk2D+sPP2RPwNT+J9eTLtRssEPP5ZZLgbgZWPTrm4/PIx1+U/odEWeJ4AWAALkxIU8dCIjSZDKhYwRDE2Omo38jwiGbHVQ3QXpwMT1kSyJ0vcSDqrIbvJTd6tb1kwy6493fDKitKeZE0esUN0mKmzJJcJkXwW2+BJ85hkUjOLarIJDgFBEXSvBiYG4Un7mubdMoqxqqrXr8KGsPTVnSjcuYsrOZZQ1AWUUamrI0dxtDyroemFrTISgrD03ZHq6kkyjrAp0+sfGmrDI0ZXt4MU4yJdJjCMqqQ1NWZ0xZTnTSBqaT1WM+GjBgALXnX5KLoIHU3VYGDRDbXP0DCDoXxXJu6XeuipXcQh14VWzgbhG9fVVM9Fc7+tut/fmsomVyuXXloZUvXOo6S6Sl2V/Y8imKl4NruSFZVCbU1qsKZb2q8NJxhbTmL4qCMgUkxVLQviyUZLhD1L3pr3KBPhJ7cKHQMWY3hN8ciqG6Vcc4VTJaMRHvL3dhbsPv4IqhCmnB78YxbLhKkrBF3+XDLU4hu5EaPnHZ/bYjuQGEYV4QHpwS3dWNB4KCe3QV8+wcG9dn4C4i3G/MClgM+gM3Agve3+oAlgYnIl9gIZdZaz7AIiYkEQeWMqB6KGAxe3i1zghp9uszs/8CjXBhAsOlctoz7Wm7270bngxB5krBJ9ylfzA84UGMBvYsjOCJuI8tAj4oW6Rj5tBxikAdsmpRqQQcDjrOI0TqSCNlZrmAzrSppWv6iSjAx19DKJUpmUJsC+I+Zrut0NGfk7KT27hf+NgKp8jOAQpvYiu5oRVeJRX+5unx9gtB2zQGpUZAJ/DnKB/PheRAmQDXiAy+6wRX+YWl73kZHoC1/9OZpkMhQuaiCcfVr1GoChwLQsA6V2yCB2GEIsZqDMubGHBExlKryjXBYBwhzeO3zZHCmzIcR2hJPljc1XbhJGm89jaO0kirrQ+fFf6GaAP/3j499wq4+lVZqlt1lpqkjslifS/nkenQZOc0kpz7EsSwJ3b1qGsivqXYe0HSOqxi8rFwiK2qDgunoz8fC0elRQG3BHW2Ak2+NRNO19nezDvkkLLbFQLKDXOK7IqaQ93lYcuKcairOqYuQzvUSU26Dd3IA2i9UMmizBThlp4Q2ciFw2pU0EpUiHEpyHjos02WlyhDQ2pZhbzYol/oXNudVpVTtjvUg/n+xmlARHPAcwEiMQ54HIgGd8Dr5+1F2ycg7nMO/x5VkhPZJyD2DYrilOZ/aqpQAaqNqUL4hr4hwHLUe4QrnqGUSWcgZXxWUycnpHWsT3ApE7I+0WiZS5gtxSe99ea49NZmY+5XXeqoEiYXlLQKoaFvGi35ioFEsCmF8tBWCoVMCN9nS1ermNCTqGnyWBfdNyKSCr7PIZOrEFWoSUbLTmsrJuBmNEGCFM+n/5LSNSS8t1T59O+Rks1YElF6QMv6pRcDOH+CeLwuigmi62EUL52g7ILkYJyzHl3OuV+77MN5OsyHl1DhgerFJHbC9QxF4ubDQ04WHbZR7NXvXv06VthAVtpqGrBS3x6aWNGwXafidc8Pb1TxaJsnYhWPEgat314GPw52GRzAJwvfdhx4ztaVdz714JMqKcPyyaDw6ZxWfSyrtXav+ESVdcULgmpYCba+G2F4rGgJz6IKgpI+8mcQzEap7w+SPy0MeooPnIHKEclQtOIdYnWuRzGJNxoVZwgK3TdxocC3ollF3MvY3rTWFRVnndZfSFEhmqeQh8uGsPAXB5Ydze/eWX5NlmmrlHn3iuBXXj5wNIP0Ykul3FVUCUwWuqwwaev5M4Jkdlu/3buLgtITCHc1ztze0UA4Bkp8sPxQ/zqePXVwf0kAlpuU9bQYf/yXN+GPJ4wpfqCqSZQqXnbZNNQK0zyPupS908dY72YPEM7RMx4RD9HpC7im0o
HcrIxnPCKsozYI3l/uMJ51qbU/J8DtUbrxAtTh6G33bnUQVc2hoZLM4W/TwcVUN89SHQrfY191wPpzUofL9oU0GuLd5TkvBu7xtFxevhLjQPkmnktEKSjrvGPxuFszlyO1eBkFTnmvxANLHVLb3p+T1DannHFe5T29r/LYrvJQTfM6GpOuM6HrO+vC1ncsyzeKRkMZf3vEsWhYvBqKMxgW+2q9QVoRAIYyaXLeASh6inQXxVsn9gjxHXrLUqZE3YjdsrQGC9t9+u19BmE9g2CxQsrAmy82LaOLcTAlHpF4wYGWYoIpr3putbojXruhXblhHDWEKHhFZoBpFA3hlvNo01ad7xpyGRoy/wU1xJRsTEPIOUSohsjM32NQSb8eU/OvB31lj6opE6XGAkjwCaUOij0xSTZY3LjA+p0HnVxofI0PW2prExt/JYo9oWahFGwRQ+8eLwxnTO+ml/uwpXfdRFUk6j427f2K/Ejdw8/BlNTNb/thSmpDRoVS60hCSR6wJrpIYvfYFmJL7MYXADEltilPcFrLE4MSoaGX8i6E3mqPtwucRm++dLUh7JoYYbVJkSxUnw+LYHsxhD3v/aIyv2IiSdaommOhKe1ZFujkM4ih5Zy6TBjl2ovOvFDlIzMvVHzzsTCIRWVeUKoIPkKypyuvZ6h7YAzChRO6qWOzLFw0pCvsBK8XZmLf3VlSaXoXmiMzwJEiG6qQDo0EkDK6VIhrTKbUc3v0dylTn+HKOAFz9PgfgROH6St58ZJtvwbDb24kPgxXi9o6LRwXW0pSVptd7bn/wi0Js/dRQMPcMKYq6bZ43ASJP75z3CSKx84WEguO8AHkeHCGZQUuQGp0w8akhpJSZnGSGj153D16P2++m/c/Puhf/1nOV4ux0igz7+UI3ssR8FVYgeUIKCHuIisSUFVPwObVu+pxcs2/q17/OU+VzkvvmrfEzr0oFPNEVSLyYk0XVRbZrG9E3GnJVJRXOQiV+Oa06zOQ+L6Q+y7xZyrxOMCLFPc/r1+iK/uP3cv8Pnz45D1Ysfw7FeB5bgHcpgdBdNauaksr9w1LV7UxIbcAZJ3TjssX/48kup3efx5fe2tt/Mn/Gnnj5jRgziUdVkJx472kQ0tECCHEFFFvlmsVi1m1xJVopEp0s0uEELPeIv4AgheAeFaVrB6yqNFk8T8HidfbCGw9TQSt/V5qWViE5sFnIoXwNI4QQ/fbOnDKWTxGHkA9/g8=</diagram></mxfile>
2203.13560/main_diagram/main_diagram.pdf ADDED
Binary file (70.3 kB). View file
 
2203.13560/paper_text/intro_method.md ADDED
@@ -0,0 +1,105 @@
1
+ # Introduction
2
+
3
+ Empathy is the ability to perceive what others feel, think in their place, and respond properly. Endowing machines with the ability of empathy has broad application scenarios, including automatic psycho-therapy, intelligent customer service, and empathetic conversational agents.
4
+
5
+ In this work, we focus on a special kind of human-computer empathetic conversation, i.e., emotional support conversation. Distinctively, an emotional support conversation happens between a seeker and a supporter, where the supporter aims to gradually reduce the seeker's distress as the conversation goes on. This makes existing approaches unsuitable for our setting for at least two reasons. Firstly, existing work on emotional chatting learns to predict user emotion using a conversation-level emotion label, which is coarse-grained and static with respect to the conversation context. However, emotion is complex, and user emotion intensity changes as the conversation develops. It is thus necessary to track the seeker's fine-grained mental state at each utterance. Secondly, most empathetic chatbots are trained to respond emotionally in accordance with the predicted coarse-grained emotion class, without considering how to address the seeker's emotional problem. Hence, they are ill-suited for emotional support conversation, whose goal is to help others work through the challenges they face.
6
+
7
+ [!h]
8
+ \centering
9
+ \includegraphics[width=0.45\textwidth]{figures/intro-example.png}
10
+ \caption{An Emotional Support Conversation Example.}
11
+
12
+ To tackle these issues, we propose a novel approach MISC, a.k.a. MIxed Strategy-aware model Integrating COMET, for emotional support conversation. As to the first issue, we introduce COMET, a pre-trained generative commonsense reasoning model, and devise an attention mechanism to selectively adopt the COMET knowledge tuples for fine-grained emotion understanding. As shown in Figure , this allows us to capture the seeker's instantaneous mental state using different COMET tuples. For the second issue, we propose to also consider the response strategy when generating empathetic responses. Instead of modeling the response strategy as a one-hot indicator, we formulate it as a probability distribution over a strategy codebook, and guide the response generation using a mixture of strategies. Finally, our {MISC} produces supportive responses based on both the COMET-enhanced mental information and the distributed strategy representation. The unique design of mixed strategy not only helps to increase the expressed empathy, but also facilitates learning the gradual transition within a long response, as in the last utterance in Figure , which in turn makes the conversation smoother.
13
+
14
+ To evaluate our model, we conduct extensive experiments on the ESConv benchmark and compare with 5 state-of-the-art empathetic chatbots. Based on both automatic metrics and manual judgments, we demonstrate that the responses generated by our model {MISC} are more relevant and empathetic. Besides, additional experimental analysis reveals the importance of response strategy modeling, and sheds light on how to learn a proper response strategy as well as how the response strategy could influence the empathy of the chatbot.
15
+
16
+ In brief, our contributions are as follows: (1) We present a Seq2Seq model {MISC}, which incorporates commonsense knowledge and mixed response strategy into emotional support conversation; (2) We conduct experiments on ESConv dataset, and demonstrate the effectiveness of the proposed {MISC} by comparing with other SOTA methods. (3) We implement different ways of strategy modeling and give some hints on strategy-aware emotional support conversation.
17
+
18
+ \iffalse
19
+
20
+ - We present a Seq2Seq model {MISC}, which incorporates commonsense knowledge and mixed response strategy into emotional support conversation.
21
+ - We identify the importance of strategy modeling and model a mixture of strategies to enhance the supportive responding.
22
+ - We conduct experiments on ESConv dataset, and demonstrate the effectiveness of the proposed {MISC} by comparing with other SOTA methods.
23
+ - We implement different ways of strategy modeling and give some hints on strategy-aware emotional support conversation.
24
+
25
+ \fi
26
+
27
+ # Method
28
+
29
+ In this paper, we use the Emotional Support Conversation dataset, ESConv . Before conversations start, seekers should determine their emotion types, and tell the situation they are dealing with to supporters.
30
+
31
+ Besides, the strategy of every supporter's utterance is annotated, which is most important to our work. In total, there are 8 kinds of strategies, and they are almost evenly distributed. More details are given in the Appendix.
32
+ \iffalse
33
+ [!h]
34
+ \centering
35
+ \includegraphics[width=0.5\textwidth]{figures/strategy-dist.png}
36
+ \caption{The strategy distribution in the original ESConv dataset.}
37
+
38
+ \fi
39
+
40
+ For general dialogue response generation, the target is to estimate the probability distribution $p(\bm{r}|\bm{c})$ of the dataset $\mathcal{D}=\{\bm{c}^{(i)}, \bm{r}^{(i)}\}_{i=1}^{N}$, where $\bm{c}^{(i)}=(\bm{u}_1^{(i)}, \bm{u}_2^{(i)},...,\bm{u}_{n_i}^{(i)})$ consists of a sequence of $n_i$ utterances in the dialogue history, and $\bm{r}^{(i)}$ is the target response. For the sake of brevity, we omit the superscript $(i)$ when denoting a single example in the remaining part.
41
+
42
+ In the setting of emotional support conversation, the seeker's situation $\bm{s}$ is considered as an extra input, which describes the seeker's problem in free-form text. We also denote the seeker's last post (utterance) as $\bm{x}$. Consequently, the target becomes to estimate the probability distribution $p(\bm{r}|\bm{c, s, x})$.
43
+
44
+ The overview of our approach is shown in Figure . Based on blenderbot-small , our model {MISC} consists of three main components: (1) a mental state-enhanced encoder; (2) a mixed strategy learning module; and (3) a multi-factor-aware decoder.
45
+ [htpb]
46
+ \centering
47
+ \includegraphics[width=1.0\textwidth]{figures/model-overview.png}
48
+ \caption{The overview of the proposed MISC which consists of a mental state-enhanced encoder, a mixed strategy learning module, and a multi-factor-aware decoder.}
49
+
50
+ Following common practice, we firstly represent the context using the encoder $\mathtt{E}$:
51
+
52
+ \bm{C} = \bm{\mathtt{E}}(\mathtt{CLS}, \bm{u}_1,\mathtt{EOS}, \bm{u}_2,...,\bm{u}_{n_i})
53
+
54
+ where $\mathtt{CLS}$ is the start-token and $\mathtt{EOS}$ is the separation-token between two utterances.
55
+
56
+ To better understand the seeker's situation, we exploit COMET , a commonsense knowledge generator to supply mental state information related to the conversation. Concretely, we treat the situation $\bm{s}$ as an event, and feed it with different relations into COMET:
57
+
58
+ \bm{B}^{s} = \bigcup\limits_{j=1}^{N_r}\mathtt{COMET}(rel_j, \bm{s})
59
+
60
+ where $N_r$ is the number of pre-defined relations in COMET, and $rel_j$ stands for the $j$-th specific relation, such as \verb|xAttr| and \verb|xReact|.\footnote{Please refer to the appendix file for the definitions of all the relations as well as a brief introduction of COMET.} Note that given a certain event-relation pair, COMET is able to generate multiple ``tails'' of free-form mental state information, $\bm{B}^{s}$ is a set of $N_{s}$ mental state blocks, i.e., $\bm{B}^{s}=\{\bm{b}^s_{j}\}_{j=1}^{N_s}$. Similarly, we can obtain the set of mental state blocks $\bm{B}^{x}$ using the seeker's last post $\bm{x}$.
61
+
62
+ Then, all of the free-form blocks will be transformed into dense vectors using our encoder $\mathtt{E}$:
63
+
64
+ \hat{\bm{H}}^{s}&=[\bm{h}^s_{1,1}, \bm{h}^s_{2,1},...,\bm{h}^s_{N_{s},1}]\\
65
+ \bm{h}^s_{j}&=\mathtt{E}(\bm{b}^s_{j})
66
+
67
+ and the hidden state of each block's first token will be used to represent the corresponding block. However, the COMET blocks are noisy and many of them are irrelevant to the context. We therefore use an attention mechanism to select the strongly relevant blocks. This operation can be expressed as
68
+
69
+ \bm{Z}&= \mathtt{softmax}(\hat{\bm{H}}^{s}\cdot\bm{C}^{\mathrm{T}})\cdot\bm{C}\\
70
+ \bm{H}^{s}&=\mathtt{LN}(\hat{\bm{H}}^{s}+\bm{Z})
71
+
72
+ where $\mathtt{LN}$ is the LayerNorm module . Similarly, we could transform $\bm{x}$ to $\bm{H}^{x}$ following the same method as $\bm{s}$ to $\bm{H}^{s}$. At last, we get the conversation-level and utterance-level representation of seeker's mental state $\bm{H}^{s}$ and $\bm{H}^{x}$, which are enhanced with commonsense information.
73
+
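+ A rough PyTorch sketch of the refinement step above is given below; the shapes and random inputs are placeholders for the encoded COMET blocks and dialogue context, not values from the actual model.
+
+ ```python
+ # Hedged sketch of the block refinement: COMET block states attend to the
+ # context states and are fused through a residual LayerNorm.
+ import torch
+
+ d, n_blocks, n_ctx = 64, 10, 30
+ H_hat = torch.randn(n_blocks, d)      # encoded COMET blocks (first-token states)
+ C = torch.randn(n_ctx, d)             # encoded dialogue context
+ ln = torch.nn.LayerNorm(d)
+
+ Z = torch.softmax(H_hat @ C.T, dim=-1) @ C    # attention over context states
+ H_s = ln(H_hat + Z)                           # refined mental-state representation
+ print(H_s.shape)                              # torch.Size([10, 64])
+ ```
+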
74
+ One straightforward way to predict the response strategy is to train a classifier upon the $\mathrm{CLS}$ states of the context representation $\bm{C}$ from Eq. ():
75
+
76
+ \bm{p}^{g}=\mathtt{MLP}(\bm{C}_1)
77
+
78
+ where $\mathtt{MLP}$ is a multi-layer perceptron, and $\bm{p}^{g}$ records the probabilities of each strategy to be used.
79
+
80
+ To model the complexity of response strategy as discussed before, we propose to employ the distribution $\bm{p}^{g}$ and model a mixture of strategies for response generation.
81
+
82
+ Here, we borrow the idea of VQ-VAE's codebook to represent strategies. The strategy codebook $\bm{T} \in \mathbb{R}^{m\times d}$ represents $m$ strategy latent vectors (here $m$ = 8) with dimension size $d$. By weighting $\bm{T}$ using $\bm{p}^{g}$, we are able to obtain a comprehensive strategy representation $\bm{h}^{g}$
83
+
84
+ \bm{h}^{g}=\bm{p}^{g}\cdot\bm{T}
85
+
86
+ Our codebook-based method has two benefits: (1) It is beneficial when long responses are needed to skillfully reduce the seeker's distress, which is common in emotional support conversation. (2) It is flexible to learn. Intuitively, if a strategy has a higher probability in $\bm{p}^{g}$, it should take greater effect in guiding the support conversation.
87
+ In the extreme case where we have a sharp distribution, a single strategy will take over control.
88
+
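+ The mixed-strategy representation can be sketched as follows; the MLP architecture and the softmax that turns its output into the distribution $\bm{p}^{g}$ are illustrative assumptions.
+
+ ```python
+ # Hedged sketch of the strategy distribution and codebook mixing.
+ import torch
+
+ d, m = 64, 8
+ mlp = torch.nn.Sequential(torch.nn.Linear(d, d), torch.nn.ReLU(), torch.nn.Linear(d, m))
+ T = torch.nn.Parameter(torch.randn(m, d))     # strategy codebook
+
+ c_cls = torch.randn(1, d)                     # [CLS] state of the encoded context
+ p_g = torch.softmax(mlp(c_cls), dim=-1)       # strategy distribution p^g
+ h_g = p_g @ T                                 # mixed strategy representation h^g
+ print(p_g.shape, h_g.shape)                   # torch.Size([1, 8]) torch.Size([1, 64])
+ ```
+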
89
+ What remains is to properly utilize the inferred mental states and the strategy representation. To expose this information to the decoder, we modify the backbone's cross-attention module as:
90
+
91
+ \bm{A}^{c}&=\mathtt{CROSS\text{-}ATT}(\bm{O}, \bm{H}) \\
92
+ \bm{A}^{s}&=\mathtt{CROSS\text{-}ATT}(\bm{O}, \bm{H}^{s})\\
93
+ \bm{A}^{x}&=\mathtt{CROSS\text{-}ATT}(\bm{O}, \bm{H}^{x}) \\
94
+ \bm{A}^{g}&=\mathtt{CROSS\text{-}ATT}(\bm{O}, \bm{h}^{g}) \\
95
+ \bm{O}^{'}&=\mathtt{LN}(\bm{A}^{c} + \bm{A}^{s} + \bm{A}^{x} + \bm{A}^{g} + \bm{O})
96
+
97
+ where $\mathtt{CROSS\text{-}ATT}$ stands for the backbone's cross-attention module, and $\bm{O}$ denotes the hidden states of the decoder, which produces the final response by interacting with the multiple factors.
98
+
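+ A sketch of this multi-factor fusion is shown below, with a generic single-head attention standing in for the backbone's cross-attention module; all shapes are illustrative.
+
+ ```python
+ # Hedged sketch of the multi-factor cross-attention fusion with residual LayerNorm.
+ import torch
+
+ d = 64
+ ln = torch.nn.LayerNorm(d)
+
+ def cross_att(O, H):
+     # single-head scaled dot-product attention (stand-in for CROSS-ATT)
+     return torch.softmax(O @ H.T / d ** 0.5, dim=-1) @ H
+
+ O = torch.randn(12, d)        # decoder hidden states
+ H, H_s, H_x = torch.randn(30, d), torch.randn(10, d), torch.randn(10, d)
+ h_g = torch.randn(1, d)       # mixed strategy representation
+
+ O_new = ln(cross_att(O, H) + cross_att(O, H_s) + cross_att(O, H_x) + cross_att(O, h_g) + O)
+ print(O_new.shape)            # torch.Size([12, 64])
+ ```
+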
99
+ Based on blenderbot-small, we jointly train the model to predict the strategy and produce the response:
100
+
101
+ \mathcal{L}_r &= -\sum\limits_{t=1}^{n_r}\mathtt{log}(p(r_t|\bm{r}_{<t},\bm{c},\bm{s}, \bm{x})) \\
102
+ \mathcal{L}_g &=-\mathtt{log}(p(g|\bm{c},\bm{s}, \bm{x}))\\
103
+ \mathcal{L} &= \mathcal{L}_r + \mathcal{L}_{g}
104
+
105
+ where $n_r$ is the length of response, $g$ is the true strategy label, $\mathcal{L}_{g}$ is the loss of predicting strategy, $\mathcal{L}_r$ is the loss of predicting response, and $\mathcal{L}$ is combined objective to minimize.
2203.13685/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2203.13685/paper_text/intro_method.md ADDED
@@ -0,0 +1,112 @@
1
+ # Introduction
2
+
3
+ In human communication, speakers often adjust their language production by taking into consideration listeners' personality, background knowledge, perceptual or physical capabilities etc [\(Clark,](#page-9-0) [1996\)](#page-9-0). Recent years have seen an increasing amount of work that explores pragmatic reasoning based on Rational Speech Act (RSA) [\(Andreas](#page-9-1) [and Klein,](#page-9-1) [2016;](#page-9-1) [Fried et al.,](#page-9-2) [2018a,](#page-9-2)[b;](#page-9-3) [White et al.,](#page-10-0) [2020;](#page-10-0) [Cohn-Gordon et al.,](#page-9-4) [2018\)](#page-9-4), multi-agent emergent communication framework [\(Lazaridou et al.,](#page-10-1) [2020;](#page-10-1) [Lazaridou and Baroni,](#page-10-2) [2020\)](#page-10-2), and Theory of Mind in communication [\(Bara et al.,](#page-9-5) [2021;](#page-9-5) [Zhu](#page-10-3) [et al.,](#page-10-3) [2021\)](#page-10-3). However, except for [\(Zhu et al.,](#page-10-3) [2021\)](#page-10-3),
4
+
5
+ <span id="page-0-0"></span>![](_page_0_Picture_8.jpeg)
6
+
7
+ Literal Speaker: There is an owl on the table. Rational Speaker: There is a pizza on the table. Listener's Disparity: understands hypernym of food only. Pragmatic Rational Speaker: There is food on the table.
8
+
9
+ Figure 1: TASK: Given two images, the speaker generates a description for the target image and asks the listener to pick out the image described. Both players win if the listener picks the correct one. In this example, a Literal Speaker could generate multiple captions that suit the target, such as the one above, whereas a Rational Speaker limits the description to the unique features of the target (e.g. pizza). If the listener only understands the hypernym of food (disparity), a Pragmatic Rational Speaker would learn the disparity and use *food* instead of *pizza* to help the listener understand.
10
+
11
+ most previous works assume that the listeners and the speakers have the same background knowledge and capabilities, including vocabulary size, visual access, and relative locations. This assumption is a great simplification of real-world communication where speakers and listeners often have various types of disparities.
12
+
13
+ To address this limitation, this paper extends the Rational Speech Act (RSA) [\(Frank and Good](#page-9-6)[man,](#page-9-6) [2012\)](#page-9-6) model towards rational agents learning to adapt behaviors based on their experience with the listener. The design choice of our model is inspired by the human cognitive system [\(Cowan,](#page-9-7) [2008;](#page-9-7) [Wardlow,](#page-10-4) [2013\)](#page-10-4) where a limited-capacity *working memory* is built on top of the *long-term memory* to adjust the output to be task and environment specific. Each communication is a modification on the long-term memory [\(Reed,](#page-10-5) [2012\)](#page-10-5) with situation-specific factors. In our framework, we fix the long-term memory which captures
14
+
15
+ <sup>∗</sup>Work done during undergraduate study at the University of Michigan.
16
+
17
+ language structure for communication, and introduce a light-weight working memory [\(Miyake and](#page-10-6) [Shah,](#page-10-6) [1999\)](#page-10-6) for the Pragmatic Rational Speaker to modify and accommodate two goals: 1) a task goal, which retrieves relevant information from the long-term memory and accomplishes the task, and 2) a disparity goal, which learns and adjusts the conversation to accommodate the listener's disparity through reinforcement learning. We separate the components as they are independent of each other in utility, and can be easily switched and adapted for new tasks and new environments.
18
+
19
+ Different from previous works which only demonstrate how learned models affect task performance (e.g. [\(Shridhar et al.,](#page-10-7) [2020;](#page-10-7) [Zhu et al.,](#page-10-3) [2021;](#page-10-3) [Corona et al.,](#page-9-8) [2019\)](#page-9-8)), one of our goals is to also provide transparency on what models have indeed learned towards the end goal. It is well established that end-to-end neural models can often take advantage of spurious data bias to gain end-task performance. Models that only report end measures without showing their internal workings are not sufficient to tell the whole story about a model's abilities.
20
+
21
+ To serve this goal, we situated our investigation in the context of a referential game[1](#page-1-0) as shown in Figure [1.](#page-0-0) We carefully curated a dataset to simulate two types of disparity: *knowledge disparity* and *perceptual disparity*. Our empirical results demonstrate that our model is able to significantly improve the collaborative game performance by shifting communication towards the language that the listeners with disparities are able to understand. In addition, our results show that separating working memory from long-term memory leads to faster learning and better performance than the previous model which conducted joint end-to-end learning.
22
+
23
+ Our contributions are the following. 1) Following human cognition, we demonstrate the benefits of separating working memory from the long-term memory, compared to end-to-end joint training. 2) We propose a new dataset to simulate multiple distinct types of disparities, and demonstrate the pragmatic adaptability of our model. 3) Instead of focusing on mere end task performance, we show model's strong language shift ability to accommodate listener's disparities. The dataset and code are available through [https://github.com/sled-group/](https://github.com/sled-group/Pragmatic-Rational-Speaker) [Pragmatic-Rational-Speaker](https://github.com/sled-group/Pragmatic-Rational-Speaker) to facilitate future work on pragmatics and theory of mind in language interpretation and generation.
24
+
25
+ # Method
26
+
27
+ Given a pair of images $m_0, m_1$, the target image indicator $t \in \{0, 1\}$, and the listener's disparity $d$, the speaker generates a caption $c$ for the target image $m_t$, and the listener needs to pick out the correct target $t$ given $c$. Both receive a reward of +1 upon a correct choice, and −1 otherwise.
28
+
29
+ Following the RSA model, as shown Figure [2,](#page-3-0)
30
+
31
+ <span id="page-3-0"></span>![](_page_3_Figure_0.jpeg)
32
+
33
+ Figure 2: Speaker and Listener Models: Literal Speaker S<sup>0</sup> uses an object detector and image captioning module to generate a list of candidate captions in the fixed long-term memory. The Rational Speaker S<sup>1</sup> simulates an internal listener to rank (illustrated by color gradient) the candidate captions by their uniqueness in describing the target image. The Pragmatic Rational Speaker $S_1^d$ interacts with the actual listener to rerank the captions and pick out the best one to accommodate the disparity and the task goal. Both the simulated listener and the disparity adjustment components are inside the working memory for task-specific and disparity-specific adjustments.
34
+
35
+ we start by building the Literal Speaker S<sup>0</sup>, and gradually increase model structure and functionality with the vanilla Rational Speaker S<sup>1</sup> and the Pragmatic Rational Speaker $S_1^d$. Upon retrieving a list of candidate captions C from the long-term memory, the final goal for $S_1^d$ is to output, in the working memory, the best caption c that accommodates both 1) the task goal: describe the unique features of the target image, and 2) the disparity goal: learn and accommodate the listener's disparity.
36
+
37
+ Table [1](#page-2-0) is a brief summary of each model. The Literal Speaker S<sup>0</sup> generates candidate captions c for a given image m (Eq [1\)](#page-3-1), which serves as the long-term memory. The Rational Listener L<sup>1</sup> picks out an image as the target given the speaker's description (Eq [2\)](#page-3-2). The vanilla Rational Speaker S<sup>1</sup> achieves the *task goal* by simulating the listener's mind internally in its working memory (Eq [3\)](#page-3-3). $L_1^d$ incorporates the disparity into the Rational Listener. The Pragmatic Rational Speaker $S_1^d$ adds a light-weight disparity adjustment layer (Eq [5\)](#page-3-4) to learn and accommodate the listener's disparity through interactions, and achieves both goals. Each component can be easily switched and adapted to new tasks or environments.
38
+
39
+ $$\mathbf{S_0}: P(c|m_t) \tag{1}$$
40
+
41
+ $$\mathbf{L_1}: P(t|m_0, m_1, c) \propto P_{S_0}(c|m_t) \cdot P(m_t)$$
42
+ (2)
43
+
44
+ <span id="page-3-3"></span>
45
+ $$\mathbf{S}_{1} : P(c|m_{0}, m_{1}, t) \propto P_{L_{1}}(t|m_{0}, m_{1}, c) \cdot P(c|m_{0}, m_{1})$$
46
+ (3)
47
+
48
+ $$\mathbf{L_1^d} : P(t|m_0, m_1, c, d) \propto P_{S_1}(c|m_0, m_1, t, d) \cdot P(t|m_0, m_1, d)$$
49
+ (4)
50
+
51
+ <span id="page-3-4"></span>
52
+ $$\mathbf{S_1^d} : P(c|m_0, m_1, t, d) \propto P_{L_1^d}(t|m_0, m_1, c, d) \cdot P(c|m_0, m_1, d)$$
53
+ (5)
54
+
55
+ The Literal Speaker S<sup>0</sup> (Figure [2\)](#page-3-0) is an object detection based image captioning module that generates caption candidates for the target image.
56
+
57
+ $$\begin{aligned} o_1, \dots, o_k, b_1, \dots, b_k &= \mathtt{ObjDet}(m_t) \\ e_1, \dots, e_k &= \mathtt{WordEmb}(o_1, \dots, o_k) \\ c_1, \dots, c_n &= \mathtt{Transformer}(e_1, \dots, b_1, \dots) \end{aligned} \tag{6}$$
58
+
59
+ <span id="page-3-2"></span><span id="page-3-1"></span>For a given target image $m_t$, since it is important to ground words to the scenes in order to control the disparities in vocabularies, we applied the object detector YOLO3 [\(Redmon and](#page-10-17) [Farhadi,](#page-10-17) [2018\)](#page-10-17) to extract a list of $k$ detected objects $O = \{o_1, o_2, \dots, o_k\}$ and their corresponding bounding boxes $B = \{b_1, b_2, \dots, b_k\}$. Each image keeps at most max\_obj = 9 detected objects, and the name of each is embedded with a pre-trained BERT [\(Devlin et al.,](#page-9-18) [2019\)](#page-9-18) word embedding $E = \{e_1, e_2, \dots, e_k\}$. These embeddings are then concatenated with their bounding box locations, and sent to the Transformer Decoder to generate beam\_size = 30 candidate captions $C = \{c_1, c_2, \dots, c_n\}$ for each target image.
60
+
61
+ Without disparity concerns, the Rational Listener picks out the image that they believe is the target.
62
+
63
+ $$\begin{split} g_0 &= \texttt{FT\_Transformer}(m_0, c) \\ g_1 &= \texttt{FT\_Transformer}(m_1, c) \\ t &= \operatorname{argmax}_{i \in \{0,1\}} \texttt{CosSim}(g_i, c) \end{split} \tag{7}$$
64
+
65
+ Recall that S<sup>0</sup> used a Transformer decoder to connect the image and its corresponding captions. We reuse the same Fixed pre-trained Training-mode Transformer module (named FT\_Transformer) to decide which image does the caption ground better in. Adopting the idea of teacher-forcing language training, the output (gi) of FT\_Transformer with an input pair (m<sup>i</sup> , c) should closely resemble the original input c if the input image m<sup>i</sup> is indeed the one used to generate the caption c. By calculating the cosine similarity of each (g<sup>i</sup> , c) pair, the image that grounds better (higher CosSim) in the description would be chosen as the target.
66
+
67
+ This module allows the agents to quickly and accurately make the decisions without further training. In theory, if the speaker and the listener were to have the exact same brain (same model and weights), the performance of this task should approach 100%. The results of "No Disparity" speaker in Figure [3](#page-5-0) confirmed the design choice.
68
+
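+ A hedged sketch of this decision rule (Eq. 7) follows; the `ground` function is a random stand-in for the frozen FT\_Transformer so that the example runs end to end, and is not the actual grounding module.
+
+ ```python
+ # Hedged sketch of the Rational Listener's choice: pick the image whose
+ # re-generated caption embedding is most similar to the received caption.
+ import torch
+ import torch.nn.functional as F
+
+ d = 32
+ proj = {0: torch.randn(d, d), 1: torch.randn(d, d)}   # one stand-in per image
+
+ def ground(image_id, caption_vec):
+     return proj[image_id] @ caption_vec               # stand-in for g_i = FT_Transformer(m_i, c)
+
+ caption = torch.randn(d)
+ scores = [F.cosine_similarity(ground(i, caption), caption, dim=0) for i in (0, 1)]
+ target_guess = int(torch.stack(scores).argmax())      # listener's choice t
+ print(target_guess)
+ ```
+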
69
+ Without disparity concerns, the Rational Speaker (S1) fulfills the task goal by simulating (Figure [2\)](#page-3-0) the Rational Listener (L1)'s behavior, and rank the candidate captions generated by the Literal Speaker (S0) according to how well they can describe the target image apart from the distractors. This design is under the fair assumption that both speakers and listeners are aware of the collaborative game goal, but can be switched for other task purposes.
70
+
71
+ For $i \in \{0, \dots, n\}$, where $n = |C|$:
+
+ $$t_i, p_i = \texttt{Simulate\_L}_1(m_0, m_1, c_i), \qquad c = c_{\texttt{argmax}_i[[t_i = t^*]] \cdot p_i} \tag{8}$$
76
+
77
+ Given an image pair $(m_0, m_1)$ and a list of candidate captions $C = \{c_1, \cdots, c_n\}$ generated by $S_0$, the Rational Speaker goes through each caption $c_i$ and simulates how well the listener (Simulate\_L1) would pick out the correct target image. If a candidate caption $c_i$ helps the simulator pick out the correct target image (i.e. $t_i == t^*$) with high confidence ($p_i$), then it will be chosen as the final caption sent over to the actual listener. The simulated listener shares the same architecture as $L_1$ and initializes its weights from the pre-trained $S_0$. By doing so, the Rational Speaker takes the listener's intention into account and achieves the task goal.
80
+
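+ The ranking in Eq. (8) can be illustrated with the following toy sketch, where the candidate captions and the simulated listener's outputs are hard-coded purely for the example.
+
+ ```python
+ # Toy sketch of the task-goal ranking (Eq. 8): prefer captions that make the
+ # simulated listener pick the correct image with high confidence.
+ captions = ["an owl on the table", "a pizza on the table", "a wooden table"]
+ simulated = [(0, 0.9), (1, 0.8), (1, 0.55)]   # (predicted target t_i, confidence p_i)
+ t_star = 1                                    # true target image index
+
+ scores = [(p if t == t_star else 0.0) for t, p in simulated]
+ best = captions[max(range(len(captions)), key=lambda i: scores[i])]
+ print(best)   # "a pizza on the table"
+ ```
+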
81
+ In the real world, however, it is hardly the case that different agents have the exact same knowledge background, experiences, physical capabilities, etc. The listener's decision making process is influenced by various kinds of disparities d.
82
+
83
+ To study speaker's ability of situated language adjustment, we created two representative types of listeners with different knowledge background and visual capabilities by training different caption grounding modules (FT\_Transformer) with the datasets assembled in Section [3.](#page-2-1) These disparities would challenge the speaker model to adjust the language at different levels.
84
+
85
+ - 1. $L_1^{d_1}$: Hypernym. With limited vocabulary and knowledge in a certain domain, people tend to refer to objects in their hypernym form (e.g. "animal" instead of "cat"). In this experiment, we create listeners that would refer to all the detected objects by their hypernyms. This disparity would require the speaker to switch individual words that share similar meanings.
86
+ - 2. $L_1^{d_2}$: Limited Visual. Due to the physical orientation or impaired vision capability, it is likely that some objects are blocked or hardly visible to one party but not the other. In this experiment, we remove all the animal objects from the listener's visually detected object list ($O$), and replace the relevant descriptions with the special token '[UNK]'. This disparity would require the speaker to shift attention, and choose alternative objects to describe.
87
+
88
+ We investigate listeners with a subset of the speaker's capabilities, under the argument that in the opposite case the listener could simply use a subset of its own knowledge to achieve the best performance without requiring the speaker to adjust its speech. Other disparities can be inferred through transfer learning or are left for further investigation with broader information access and datasets.
89
+
90
+ <span id="page-5-0"></span>![](_page_5_Figure_0.jpeg)
91
+
92
+ ![](_page_5_Figure_1.jpeg)
93
+
94
+ Figure 3: Referential game accuracy: The Pragmatic Rational Speakers significantly outperform Literal Speakers and vanilla Rational Speakers across different types of disparities. Word-level models achieve higher performance and are much closer to the No Disparity upper bound than sentence-level communication.
95
+
96
+ On top of the Rational Speaker ($S_1$), the Pragmatic Rational Speaker incorporates a disparity adjustment layer to learn and accommodate the listener's disparity through emergent communication.
97
+
98
+ <span id="page-5-1"></span>For
99
+ $$i \in \{0, \dots, n\}$$
100
+ , where $n = |C|$ :
101
+ $$q_i = \text{MLP}(\texttt{SentenceEmb}(c_i))$$
102
+
103
+ $$a_i = [[t_i == t^*]] \cdot p_i \cdot q_i$$
104
+
105
+ $$c = c_{\text{argmax}_i a_i}$$
106
+ (9)
107
+
108
+ We use a pretrained BERT model to embed each candidate caption $c_i$, add a single MLP layer, and approximate the REINFORCE policy through Equation 9. The reward ($r_{c^*}$) for each chosen caption $c^*$ is +1 or -1. The loss is calculated over all chosen captions in each batch (Eq 10).
109
+
110
+ $$L = -\sum_{c^*} \log(a_{c^*}) \cdot r_{c^*} \tag{10}$$
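+
+ A minimal PyTorch sketch of the adjustment layer and this loss. The listener confidences `p`, correctness indicators `correct`, and 768-dimensional sentence embeddings `emb` are assumed precomputed; the sigmoid on the MLP output (to keep $a_i$ positive inside the log) is an assumption, not stated in the paper:
+
+ ```python
+ import torch
+
+ mlp = torch.nn.Linear(768, 1)  # disparity adjustment layer over BERT embeddings
+
+ def choose_caption_and_loss(emb, p, correct, reward):
+     q = torch.sigmoid(mlp(emb)).squeeze(-1)    # q_i in Eq. 9
+     a = correct.float() * p * q                # a_i = [[t_i = t*]] * p_i * q_i
+     idx = torch.argmax(a)                      # chosen caption c*
+     loss = -torch.log(a[idx] + 1e-8) * reward  # one term of Eq. 10, reward in {+1, -1}
+     return idx, loss
+ ```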
111
+
112
+ We conducted the same sets of experiments using individual words (object names) instead of sentences to demonstrate the effects of working memory on disparity accommodation and internal task simulation, reducing the noise introduced by the imperfect image description generator. The simplified pipeline uses the detected object-name embeddings for disparity adjustment, and the listener picks the target image by simple word matching.
2205.07872/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-11-11T10:03:38.567Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36" version="15.7.3" etag="saX_SKv9O7yuYEWIg9nn" type="google"><diagram id="MIY60hqaUjMZMudiYD47">7VhLb9w2EP41iyQGbGgl7zY+xus4AYqiBxvImZJGEmuKlElqH/31nRnqmd1tnbZog7oHr6jRPD/ODMdcJJt6/8mKpvrJ5KAWcZTvF8ndIo6XURTjgyiHQFlFy0Aorcw7ppHwIH+FXrKjtjIHN2P0xigvmzkxM1pD5mc0Ya3ZzdkKo+ZWG1F2FqOR8JAJBUdsX2Tuq0B9v5pwfwZZVr1ljDh8qUXP3BFcJXKzm5CSj4tkY43xYVXvN6AIvB6XIHd/5uvgmAXtXyLQbcRWqLaLbRGvFYre5nJL/vlDF/T6uSWnbj3s/aVQstSL5ANy/NI6L4sDrvAjg6n9peMto8/LdbMfhXFVdk82QswkI7K5kU9gbCnFVPCEK99kKu0JG5TCGHo6YjN8Yj1rUTe41qlrJuLIFpyda0UyAzWn/mewW91eXPyozU6TIWmd16IGXF9f31xcLFZ3lEmOigr/OJEOICw+qKAo2xUx7yQmfeCppPPGUsCmIJIu+UVoYpd10yontySTQiW20pCuXWV6M94K7QqwFoi/sKZmYVLXeoc9gUwY10gvKMlR5L4hcOeIHYORiuyptKbV+WVmFFplTGyZvo2jFSvYMEbxuL6J3h3j9YiKtSHTZgs2Nw7OpM8VKXoBuBYaYz2HXgnyvYIBroohKaVVhZXApNSaJ2JoKWolHEl8gVyDy8UhOB5kp9pcmzp4brFhKNqLHTBIRvfQCpWZioMKspnJhNS8SVKX0FnC5bglj1Vrg0nsy/jzIHxr8Z3Cvmtt4PUhgITOAWbt1G16xwC1YsOEr8PPMQue8LmKLtcRv0v9RExvUwA74iMsPZTETbLvgl6nZVHAPArArSLzBew4fdD1KVS5pF9tOHMUQENBfOZME2S0dazvPEoWylYJy+BKzbEA92jaHrKTtmNooCXHS9nPKOH50NVOjzZXTgfecyu0l54LiiLPWyu85L1LoTAWyNmf9WQLKBY+mdj0S3LQh2IOBgs8wBjj6YYEH/tUdw2nzTdV2Wqssjhaj1W2ujlRZR7TPe9TqxIBJlk7UMWk3IIbfcuM2FuCUWOHSg+8AfZpugFhW6xpxpC4nvTV6z4D+vPTnoj7FcPREz5o7A1cdC6MD8dTxWvG63HHp7d2XuiMWxv3L9fKLJzXwnuoGz7jcGrl/smAconP+qUVJf078R2X49nWx1oKUUs8BVjPMGYu4iS5xmc5QD6nHxtbXv3FZvv3jTSzHjsmf3rSoyM3rgfT0bg64cLvjORDjd0rUZZhKuSD8Ci/hoM5zKvO4Y4z/5tWu9bCGzqzMevKMJ6KMHQAZySLdBOAUTwPwBa1Y0JfnUPj/wZ9yscTdv+RmomvziXR9zit/KuV9BgGvTDt/fmqevxhOS2pP6qT0zn5NTWewRHz7gDdZCzx866SHh6aUGk7PCmQVvladZ+dp2Guv66JidLfvUQhDzKauJE5wVdhs+7aKR4sY//DYjh70bIcrm/qPZZ3DZ47SSew7u+uDvOrrN14f5S872jV5O5odd0RRXdnVQ6qx2sdXHQ3O/3reIPE3yb3cMnH3wA=</diagram></mxfile>
2205.07872/main_diagram/main_diagram.pdf ADDED
Binary file (25 kB). View file
 
2205.07872/paper_text/intro_method.md ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ For decades, suicide has been one of the leading causes of death [\(CBHSQ,](#page-8-0) [2020\)](#page-8-0). The suicide rate in the United States increased from 10.5 per 100,000 in 1999 to 14.2 in 2018, a 35% increase [\(Hedegaard et al.,](#page-8-1) [2020\)](#page-8-1). Globally, 740,000 people
4
+
5
+ [\*Name\*] is a 22 year old male with a history of angry and impulsive behavior who is transferred from an outside hospital s/p Tylenol overdose. [\*Name\*] reports that he and his girlfriend broke up last Wednesday, and that he subsequently went on an alcohol and cocaine binge lasting from Thursday to Saturday. He has used alcohol and cocaine regularly in the past, but he denies having had a binge of this quantity or duration before. On Saturday night, [\*\*Name\*\*] told his father that he had tried to hang himself at a nearby park, but the rope had broken.
6
+
7
+ Two instances of suicide attempts are annotated in the paragraph.
8
+
9
+ - 1. [\*Name\*] ... overdose: Annotated for suicide attempt and is assigned 'unsure' category as there is no definite documentation that it is a suicide attempt.
10
+ - 2. On Sat ... broken: This is also annotated for suicide attempt and is assigned the category: 'T71' (asphyxiation, hanging).
11
+
12
+ Figure 1: An example of *positive* and *unsure* evidence annotations for SA in an EHR note.
13
+
14
+ commit suicide each year. The rates of suicidal behaviors, suicide attempt (SA) and suicide ideation (SI), are much higher [\(WHO,](#page-9-0) [2021\)](#page-9-0).
15
+
16
+ A prior study shows that a large proportion of suicide victims sought care well before their death [\(Kessler et al.,](#page-8-2) [2020\)](#page-8-2). Suicidal behaviors, including SA and SI, are recorded by clinicians in electronic health records (EHRs). This knowledge can in turn help clinicians assess the risk of suicide and make prevention efforts [\(Jensen et al.,](#page-8-3) [2012\)](#page-8-3). The diagnostic ICD codes include suicidality codes for both SA and SI. However, a study has shown that ICD codes capture only 3% of SI events, while 97% of SI events are described in notes [\(Anderson et al.,](#page-8-4) [2015\)](#page-8-4). In addition, of patients described with SA in their EHR notes, only 19% had the corresponding ICD codes [\(Anderson et al.,](#page-8-4) [2015\)](#page-8-4). Therefore, it is important to develop natural language
17
+
18
+ <span id="page-0-0"></span><sup>1</sup>The annotations, code and the models are availble at [https://github.com/bsinghpratap/ScAN.](https://github.com/bsinghpratap/ScAN)
19
+
20
+ processing (NLP) approaches to capture such important suicidality information.
21
+
22
+ Researchers have developed NLP approaches to detect SA and SI from EHR notes [\(Metzger](#page-8-5) [et al.,](#page-8-5) [2017;](#page-8-5) [Downs et al.,](#page-8-6) [2017;](#page-8-6) [Fernandes et al.,](#page-8-7) [2018;](#page-8-7) [Cusick et al.,](#page-8-8) [2021\)](#page-8-8). These studies either used rule-based approaches [\(Downs et al.,](#page-8-6) [2017;](#page-8-6) [Fernandes et al.,](#page-8-7) [2018;](#page-8-7) [Cusick et al.,](#page-8-8) [2021\)](#page-8-8) or built the SA and SI identification models on a small set [\(Metzger et al.,](#page-8-5) [2017\)](#page-8-5) or private set [\(Bhat and](#page-8-9) [Goldman-Mellor,](#page-8-9) [2017;](#page-8-9) [Tran et al.,](#page-9-1) [2013;](#page-9-1) [Haerian](#page-8-10) [et al.,](#page-8-10) [2012\)](#page-8-10) of EHR notes. It is also difficult to compare the results of those studies as they varied in EHR data, data curation, as well as NLP models, which were not made available to the public.
23
+
24
+ In this study, we present ScAN: Suicide Attempt and Ideation Events Dataset, a publicly available EHR dataset that is a subset of the MIMIC-III data [\(Johnson et al.,](#page-8-11) [2016\)](#page-8-11). ScAN contains 19,690 expert-annotated SA and SI events with their attributes (e.g., methods of SA) over 12,759 EHR notes. Specifically, experts annotated suicidality evidence, i.e., sentences relevant to SA and SI events during a patient's stay at the healthcare facility; an example of SA annotations is shown in Fig [1.](#page-0-1) The evidences were put together to assess whether the patient had an SA or SI event.
25
+
26
+ We also present ScANER (Suicide Attempt and Ideation Events Retriever), a RoBERTa-based NLP model built on a multi-task learning framework that retrieves evidences from the EHRs and then predicts a patient's SA or SI event from the complete set of EHR notes of the hospital stay using a multi-head attention model. We focus on predicting SA and SI from all the EHR notes during a patient's stay because, over the whole duration, multiple EHR notes and note types are generated, including *admission* notes, *nursing* notes, and *discharge summary* notes, and suicidal information is described in multiple notes throughout the stay. For example, a patient was admitted to the hospital with an opioid overdose. It was initially documented in the admission note as an SA, but later dismissed as an accident after the physician's evaluation. In another example, an opioid overdose admission was first documented as an accident on admission, but later documented as an SA event after clinical assessment. Both ScAN and ScANER capture SA and SI information at the hospital-stay level. ScANER is able to retrieve suicidal evidences from EHR notes with a macro-weighted F1-score of 0.83 and to predict SA and SI with macro F1-scores of 0.78 and 0.60, respectively. Our annotation guidelines, ScAN, and the ScANER system will be made publicly available, making ScAN a benchmark EHR dataset for SA and SI event detection. We will release the training and evaluation splits used in this study for benchmarking new models.
27
+
28
+ # Method
29
+
30
+ In this section, we introduce *ScANER* (Suicide Attempt and Ideation Events Retriever): a strong baseline model for our dataset. ScANER consists of two sub-modules: (1) An *evidence retriever module* that extracts the evidences related to SA and
31
+
32
+ <span id="page-3-1"></span>
33
+
34
+ | General Statistics | Patients | Hospital-stays | Notes |
35
+ |--------------------|----------|----------------|--------|
36
+ | | 669 | 697 | 12,759 |
37
+ | Suicide Attempt | Positive | Negative | Unsure |
38
+ | | 14,815 | 170 | 2,738 |
39
+ | Suicide Ideation | Positive | Negative | |
+ | | 1,167 | 800 | |
40
+
41
+ Table 1: Distribution of unique annotations at the patient, hospital-stay and notes level in ScAN.
42
+
43
+ SI and (2) A *predictor module* that predicts SA or SI label for the patient's hospital-stay using the evidences extracted by the retriever module.
44
+
45
+ <span id="page-3-2"></span>
46
+
47
+ | Evidence | Yes | No | | |
48
+ |------------|----------|----------|------------|------------|
49
+ | Train | 9,880 | 30,133 | | |
50
+ | Validation | 1,803 | 4,864 | | |
51
+ | Test | 3,038 | 7,836 | | |
52
+ | SA | Positive | Negative | Unsure | Neutral-SA |
53
+ | Train | 7,597 | 136 | 1,607 | 30,673 |
54
+ | Validation | 1,474 | 36 | 216 | 4,941 |
55
+ | Test | 2,433 | 20 | 431 | 7,990 |
56
+ | SI | Positive | Negative | Neutral-SI | |
57
+ | Train | 928 | 654 | 38,431 | |
58
+ | Validation | 153 | 107 | 6,407 | |
59
+ | Test | 331 | 189 | 10,354 | |
60
+
61
+ Table 2: Distribution of evidences at paragraph level in ScAN for train, validation and test sets. A paragraph was considered an *evidence*, labeled as *Yes*, if it had at least one sentence annotated as SA or SI. A *No* evidence paragraph was either *Neutral-SA* or *Neutral-SI*.
62
+
63
+ Problem Formulation: Given an input clinical note, the model extracts the evidences (one or more sentences) related to SA or SI (SA-SI) from the note. This is a binary classification problem: given a text snippet, the model predicts whether it contains evidence for SA-SI or not. We learn this task at the paragraph level, where the input is a set of 20 consecutive sentences, because the local surrounding context provides additional important information [\(Yang et al.,](#page-9-2) [2021;](#page-9-2) [Rawat et al.,](#page-8-16) [2019\)](#page-8-16). A paragraph was labeled as *evidence no* if all the sentences in that paragraph are *neutral-SA* and *neutral-SI*. If there was at least one SA-SI sentence, it was considered an *evidence yes*. As the number of non-evidence sentences significantly outnumbered the evidence sentences, we decided to use an overlapping window of 5 sentences between the paragraphs to
64
+
65
+ build more evidence paragraphs. The distribution of paragraphs across evidence, SA, and SI labels for the train, validation, and test sets is provided in Table [2.](#page-3-2) We segregated the train and test sets such that no patient observed by the *retriever module* during training was seen in the test set. This is important as there are patients with multiple hospital-stays in ScAN.
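+
+ A minimal sketch of this paragraph construction (windows of 20 consecutive sentences with a 5-sentence overlap, i.e., a stride of 15):
+
+ ```python
+ # Split a note's sentences into overlapping 20-sentence paragraphs.
+ def make_paragraphs(sentences, window=20, overlap=5):
+     stride = window - overlap
+     return [sentences[i:i + window]
+             for i in range(0, max(1, len(sentences) - overlap), stride)]
+ ```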
66
+
67
+ Proposed Model: Transformer [\(Vaswani et al.,](#page-9-3) [2017\)](#page-9-3) based language models [\(Devlin et al.,](#page-8-17) [2018;](#page-8-17) [Liu et al.,](#page-8-18) [2019\)](#page-8-18) have shown great performance on a broad range of NLP classification tasks. Hence, to extract the evidence paragraphs we trained a RoBERTa [\(Liu et al.,](#page-8-18) [2019\)](#page-8-18) based model. It has previously been shown that domain-adapted versions of pre-trained language models, such as clinicalBERT [\(Alsentzer et al.,](#page-8-19) [2019\)](#page-8-19) or BioBERT [\(Lee et al.,](#page-8-20) [2020\)](#page-8-20), work better than their base versions. So, we further pre-trained the RoBERTa-base model over the MIMIC dataset to create a clinical version of the RoBERTa model, hereafter referenced as medRoBERTa. During our initial exploration, we experimented with clinicalBERT and BioBERT but found that medRoBERTa consistently outperformed both models: medRoBERTa achieved an overall F1-score of 0.88, whereas both clinicalBERT and BioBERT achieved an overall F1-score of 0.85. Our hospital-stay-level SA and SI predictor would work with any encoder-based evidence retriever model.
68
+
69
+ Multi-task Learning: We trained medRoBERTa in a multi-task learning setting where, along with the evidence classification task, the model also learns two auxiliary tasks: (a) identifying the SA label among *positive*, *negative*, *unsure* and *neutral-SA*, and (b) identifying the SI label among *positive*, *negative* and *neutral-SI*. The training loss ($L(\theta)$) for our evidence retriever model was formulated as:
70
+
71
+ $$L(\theta) = L_{evi} + \alpha \cdot L_{SA} + \beta \cdot L_{SI} \tag{1}$$
73
+
74
+ where $L_{evi}$ is the negative log-likelihood loss for evidence classification, $L_{SA}$ and $L_{SI}$ are the SA and SI prediction losses respectively, and $\alpha$ and $\beta$ are the weights for the auxiliary tasks' losses (a minimal sketch of this combined loss follows). The distribution of labels across all three tasks is highly skewed; hence, we applied the following techniques to learn an efficient and robust model.
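+
+ A minimal sketch of the combined loss in Eq. 1; the cross-entropy heads and argument names are illustrative assumptions:
+
+ ```python
+ import torch.nn.functional as F
+
+ def multitask_loss(evi_logits, evi_y, sa_logits, sa_y, si_logits, si_y,
+                    alpha=1.0, beta=1.0):
+     l_evi = F.cross_entropy(evi_logits, evi_y)  # evidence classification
+     l_sa = F.cross_entropy(sa_logits, sa_y)     # auxiliary SA task
+     l_si = F.cross_entropy(si_logits, si_y)     # auxiliary SI task
+     return l_evi + alpha * l_sa + beta * l_si
+ ```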
75
+
76
+ • Weighted log loss was used in both the main task and the auxiliary tasks. The total loss for each task
77
+
78
+ <span id="page-4-0"></span>
79
+
80
+ | Suicide Attempt | Positive | Neg_Unsure | Neutral-SA |
81
+ |------------------|----------|------------|------------|
82
+ | Train | 377 | 54 | 1,381 |
83
+ | Val | 50 | 10 | 189 |
84
+ | Test | 91 | 19 | 326 |
85
+ | Suicide Ideation | Positive | Negative | Neutral-SI |
86
+ | Train | 377 | 214 | 1,521 |
87
+ | Val | 45 | 28 | 208 |
88
+ | | | | |
89
+
90
+ Table 3: Distribution of SA and SI at hospital-stay level in training, validation and testing set.
91
+
92
+ was calculated as the weighted sum of losses according to the *label* of the input paragraph. Log weighting helps smooth the weights for highly unbalanced classes. The weight for each class was calculated using:
93
+
94
+ $$w_{l,t} = \begin{cases} 1.0 & \text{if } \log(\gamma \cdot N_t/N_{l,t}) < 1.0 \\ \log(\gamma \cdot N_t/N_{l,t}) & \text{otherwise} \end{cases}$$
95
+
96
+ where $N_t$ is the count of all training paragraphs for task $t$, $N_{l,t}$ is the count of paragraphs with label $l$ for task $t$, and $w_{l,t}$ is the calculated weight for those paragraphs. We tuned $\gamma$ as a hyper-parameter (see the weight-computation sketch after this list). All training hyper-parameters for our best model are provided in Appendix [B.](#page-10-1)
97
+
98
+ - We also employed different sampling techniques [\(Youssef,](#page-9-4) [1999\)](#page-9-4), up and down sampling, to help our model learn from an imbalanced dataset. After sweeping for different sampling combinations as hyper-parameters, we found that down-sampling the *no*-evidence paragraphs by 10% resulted in the best performance.
99
+ - The *negative* label of SA is severely under-represented in ScAN, making it difficult for the model to learn useful patterns from such instances (see Table [2\)](#page-3-2). After discussion with the experts, we decided to group the instances of *negative* and *unsure* together and label them as *neg\_unsure*, because for both groups the general psychiatric outcome is to let the patient leave after the hospital-stay, as there is no solid evidence defining whether the self-harm was an SA event.
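+
+ A minimal sketch of the log-smoothed class-weight computation from the first technique above:
+
+ ```python
+ import math
+
+ # w_{l,t} = log(gamma * N_t / N_{l,t}), floored at 1.0.
+ def class_weight(n_task_total, n_label, gamma):
+     return max(1.0, math.log(gamma * n_task_total / n_label))
+ ```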
100
+
101
+ Problem Formulation Given all the clinical notes of a patient during the hospital stay,
102
+
103
+ <span id="page-5-0"></span>![](_page_5_Figure_0.jpeg)
104
+
105
+ Figure 4: ScANER (Suicide Attempt and Ideation Events Retriever) consists of two sub-modules: (a.) Evidence retriever module extracts evidence paragraphs from all EHR notes. We trained the module using all annotated paragraphs from ScAN. (b.) Prediction module predicts the SA and SI label for a patient using the evidence paragraphs extracted by the *retriever module* from EHR notes during the patient's hospital-stay.
106
+
107
+ the model predicts the label for SA (*positive*, *neg\_unsure* and *neutral-SA*) and SI (*positive*, *negative* and *neutral-SI*). The *prediction module* uses the evidence paragraphs extracted by the *retriever module*.
108
+
109
+ Robust Finetuning The retriever module is not perfect and can produce false positives. This results in irrelevant paragraphs (with evidence label *No*) being extracted alongside the true evidence paragraphs for a hospital-stay with SA or SI, and in irrelevant paragraphs being extracted as evidences for a hospital-stay where both SA and SI are marked *neutral*. To tackle such situations and train a robust model, we applied three techniques:
110
+
111
+ - For a hospital-stay with a non-*neutral* label for SA or SI, during training we added some noise in the form of irrelevant paragraphs (paragraphs with no SA or SI annotation) from the notes to the set of actual evidence paragraphs for the input. An irrelevant paragraph from a clinical note was sampled with a probability of 0.05 (see the sketch after this list). This forced the predictor module to learn effectively even with noisy inputs.
114
+ - For a *neutral* hospital-stay with no evidence paragraphs, we randomly chose X unique irrelevant paragraphs from the notes. X was sampled from the distribution of number of evidence paragraphs of the non-*neutral* hospitalstays. This prevented the leaking of any information to the prediction module during training by keeping the distribution of number of input paragraphs the same across *neutral* and non-*neutral* instances.
115
+ - Since these hospital-stays were extracted using the ICD codes related to suicide and overdose, the data is quite skewed, with only 102 *neutral* events out of 697 hospital-stays, whereas in a real-world scenario *neutral* hospital-stays would far outnumber non-*neutral* ones. Hence, to facilitate balanced learning of the predictor module, we introduced 1,800 *neutral* hospital-stays from the MIMIC dataset. The distribution for SA
116
+
117
+ and SI at hospital-stay level is provided in Table [3.](#page-4-0)
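+
+ A minimal sketch of the noise-injection step from the first technique above (per-paragraph sampling with probability 0.05):
+
+ ```python
+ import random
+
+ # Mix irrelevant paragraphs into the evidence set during training.
+ def add_noise(evidence_paragraphs, irrelevant_paragraphs, p=0.05):
+     noisy = list(evidence_paragraphs)
+     noisy += [para for para in irrelevant_paragraphs if random.random() < p]
+     return noisy
+ ```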
118
+
119
+ Proposed Model The paragraphs extracted by the *retriever module* for a patient's hospital-stay were provided as input to the predictor module. We used a multi-head attention model to predict the SA and SI labels for a hospital-stay, as self-attention-based models have proved effective for many prediction tasks in machine learning [\(Devlin](#page-8-17) [et al.,](#page-8-17) [2018;](#page-8-17) [Cao et al.,](#page-8-21) [2020;](#page-8-21) [Hoogi et al.,](#page-8-22) [2019\)](#page-8-22).
120
+
121
+ We encoded the extracted paragraphs $[p_1, p_2, \dots, p_n]$ using the retriever module, medRoBERTa, to get a 768-dimensional vector representation for each paragraph, $[v_1, v_2, \dots, v_n]$. Training the *retriever module* on the auxiliary tasks of predicting SA and SI helped align these paragraph representations for SA and SI prediction. Then, we prepended a prediction vector ($v_0$) to the paragraph representations to get $V = [v_0, v_1, v_2, \dots, v_n]$. We passed $V$ through our multi-head attention model to get the hidden representations $H = [h_0, h_1, \dots, h_n]$. We then passed $h_0$ through an SA inference layer and an SI inference layer to predict the labels. During the whole training process, the weights of the retriever module were frozen, whereas $v_0$ was a learnable vector initialised as an embedding in the multi-head attention model. We used a separate $v_0$ prediction vector so that it could retain the information from all the other paragraph representations for hospital-stay-level prediction, similar to how [CLS] is utilized in different transformer-based models for sequence prediction [\(Devlin et al.,](#page-8-17) [2018;](#page-8-17) [Liu et al.,](#page-8-18) [2019\)](#page-8-18). We tuned the number of layers and the number of attention heads of our prediction module as hyper-parameters and achieved the best performance using a 2-layer, 3-attention-head model. Our complete ScANER model is illustrated in Fig [4.](#page-5-0)
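+
+ A minimal PyTorch sketch of this prediction module; the class and argument names are illustrative, and the frozen retriever producing the 768-dimensional paragraph vectors is assumed to run upstream:
+
+ ```python
+ import torch
+
+ class StayPredictor(torch.nn.Module):
+     def __init__(self, dim=768, heads=3, layers=2, n_sa=3, n_si=3):
+         super().__init__()
+         self.v0 = torch.nn.Parameter(torch.randn(1, 1, dim))  # learnable v0
+         layer = torch.nn.TransformerEncoderLayer(dim, heads, batch_first=True)
+         self.encoder = torch.nn.TransformerEncoder(layer, layers)
+         self.sa_head = torch.nn.Linear(dim, n_sa)  # SA inference layer
+         self.si_head = torch.nn.Linear(dim, n_si)  # SI inference layer
+
+     def forward(self, para_vecs):                  # (batch, n, dim)
+         v0 = self.v0.expand(para_vecs.size(0), -1, -1)
+         h = self.encoder(torch.cat([v0, para_vecs], dim=1))
+         h0 = h[:, 0]                               # hidden state of v0
+         return self.sa_head(h0), self.si_head(h0)
+ ```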
2205.08714/main_diagram/main_diagram.pdf ADDED
Binary file (57.9 kB). View file
 
2205.11048/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile pages="1"><diagram id="YirEp7f7zNRSt8zeXdBu" name="Page-1"><mxGraphModel dx="744" dy="622" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0"><root><mxCell id="0"/><mxCell id="1" parent="0"/><mxCell id="2" value="&lt;font style=&quot;font-size: 10px&quot;&gt;PS&amp;amp;&lt;br&gt;WORKER&lt;/font&gt;" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="1107.0020013439962" y="648.6787482527956" width="70" height="50" as="geometry"/></mxCell><mxCell id="3" value="&lt;span style=&quot;font-size: 10px ; white-space: normal&quot;&gt;PS&amp;amp;&lt;/span&gt;&lt;br style=&quot;font-size: 10px ; white-space: normal&quot;&gt;&lt;span style=&quot;font-size: 10px ; white-space: normal&quot;&gt;WORKER&lt;/span&gt;" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="1107.0020013439962" y="728.6787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="4" value="&lt;span style=&quot;font-size: 10px ; white-space: normal&quot;&gt;PS&amp;amp;&lt;/span&gt;&lt;br style=&quot;font-size: 10px ; white-space: normal&quot;&gt;&lt;span style=&quot;font-size: 10px ; white-space: normal&quot;&gt;WORKER&lt;/span&gt;" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="1237.0020013439962" y="728.6787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="9" value="&lt;span style=&quot;font-size: 10px ; white-space: normal&quot;&gt;PS&amp;amp;&lt;/span&gt;&lt;br style=&quot;font-size: 10px ; white-space: normal&quot;&gt;&lt;span style=&quot;font-size: 10px ; white-space: normal&quot;&gt;WORKER&lt;/span&gt;" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="1237.0020013439962" y="648.6793725985758" width="70" height="50" as="geometry"/></mxCell><mxCell id="12" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1192.0020013439962" y="681.6787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="13" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1192.0020013439962" y="738.6787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="14" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1147.0020013439962" y="711.6787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="15" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1232.0020013439962" y="711.6787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="20" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;exitX=0.123;exitY=0.236;exitDx=0;exitDy=0;exitPerimeter=0;" parent="1" source="4" target="2" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1172.0020013439962" y="715.6787482527957" as="sourcePoint"/><mxPoint x="1172.0020013439962" y="685.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="21" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;exitX=0.171;exitY=0.827;exitDx=0;exitDy=0;exitPerimeter=0;" 
parent="1" source="9" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1232.0020013439962" y="699.6787482527956" as="sourcePoint"/><mxPoint x="1171.48" y="742.6781239070156" as="targetPoint"/></mxGeometry></mxCell><mxCell id="23" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-230;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1292.6099438326012" y="764.6792371569275" width="27" height="14" as="geometry"/></mxCell><mxCell id="26" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;flipH=1;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1351.1100000000001" y="631.0781239070154" width="20" height="165.92" as="geometry"/></mxCell><mxCell id="27" value="Forward stage" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;rotation=90;" parent="1" vertex="1"><mxGeometry x="1332.9" y="704.0381239070153" width="90" height="20" as="geometry"/></mxCell><mxCell id="31" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=50;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1095" y="762.6781239070154" width="27" height="14" as="geometry"/></mxCell><mxCell id="32" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="1122" y="780.6781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="33" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="1252" y="780.6781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="34" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="1124" y="631.0781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="35" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="1253.1100000000001" y="631.0781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="36" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1080" y="494" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="40" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#97D077;rotation=55;" parent="1" vertex="1"><mxGeometry x="1073" y="515.58" width="15" height="7.34" as="geometry"/></mxCell><mxCell id="41" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;flipH=1;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1348.9" y="479.9981239070154" width="20" height="133" as="geometry"/></mxCell><mxCell id="42" value="Aggregated Grad Update" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;rotation=90;" parent="1" vertex="1"><mxGeometry x="1298.9" y="538.9181239070153" width="150" height="20" as="geometry"/></mxCell><mxCell id="43" value="" style="endArrow=classic;html=1;fillColor=#B266FF;dashed=1;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1135.5" y="611" as="sourcePoint"/><mxPoint x="1135.5" y="591.22" as="targetPoint"/></mxGeometry></mxCell><mxCell id="45" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1150" y="494" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="46" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#97D077;rotation=305;" parent="1" 
vertex="1"><mxGeometry x="1182" y="515.33" width="15" height="7.34" as="geometry"/></mxCell><mxCell id="47" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1080" y="552.5" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="49" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#97D077;rotation=55;" parent="1" vertex="1"><mxGeometry x="1073" y="574.08" width="15" height="7.34" as="geometry"/></mxCell><mxCell id="51" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1150" y="552.5" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="52" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#97D077;rotation=305;" parent="1" vertex="1"><mxGeometry x="1182" y="573.83" width="15" height="7.34" as="geometry"/></mxCell><mxCell id="55" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1225" y="494" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="56" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1241.99503231978" y="552.9162478140306" as="sourcePoint"/><mxPoint x="1241.99503231978" y="522.9162478140306" as="targetPoint"/></mxGeometry></mxCell><mxCell id="57" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" source="55" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1321.99503231978" y="522.9162478140308" as="sourcePoint"/><mxPoint x="1295" y="508" as="targetPoint"/><Array as="points"/></mxGeometry></mxCell><mxCell id="58" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1248.5" y="530.5" width="6.5" height="14" as="geometry"/></mxCell><mxCell id="59" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=90;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1277.5" y="511" width="6.5" height="14" as="geometry"/></mxCell><mxCell id="60" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1295" y="494" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="61" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1225" y="552.5" width="40" height="28.92" as="geometry"/></mxCell><mxCell id="62" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" source="61" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1321.99503231978" y="581.4162478140308" as="sourcePoint"/><mxPoint x="1295" y="566.5" as="targetPoint"/><Array as="points"/></mxGeometry></mxCell><mxCell id="64" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=90;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1277.5" y="550" width="6.5" height="14" as="geometry"/></mxCell><mxCell id="65" value="" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;size=10;" parent="1" vertex="1"><mxGeometry x="1295" y="552.5" width="40" height="28.92" as="geometry"/></mxCell><mxCell 
id="67" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1314.9050323197798" y="552.9162478140306" as="sourcePoint"/><mxPoint x="1314.9050323197798" y="522.9162478140306" as="targetPoint"/></mxGeometry></mxCell><mxCell id="68" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1300" y="531.46" width="6.5" height="14" as="geometry"/></mxCell><mxCell id="72" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1265" y="520.0900000000001" as="sourcePoint"/><mxPoint x="1295" y="550.9000000000001" as="targetPoint"/><Array as="points"/></mxGeometry></mxCell><mxCell id="73" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1266" y="550.9000000000001" as="sourcePoint"/><mxPoint x="1295" y="520.1600000000001" as="targetPoint"/><Array as="points"/></mxGeometry></mxCell><mxCell id="74" value="" style="endArrow=classic;html=1;fillColor=#B266FF;dashed=1;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1280.5" y="610" as="sourcePoint"/><mxPoint x="1280.5" y="590.22" as="targetPoint"/></mxGeometry></mxCell><mxCell id="75" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1137.0020013439962" y="728.6787482527957" as="sourcePoint"/><mxPoint x="1137.0020013439962" y="698.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="76" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1177.0020013439962" y="673.6787482527957" as="sourcePoint"/><mxPoint x="1237.0020013439962" y="673.6793725985758" as="targetPoint"/><Array as="points"/></mxGeometry></mxCell><mxCell id="77" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;entryX=0.052;entryY=0.61;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1174.5020013439962" y="758.6787482527957" as="sourcePoint"/><mxPoint x="1240.642001343996" y="759.1787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="78" value="" style="endArrow=classic;startArrow=classic;html=1;jumpSize=12;strokeWidth=2;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1271.7620013439964" y="728.6787482527957" as="sourcePoint"/><mxPoint x="1271.7620013439964" y="698.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="79" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=50;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1095" y="684.6781239070154" width="27" height="14" as="geometry"/></mxCell><mxCell id="80" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-230;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1291.9999438326013" y="684.6792371569275" width="27" height="14" as="geometry"/></mxCell><mxCell 
id="81" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" target="45" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1124" y="508.5" as="sourcePoint"/><mxPoint x="1174" y="458.5" as="targetPoint"/></mxGeometry></mxCell><mxCell id="82" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1169" y="525" as="sourcePoint"/><mxPoint x="1169" y="550" as="targetPoint"/></mxGeometry></mxCell><mxCell id="83" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" parent="1" target="47" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1147" y="567" as="sourcePoint"/><mxPoint x="1147" y="592" as="targetPoint"/></mxGeometry></mxCell><mxCell id="84" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" parent="1" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1100" y="549" as="sourcePoint"/><mxPoint x="1100" y="526.92" as="targetPoint"/></mxGeometry></mxCell><mxCell id="85" value="PS" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="759.5020013439962" y="655.1787482527956" width="70" height="50" as="geometry"/></mxCell><mxCell id="86" value="WORKER" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="720.5020013439962" y="736.1787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="87" value="WORKER" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="925.5020013439962" y="735.6787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="88" value="PS" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="889.5020013439962" y="655.1793725985758" width="70" height="50" as="geometry"/></mxCell><mxCell id="89" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-120;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="911.5020013439962" y="770.6787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="90" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=120;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="774.0020013439962" y="772.1787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="95" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-230;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="981.1099438326012" y="771.6792371569275" width="27" height="14" as="geometry"/></mxCell><mxCell id="96" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;flipH=1;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1007.6100000000001" y="637.5781239070154" width="20" height="165.92" as="geometry"/></mxCell><mxCell id="97" value="Forward stage" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;rotation=90;" parent="1" vertex="1"><mxGeometry x="989.4000000000001" y="710.5381239070153" width="90" height="20" as="geometry"/></mxCell><mxCell id="98" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=50;fillColor=#97D077;" parent="1" 
vertex="1"><mxGeometry x="708.5" y="770.1781239070154" width="27" height="14" as="geometry"/></mxCell><mxCell id="99" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="735.5" y="788.1781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="100" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="940.5" y="787.6781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="105" value="" style="shape=curlyBracket;whiteSpace=wrap;html=1;rounded=1;flipH=1;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1005.4000000000001" y="486.4981239070154" width="20" height="133" as="geometry"/></mxCell><mxCell id="106" value="Batch Grad Update" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;rotation=90;" parent="1" vertex="1"><mxGeometry x="970.4000000000001" y="545.4181239070153" width="120" height="20" as="geometry"/></mxCell><mxCell id="141" value="WORKER" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="824.5020013439962" y="735.1787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="142" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=120;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="878.0020013439962" y="771.1787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="143" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=50;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="812.5" y="769.1781239070154" width="27" height="14" as="geometry"/></mxCell><mxCell id="144" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="839.5" y="787.1781239070154" width="40" height="14" as="geometry"/></mxCell><mxCell id="151" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="85" target="86" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="1103" y="595.5" as="sourcePoint"/><mxPoint x="1129" y="595.46" as="targetPoint"/></mxGeometry></mxCell><mxCell id="152" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="85" target="141" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="804.5020013439962" y="715.1787482527957" as="sourcePoint"/><mxPoint x="765.5020013439962" y="746.1787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="153" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="88" target="87" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="804.5020013439962" y="715.1787482527957" as="sourcePoint"/><mxPoint x="869.5020013439962" y="745.1787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="154" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="85" target="87" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="934.5020013439962" y="715.1793725985758" as="sourcePoint"/><mxPoint 
x="970.5020013439962" y="745.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="155" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="88" target="141" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="944.5020013439962" y="725.1793725985758" as="sourcePoint"/><mxPoint x="980.5020013439962" y="755.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="156" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="88" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="954.5020013439962" y="735.1793725985758" as="sourcePoint"/><mxPoint x="756" y="736" as="targetPoint"/></mxGeometry></mxCell><mxCell id="159" value="PS" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="760.5020013439962" y="479.1787482527956" width="70" height="50" as="geometry"/></mxCell><mxCell id="160" value="WORKER" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="721.5020013439962" y="560.1787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="161" value="WORKER" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="926.5020013439962" y="559.6787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="162" value="PS" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="890.5020013439962" y="479.1793725985758" width="70" height="50" as="geometry"/></mxCell><mxCell id="163" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-120;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="912.5020013439962" y="594.6787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="164" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=120;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="775.0020013439962" y="596.1787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="165" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-230;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="982.1099438326012" y="595.6792371569275" width="27" height="14" as="geometry"/></mxCell><mxCell id="166" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=50;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="709.5" y="594.1781239070154" width="27" height="14" as="geometry"/></mxCell><mxCell id="169" value="WORKER" style="shape=hexagon;perimeter=hexagonPerimeter2;whiteSpace=wrap;html=1;fixedSize=1;" parent="1" vertex="1"><mxGeometry x="825.5020013439962" y="559.1787482527957" width="70" height="50" as="geometry"/></mxCell><mxCell id="170" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=120;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="879.0020013439962" y="595.1787482527957" width="30" height="14" as="geometry"/></mxCell><mxCell id="171" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=50;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="813.5" y="593.1781239070154" width="27" height="14" as="geometry"/></mxCell><mxCell id="179" value="" 
style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" parent="1" source="160" target="159" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="804.5020013439962" y="715.1787482527957" as="sourcePoint"/><mxPoint x="970.5020013439962" y="745.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="180" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" parent="1" source="169" target="159" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="814.5020013439962" y="725.1787482527957" as="sourcePoint"/><mxPoint x="980.5020013439962" y="755.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="181" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" parent="1" source="161" target="159" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="824.5020013439962" y="735.1787482527957" as="sourcePoint"/><mxPoint x="990.5020013439962" y="765.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="182" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" parent="1" target="162" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="760" y="560" as="sourcePoint"/><mxPoint x="1000.5020013439962" y="775.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="184" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" parent="1" source="169" target="162" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="854.5020013439962" y="765.1787482527957" as="sourcePoint"/><mxPoint x="1020.5020013439962" y="795.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="185" value="" style="endArrow=classic;html=1;strokeWidth=2;endSize=6;jumpSize=12;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" parent="1" source="161" target="162" edge="1"><mxGeometry width="50" height="50" relative="1" as="geometry"><mxPoint x="864.5020013439962" y="775.1787482527957" as="sourcePoint"/><mxPoint x="1030.5020013439962" y="805.6787482527957" as="targetPoint"/></mxGeometry></mxCell><mxCell id="187" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=-180;fillColor=#97D077;" parent="1" vertex="1"><mxGeometry x="1437.25" y="494" width="25.5" height="14" as="geometry"/></mxCell><mxCell id="189" value="" style="rounded=1;whiteSpace=wrap;html=1;rotation=0;fillColor=#FFFF99;" parent="1" vertex="1"><mxGeometry x="1435.0020013439962" y="619.4987482527956" width="30" height="14" as="geometry"/></mxCell><mxCell id="190" value="" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#E1D5E7;" parent="1" vertex="1"><mxGeometry x="1433.5" y="738.6800000000001" width="31.5" height="14" as="geometry"/></mxCell><mxCell id="194" value="&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;dense 变量&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;同步ring all reduce&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;text-align: center&quot;&gt;&lt;font 
face=&quot;helvetica&quot;&gt;每台机器一个备份&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;异步分片放到多个worker&lt;/font&gt;&lt;/div&gt;" style="text;whiteSpace=wrap;html=1;" parent="1" vertex="1"><mxGeometry x="1390" y="522.92" width="120" height="77.5" as="geometry"/></mxCell><mxCell id="195" value="&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;sparse 变量&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;同步all2all&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;每台机器按行分片&lt;/font&gt;&lt;/div&gt;&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;异步分片到多个worker&lt;/font&gt;&lt;/div&gt;" style="text;whiteSpace=wrap;html=1;" parent="1" vertex="1"><mxGeometry x="1389.25" y="641.43" width="120" height="77.5" as="geometry"/></mxCell><mxCell id="197" value="&lt;div style=&quot;text-align: center&quot;&gt;&lt;font face=&quot;helvetica&quot;&gt;数据&lt;/font&gt;&lt;/div&gt;" style="text;whiteSpace=wrap;html=1;" parent="1" vertex="1"><mxGeometry x="1433.5" y="762.68" width="120" height="77.5" as="geometry"/></mxCell></root></mxGraphModel></diagram></mxfile>
2205.11048/main_diagram/main_diagram.pdf ADDED
Binary file (32.6 kB). View file
 
2205.11048/paper_text/intro_method.md ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Nowadays, recommendation models with a large volume of parameters and high computational complexity have become mainstream in the deep learning community [@hron2021component]. Accelerating the training of these recommendation models is a pressing issue, and recently synchronous training upon high-performance computing (HPC) has dominated the training speed records [@kim2019parallax; @jiang2020unified; @zhang2022picasso]. The resource requirements of synchronous training upon all-reduce (AR) are more rigorous than those of asynchronous training upon parameter servers (PS) [@acun2021understanding]. In a shared training cluster with dynamic status [@chen2021fangorn], synchronous training can be retarded by a few straggling workers, so its training speed may be even much slower than that of high-concurrency asynchronous training.
4
+
5
+ If it were possible to switch the training mode according to the cluster status, we could make full use of the limited hardware resources. However, switching the training mode for a specific model usually demands re-tuning of the hyper-parameters to guarantee accuracy. Re-tuning hyper-parameters is common in one-shot training workloads (e.g., general CV or NLP workloads) [@li2020system], but it is not applicable to the continual learning or lifelong training of recommendation models [@guo2020improved], as tuning would be highly time- and resource-consuming. When switching the training mode of representative recommendation models, we confront three main challenges in our shared cluster: 1) model accuracy may suffer a sudden drop after switching, requiring the model to be retrained on a large amount of data to regain accuracy comparable to that before switching; 2) the distribution of gradient values differs between synchronous and asynchronous training, making it difficult for models under the two training modes to reach the same accuracy by tuning hyper-parameters; 3) the cluster status imposes staleness on asynchronous training, and staleness negatively impacts the aggregation of gradients, especially for the dense parameters.
6
+
7
+ We conduct a systematic investigation of the training workloads of recommendation models to tackle the above challenges. We find that when the global batch size (i.e., the actual batch size of gradient aggregation) is the same, the distribution of gradient values under asynchronous training tends to be similar to that under synchronous training. Besides, we notice that due to their high sparsity, the embedding parameters in recommendation models are updated less frequently than the dense parameters, giving them a stronger tolerance for staleness than general CV or NLP deep learning models. Based on these insights, we propose Global Batch gradients Aggregation (GBA), which ensures the model keeps the same global batch size when switched between synchronous and asynchronous training. GBA is implemented via a token-control mechanism, which bounds the staleness and aggregates gradients [@ho2013more]. The mechanism suppresses staleness following a staleness-decay strategy over the token index; a hedged sketch of such staleness-decayed aggregation is given below. Faster nodes take more tokens without waiting, and thereby GBA trains as fast as the asynchronous mode. Furthermore, the convergence analysis shows that GBA has convergence properties comparable to the synchronous mode, even under high staleness, for recommendation models. We conduct an extensive evaluation on three continual-learning recommendation tasks. The results reveal that GBA performs well on both accuracy and efficiency with the same hyper-parameters. In particular, GBA improves the AUC metric by 0.2% on average compared to state-of-the-art asynchronous training modes. Besides, GBA presents at least a 2.4x speedup over synchronous AR training in a cluster with strained hardware resources.
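+
+ The exact decay formula and token bookkeeping are not spelled out here, so the following is only a hedged sketch of staleness-decayed global-batch gradient aggregation; the exponential decay is an illustrative assumption, not the paper's formula:
+
+ ```python
+ # Down-weight stale worker gradients before the global-batch update.
+ def aggregate(grads, tokens, current_token, decay=0.9):
+     total, weight_sum = None, 0.0
+     for g, t in zip(grads, tokens):
+         w = decay ** (current_token - t)  # staleness = token gap
+         total = g * w if total is None else total + g * w
+         weight_sum += w
+     return total / weight_sum
+ ```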
8
+
9
+ To the best of our knowledge, this is the first work to approach switching between synchronous and asynchronous training without tuning the hyper-parameters. GBA has been deployed in our shared training cluster. The tuning-free switching enables our users to dynamically change the training modes between GBA and the synchronous HPC training for the continual learning tasks. The overall training efficiency of these training workloads is thereby significantly improved, and the hardware utilization within the cluster is also raised by a large margin.
10
+
11
+ # Method
12
+
13
+ Recommendation models usually comprise two modules: the *sparse* module contains the embedding layers with the *embedding parameters*, mapping categorical IDs into a numerical space; the *dense* module contains the computational blocks with the *dense parameters*, such as attention and MLP [@cheng2016wide; @zhou2018deep], to exploit feature interactions (a generic sketch of this structure follows). The main difference between the two kinds of parameters is their occurrence ratio in each training batch: every training batch needs all the dense parameters, yet only a tiny fraction of the embedding parameters, determined by the feature IDs in the data shard. The latest recommendation models introduce high complexity and a large volume of parameters, making distributed training essential for training efficiency. The synchronous HPC training mode usually adopts the AR architecture, where the dense parameters are replicated and the embedding parameters are partitioned on each worker. HPC is deployed by monopolizing a few high-performance workers and making full use of the associated resources, and it may be retarded by slow workers [@kumar2014fugue]. The PS architecture is usually coupled with asynchronous high-concurrency training, where the parameters are placed on PSs and the workers are responsible for the computation. On the one hand, the high-concurrency mechanism activates fragmentary resources in the training cluster by deploying hundreds of workers. On the other hand, asynchronous training brings in *gradient staleness*, which occurs when a gradient is calculated based on parameters of an old version and applied to parameters of a new version.
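+
+ A generic sketch of this sparse/dense two-module structure (an illustrative toy model, not a specific production architecture):
+
+ ```python
+ import torch
+
+ class RecModel(torch.nn.Module):
+     def __init__(self, n_ids=10**6, dim=16):
+         super().__init__()
+         # Sparse module: only the rows hit by a batch's feature IDs are used.
+         self.emb = torch.nn.EmbeddingBag(n_ids, dim, mode="sum")
+         # Dense module: needed in full by every training batch.
+         self.mlp = torch.nn.Sequential(
+             torch.nn.Linear(dim, 64), torch.nn.ReLU(), torch.nn.Linear(64, 1))
+
+     def forward(self, ids):  # ids: (batch, n_features) of categorical IDs
+         return self.mlp(self.emb(ids))
+ ```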
14
+
15
+ <figure id="fig:dense_dist" data-latex-placement="tp">
16
+ <div class="minipage">
17
+ <embed src="status_eff.pdf" />
18
+ </div>
19
+ <div class="minipage">
20
+ <embed src="criteo_train.pdf" />
21
+ </div>
22
+ <figcaption>The AUC on the validation set of Criteo-4GB and Private by every 5% progress during training, switching at 50% progress.</figcaption>
23
+ </figure>
24
+
25
+ <figure id="fig:id_dist" data-latex-placement="tp">
26
+ <div class="minipage">
27
+ <embed src="batch_grad.pdf" />
28
+ </div>
29
+ <div class="minipage">
30
+ <embed src="id_distribution.pdf" />
31
+ </div>
32
+ <figcaption>The skewed distribution of ID occurrences across batches, reflecting the frequency that an ID gets updated.</figcaption>
33
+ </figure>
34
+
35
+ We investigate the training workloads of recommendation models from a shared training cluster to observe the obstacles and necessities of switching training modes.
36
+
37
+ **Observation 1: Cluster status determines the performance of training modes.** Figure [\[fig:status_eff\]](#fig:status_eff){reference-type="ref" reference="fig:status_eff"} illustrates the average CPU utilization within a real shared cluster, and the corresponding samples/queries per second (QPS) of a YouTubeDNN [@covington2016deep] model by the synchronous and asynchronous training mode. The utilization and QPS are normalized by their maximal value, respectively. When the cluster is relatively vacant, models trained in the synchronous mode can fully occupy the hardware resources, satisfying HPC conditions and presenting high efficiency. When there are plenty of heterogeneous workloads in the cluster, slow workers dominate the training speed, making the asynchronous training mode run much faster than the synchronous mode. We also implement two approaches of local all-reduce[^3]. Since the status of each device in the cluster is constantly changing, the local all-reduce-based mode would not work well when confronting resource shortages.
38
+
39
+ **Observation 2: Directly switching the training mode causes a sudden drop in accuracy.** We run DeepFM [@lian2018xdeepfm] over Criteo-4GB [@criteo2014kaggle] (few parameters, fast convergence) and YouTubeDNN on the Private dataset (trillions of parameters, slow convergence) in the shared cluster. We tune the hyper-parameters from scratch for the best model accuracy of both the asynchronous and synchronous modes, and denote the two sets of hyper-parameters as set $\mathbb{A}$ and set $\mathbb{S}$, respectively. After training in one training mode, we evaluate the tendency of the training AUC after switching to the other training mode with set $\mathbb{A}$ or set $\mathbb{S}$. Figure [1](#fig:dense_dist){reference-type="ref" reference="fig:dense_dist"} illustrates that after switching from the synchronous mode to the asynchronous mode, the AUC suffers a sudden drop and even decreases to 0.5. The AUC drop also appears when switching in the opposite direction, indicating that this behavior is irrelevant to whether the model had converged. These observations imply that directly switching the training mode requires heavy effort in re-tuning the hyper-parameters. Inherently, different training modes would lead to different convergence or minima owing to the differences in batch size, learning rate and many other factors, which have already received in-depth theoretical research [@barkai2019gap; @keskar2016large]. We provide a theoretical analysis to explain the sudden drop in Appendix D.
40
+
41
+ We then probe into the insights from asynchronously training recommendation models.
42
+
43
+ **Insight 1: Distribution of the gradient values is related to the aggregated batch size.** We investigate the reason for Observation 2 from the gradient perspective. We implement asynchronous bulk synchronous parallel (BSP) on the YouTubeDNN recommendation task, which asynchronously aggregates $K$ gradients from workers before applying the values to the parameters. Here, we set $K$ to 100, the same as the number of workers. Besides, we compare against synchronous training with a 6.4K local batch size (64 workers). Figure [\[fig:grad_dist\]](#fig:grad_dist){reference-type="ref" reference="fig:grad_dist"} plots the distribution of the L2-norm of gradient values from the synchronous training and BSP with various local batch sizes. It is evident that the batch size determines the mean and variance of the distribution. The distribution of BSP resembles synchronous training when the aggregation size is similar (i.e., BSP-4K). The result suggests that the same aggregation size could lead to a similar distribution of gradient values. However, there is still a gap in model accuracy after equalizing the global batch size between the asynchronous training and the synchronous training, mainly induced by gradient staleness.
44
+
45
+ **Insight 2: The gradient staleness imposes a different impact on the embedding parameters and the dense parameters.** Due to the skewed distribution, most IDs would merely appear in a small number of batches, as depicted in Fig. [2](#fig:id_dist){reference-type="ref" reference="fig:id_dist"}. This means that in recommendation models, only a tiny portion of IDs would be involved in every single batch, and the embedding parameters are updated less frequently than the dense parameters. Therefore, the embedding parameters tend to be more robust to gradient staleness than the dense parameters (for example, considering a worker in training, there could be five updates for the dense parameters, yet only two updates for the embedding of a specific ID).
46
+
47
+ Switching the distributed training mode for recommendation models should not require re-tuning the hyper-parameters. We introduce the concept of *global batch size*, which is defined as the actual batch size when gradients are aggregated and applied, and propose GBA for the tuning-free switching. We denote the *local batch size* (i.e., the actual batch size on each worker) and the number of workers as $B_s$ and $N_s$ for synchronous training, and as $B_a$ and $N_a$ for asynchronous training. Then, the global batch size in synchronous training, denoted by $G_s$, can be calculated as $B_s\times N_s$. Following Insight 1, GBA keeps the global batch size unchanged when we switch the distributed training mode from synchronous training to asynchronous training. In each step, all the dense parameters are updated, while only a small number of embedding parameters are updated, so the dense and embedding parameters experience different gradient staleness during training. Hence, we define the *data staleness* as the unified staleness in training recommendation models. The data staleness describes the gap between the global step when the worker begins to ingest a data batch and the global step when the calculated gradient is applied. Obviously, the data staleness in the synchronous training mode is constantly zero. Based on data staleness, we implement GBA by a token-control mechanism on the PS architecture to cope with the sparsity and the dynamic cluster status.
48
+
49
+ <figure id="token" data-latex-placement="tp">
50
+ <embed src="token_v4.pdf" />
51
+ <figcaption>Illustration of the token-control mechanism in GBA: every <span class="math inline"><em>M</em></span> gradients would be aggregated in the buffer before the PSs apply them to the parameters; workers report gradients to the PSs along with a token indicating the degree of data staleness.</figcaption>
52
+ </figure>
53
+
54
+ Figure [3](#token){reference-type="ref" reference="token"} illustrates the architecture of the proposed token-control mechanism. Over the canonical PS, we prepare a queue called *data list* to arrange the data (addresses) by batches. Given a dataset $\mathcal{D}$, suppose we can split it into $Q$ batches of size $B_a$, denoted by $\mathcal{D}=(\mathbf{d}_0, \mathbf{d}_1, \dots, \mathbf{d}_{Q-1})$. Meanwhile, we establish another queue called *token list* to yield the token of each individual batch. The token list contains $Q$ tokens, denoted by $(t_0, t_1, \dots, t_{Q-1})$, each attached to one batch in the data list to indicate the global step when this batch is sent to a worker. The token value starts from zero, and each token value repeats $M$ times in the token list. Here, $M$ is the number of batches we use to aggregate gradients. Under this setting, we can deduce that there will be $K=\lceil \frac{Q}{M} \rceil$ gradient updates during the training. Then we set $t_i=\lfloor \frac{i}{M} \rfloor,\forall i\in\{0,1,\ldots,Q-1\}$ so that the token list yields the token values in ascending order. Apart from the two queues, we also employ a *gradient buffer* to receive the gradients calculated by the workers, together with their corresponding tokens. To be consistent with the tokens, the capacity of the gradient buffer is set to $M$, and therefore the PSs would aggregate $M$ gradients before applying them to the variables. Note that each PS maintains an individual gradient buffer to deal with the gradients of the corresponding partitions of the variables.
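+
+ As an illustration of the queue layout described above, the following minimal sketch (our own, with illustrative names, not the authors' implementation) builds the token list so that each token value repeats $M$ times and there are $K=\lceil Q/M \rceil$ global steps:
+
+ ```python
+ # Sketch: pairing Q batch addresses with tokens; each global step covers M batches.
+ import math
+
+ def build_token_list(Q, M):
+     """Token of batch i = global step it belongs to, i.e. floor(i / M)."""
+     K = math.ceil(Q / M)                    # number of aggregated gradient updates
+     tokens = [i // M for i in range(Q)]     # ascending, each value repeated M times
+     assert tokens[-1] == K - 1
+     return tokens
+
+ data_list = [f"batch_{i}" for i in range(10)]   # addresses of Q = 10 batches
+ token_list = build_token_list(Q=10, M=4)        # -> [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]
+ ```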
55
+
56
+ During the training process, a worker would pull the parameters from PS, a batch from the data list, and a token from the token list simultaneously before ingesting the data and computing the gradient locally. When a worker completes calculating the gradient of a batch, the gradient and the corresponding token are sent to the gradient buffer on PS. Then, the worker immediately proceeds to work on the next batch. In this way, the fast workers can keep working without waiting for the slow ones. When the gradient buffer reaches the capacity of $M$ pairs of gradients and tokens, all the gradients are aggregated and applied at once, and at the same time, the buffer will be cleared. This is what we call finishing a global step, and thereby the global batch size in GBA can be calculated as $G_a = B_a\times M$. According to the design, we aim to keep the global batch size consistent in switching, that is, $G_s=G_a$. Hence, we can set the size of the gradient buffer to be $M=\frac{B_s\times N_s}{B_a}$. We would use $M$ workers in GBA, i.e., $N_a=M$, to avoid the intrinsic gradient staleness caused by the inconsistency between the number of workers and the number of batches to aggregate.
57
+
58
+ At the update of global step $k$, denote by $\tau(m,k)$ the $m$-th token in the gradient buffer. When we aggregate the gradients in the gradient buffer, we shall decay the gradients that suffer from severe staleness. GBA could employ different staleness decay strategies to mitigate the negative impact of staleness according to the token index; in this work we define it as: $$\begin{equation}
59
+ \label{eqn:tolerance_func}
60
+ f(\tau(m,k), k)=\left\{
61
+ \begin{aligned}
62
+ 0 & , & k - \tau(m,k) > \iota\\
63
+ 1 & , & k - \tau(m,k) \leq \iota,
64
+ \end{aligned}
65
+ \right.
66
+ \end{equation}$$ where $\iota$ is the threshold of tolerance. If $f(\tau(m,k), k)=0$, we exclude the $m$-th gradient in the buffer due to its severe staleness; otherwise, we aggregate the gradient. As we can see, tokens help identify whether the corresponding gradients are stale and how many steps they lag behind the current global step. Although the token is designed around data staleness, the negative impact of the canonical gradient staleness is thereby also mitigated.
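+
+ A compact sketch of how a PS-side gradient buffer could combine this hard-threshold decay with aggregation (our own simplification: "aggregate" is rendered as an average, and the buffer layout and names are illustrative):
+
+ ```python
+ import numpy as np
+
+ def aggregate_buffer(buffer, k, iota):
+     """Aggregate a full buffer of (gradient, token) pairs at global step k.
+
+     Gradients whose staleness k - token exceeds the tolerance iota are dropped,
+     mirroring f(tau(m, k), k) above; the remaining ones are averaged into one update.
+     """
+     kept = [g for (g, tok) in buffer if k - tok <= iota]
+     if not kept:                       # every gradient in the buffer was too stale
+         return None
+     return np.mean(kept, axis=0)
+
+ # Toy usage: a buffer of M = 4 worker gradients with tokens (3, 3, 2, 0) at step k = 3.
+ buffer = [(np.random.randn(8), tok) for tok in (3, 3, 2, 0)]
+ update = aggregate_buffer(buffer, k=3, iota=2)   # the token-0 gradient is excluded
+ ```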
67
+
68
+ We have seen much research on the convergence analysis of the synchronous and asynchronous training. Following the assumptions and convergence analysis in @dutta2018slow, the expectation of error after $k$ steps of gradient updates in the synchronous training can be deduced by: $$\begin{equation}
69
+ \label{eq:sync}
70
+ \begin{split}
71
+ &\mathbb{E}[F(\mathbf{w}_k)]-F^*
72
+ \leq
73
+ \frac{\eta L \sigma^2}{2cN_s B_{s}}+
74
+ (1-\eta c)^k (F(\mathbf{w}_0)-F^*-\frac{\eta L \sigma^2}{2cN_s B_{s}}),
75
+ \end{split}
76
+ \end{equation}$$ where $\mathbf{w}_k$ denotes the parameters at step $k$, $\eta$ denotes the learning rate, $L$ is the Lipschitz constant and $\sigma^2$ denotes the variance of the gradients. $F(\mathbf{w})$ is the empirical risk function that is strongly convex with parameter $c$. $\mathbb{E}[F(\mathbf{w}_k)]-F^*$ is the expected gap of the risk function from its optimal value, used as the error after $k$ steps. As shown in Eqn. ([\[eq:sync\]](#eq:sync){reference-type="ref" reference="eq:sync"}), the first term on the right, i.e., $\frac{\eta L \sigma^2}{2cN_s B_{s}}$, is the error floor, and $(1-\eta c)$ is the decay rate. The proposed GBA builds upon asynchronous gradient aggregation. We assume that, for some $\gamma\leq 1$, $$\begin{equation}
77
+ \label{eq:gamma}
78
+ \gamma \geq \frac{\zeta E[||\nabla F(\mathbf{w}_{k})-\nabla F(\mathbf{w}_{\tau(m, k)})||_2^{2}]}{E[||\nabla F(\mathbf{w}_{k})||_2^{2}]}.
79
+ \end{equation}$$ Here, $\gamma$ measures the impact of staleness on the gradients; a smaller $\gamma$ indicates that staleness causes less deterioration in model accuracy. Besides, $\zeta$ indicates the average probability that any parameter in the model would be updated in both step $k$ and step $\tau(m,k)$. Intuitively, $\zeta$ would be far below $1$ in recommendation models due to the strong sparsity. Then, the error of GBA after $k$ steps of aggregated updates becomes (Appendix A presents the proof): $$\begin{equation}
80
+ \label{eq:async}
81
+ \begin{split}
82
+ &\mathbb{E}[F(\mathbf{w}_k)]-F^*
83
+ \leq
84
+ \frac{\eta L \sigma^2}{2c\gamma'MB_{a}}+
85
+ (1-\eta \gamma' c)^k (\mathbb{E}[F(\mathbf{w}_0)]-F^*-\frac{\eta L \sigma^2}{2c \gamma'MB_{a}}),
86
+ \end{split}
87
+ \end{equation}$$ where $\gamma' = 1-\gamma+\frac{p_{0}}{2}$ and $p_0$ is a lower bound on the conditional probability that the token equals the global step, i.e., $\tau(m,k)=k$. Equation ([\[eq:async\]](#eq:async){reference-type="ref" reference="eq:async"}) establishes the convergence of GBA. Considering the error floors of Eqn. ([\[eq:sync\]](#eq:sync){reference-type="ref" reference="eq:sync"}) and Eqn. ([\[eq:async\]](#eq:async){reference-type="ref" reference="eq:async"}), $M\times B_a$ should be set close to $N_s\times B_s$ to make GBA tuning-free. This is exactly the global batch size we use in GBA, consistent with our main idea of keeping the global batch size unchanged. Recall that, owing to the embedding parameters, $\zeta<1$ makes $\gamma$ lower than in the training of general CV or NLP models. Consequently, the error floor remains low in GBA.
2205.15730/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2205.15730/main_diagram/main_diagram.pdf ADDED
Binary file (49.8 kB). View file
 
2205.15730/paper_text/intro_method.md ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Multi-object tracking (MOT) is an important chain link in the perception, prediction and planning pipeline of an autonomous or automated vehicle. The task comprises keeping track of all relevant surrounding objects over time, where every object should have a unique identity that remains unchanged in the time frame of interest. On a frame-by-frame basis, each track can have four different states. *Initialization:* The object is seen for the first time and its track starts. *Continuation:* The same object is visible in the following measurement(s), each time appending its new position to the track. *Occlusion:* An object can be occluded for a certain time and possibly reappear in the measurements afterwards. *Completion:* The object is seen for the last time at some point, concluding its track. Since a multi-object-tracking module provides the current and past state of the surrounding agents of a vehicle, it is an essential component for the subsequent modules of the driving pipeline such as prediction of the agents' future movement, which, in turn, is essential for planning the behaviour of the ego vehicle.
4
+
5
+ There exist multiple fundamentally different approaches to tracking. Classically, the task has been addressed with Bayesian approaches, such as joint probabilistic data association filters [@bar2011tracking] and random finite set based filters [@mahler2007statistical], which are especially suited for measurements with known noise characteristics. With the emergence of reliable deep-learning based object detectors, less computation-intensive Bayesian approaches are widely used for 3D MOT [@weng_3d_2019; @chiu_probabilistic_2020]: In [@weng_3d_2019], the authors found that their plain Kalman-filter based approach could achieve state-of-the art results on the nuScenes dataset [@caesar_nuscenes:_2020], powered by the output of a reliable object detector, and it outperformed more complicated tracking methods such as [@zhang_robust_2019]. The aforementioned tracking approaches can be described as *tracking-by-detection*, which means that tracking is regarded as a capsuled problem, i.e. detections serve as input and tracks are output, commonly on a frame-by-frame basis. Each detection is described by a set of parameters, such as location, extent, and orientation. Approaches that belong to this group can function without access to raw measured data and can be very fast. However, the trackers are restricted to their low-dimensional input, which does not include higher-dimensional features that the detector inferred from the input data. This additional information, however, has the potential to provide useful insights, e.g. for recognition of an object based on its appearance. To mitigate this drawback, some workarounds were proposed, such as utilizing re-identification features [@chiu_probabilistic_2020-1].
6
+
7
+ <figure id="figTrSmall" data-latex-placement="t">
8
+ <embed src="figures/tracker_small2.pdf" />
9
+ <figcaption>Illustration of one tracking step. A main component is our novel ego-motion compensation (EMC) module that transforms the tracker state in feature space. Refer to Fig. <a href="#fig_tracker_large" data-reference-type="ref" data-reference="fig_tracker_large">2</a> for a full model overview; some details are not pictured in this graphic, as indicated by dashed lines.</figcaption>
10
+ </figure>
11
+
12
+ In this work, we propose a model, which is illustrated in Figure [1](#figTrSmall){reference-type="ref" reference="figTrSmall"}, that performs tracking and detection jointly to make use of said latent space that an object detector has. This paradigm, *tracking-and-detection*, has recently become more popular, especially in the area of image-based 2D tracking, as its potential became apparent [@meinhardt_trackformer:_2021]. Some existing approaches on lidar data take a stack of frames as input, e.g. the past $N$ frames, and some perform different tasks jointly, such as detection, tracking and prediction [@luo_fast_2018]. In our model on the other hand, a sequence is processed on a frame-by-frame basis, avoiding overlap that occurs with stacked input frames.
13
+
14
+ In 2017, transformers were proposed as a novel machine learning model based on dot-product attention [@vaswani_attention_2017], which revolutionized the field of natural language processing. Besides this, the transformer showed promising results in vision tasks [@dosovitskiy_image_2020], such as object detection [@{carion_end--end_2020}] and tracking [@meinhardt_trackformer:_2021] on the image domain and shape classification on lidar data [@engel_point_2020].
15
+
16
+ Track management requires a global overview of all tracks and object candidates. This makes a transformer a natural fit for this task since it can employ self-attention between the tracklets. However, image data and lidar data behave fundamentally differently when it comes to detection and tracking; therefore, it is non-trivial to apply existing approaches in the lidar domain. We develop a model that performs both detection and tracking on point cloud data, such as lidar, based on a transformer.
17
+
18
+ One peculiarity of lidar data in an automotive setting is that the ego vehicle can travel significant distances between two frames, making an ego-motion compensation (EMC) necessary. Because the tracker state is encoded implicitly in feature space in our proposed model, we develop a novel trainable EMC module to perform the compensation in this space. The module can ego-motion correct the tracker state to any point in time, enabling us to be robust against frame skips and delayed sensor input, which is a common occurrence in practice [@shin_roarnet_2019].
19
+
20
+ To summarize, the main contributions of this work are:
21
+
22
+ - A transformer-based model for 3D detection is presented that is applicable to point clouds, particularly lidar data in an automotive application, and that serves as a building block for tracking in latent space.
23
+
24
+ - We develop a novel ego-motion compensation (EMC) module that operates in feature space and can be used to compensate skipped frames.
25
+
26
+ - We propose a novel tracking-and-detection model based on a transformer that can accept new input frames at arbitrary time steps. It is modular and therefore suitable for further enhancements such as multi-modality.
27
+
28
+ # Method
29
+
30
+ An overview of our proposed joint tracking and detection model can be found in Fig. [2](#fig_tracker_large){reference-type="ref" reference="fig_tracker_large"}. Each of its components is introduced in the following.
31
+
32
+ <figure id="fig_tracker_large" data-latex-placement="!t">
33
+ <embed src="figures/tracker_detail_ab_lowercase.pdf" />
34
+ <figcaption>Tracking and detection model overview. a. Detection model, which can function as a standalone detector as well as a track initializor in the first frame of a sequence. The input point cloud is pre-processed through a backbone and a positional encoding is added to retain the location information of the computed feature vectors. Through a sampling method, <span class="math inline"><em>M</em></span> anchor locations are obtained from the input point cloud, which are encoded to generate object queries for the transformer decoder that serve as slots for possible objects at the output. b. Joint tracking and detection with track and object queries, whereas the track queries are propagated from the previous time step and ego-motion corrected through the proposed EMC module. When a track query results in a valid object at the output of the decoder, it is considered a continued track, while new tracks originate from object queries.</figcaption>
35
+ </figure>
36
+
37
+ To perform object detection on lidar data with a transformer, we follow the general approach of the Detection Transformer (DETR) [@{carion_end--end_2020}]. However, it is non-trivial to apply this image-based model, which consists of an image backbone followed by a full transformer with encoder and decoder, to lidar data in an automotive context. One issue is that in a transformer *encoder*, the size of the self-attention matrix grows quadratically with the input sequence length $N$, limiting it to a maximum length of $N_{\textrm{max}}$, which can be as low as about $2000$ tokens, depending on the available GPU type. This is why DETR [@{carion_end--end_2020}] operates on a grid of only $35 \times 45$ cells as input to the encoder, which is feasible for image data. With automotive lidar data on the other hand, objects tend to be small compared to the sensor range. When a grid-based backbone for lidar data, such as PointPillars [@lang_pointpillars:_2019] is used before the encoder, this limitation in turn limits the side length of the grid to $\sqrt{N_{\textrm{max}}}$ and therefore bloats the size of each cell. The same holds for a point-based backbone such as PointNet++ [@{qi_pointnet++_2017}], where the influence region around each subsampled point grows as the number of tokens becomes limited.
38
+
39
+ An overview of our transformer detector for point clouds is pictured in Fig. [2](#fig_tracker_large){reference-type="ref" reference="fig_tracker_large"} (a). We found that the backbone we selected, PointPillars [@lang_pointpillars:_2019], can fulfill some of the context encoding that the transformer encoder normally would. Therefore, it is possible to remove said encoder entirely. An alternative to this would be to use one of the modified transformer models that aim to remedy the memory limitation issue that self-attention has [@wang_linformer:_2020] [@tay_efficient_2020]. Our experiments showed, however, that skipping the encoder entirely yields the best results, because the other remedies still limit the grid size. Following [@lang_pointpillars:_2019], the input to the PointPillars backbone is a point cloud, which consists of three-dimensional coordinates. Within PointPillars, it is discretized into a set of pillars, which are located on a grid on the birds-eye-view plane. The backbone outputs one feature vector per grid cell. Note that PointPillars could be replaced with any other backbone, which is able to output feature vectors that are bound to a certain location in 3D space or on a grid. From this location, a sine and cosine positional encoding [@{carion_end--end_2020}] is computed and added to the vectors. Then, the backbone result is flattened into a sequence of feature vectors and input into the transformer decoder to constitute keys and values for cross-attention. This decoder consists of six layers with eight attention heads. The dimension of the keys, values and queries is $d=256$. In our model, the queries for the decoder are data-dependent rather than learnt. Inspired by [@{misra_end--end_2021}], we use a farthest point sampling [@{qi_pointnet++_2017}] to obtain locations $\bm{\rho}_i$ from the data, with $i=1,\dots,M$, and encode them with a Fourier encoding [@tancik_fourier_2020; @rahimi2007random]: $$\begin{equation}
40
+ \label{eq_query_encoding}
41
+ \bm{y}_i=\textrm{FFN}\left[\sin(\bm{B}\bm{\rho}_i), \cos(\bm{B}\bm{\rho}_i)\right],
42
+ \end{equation}$$ where $\bm{B}\in \mathbb{R}^{\frac{d}{2}\times3}$ is a matrix that contains entries drawn from a normal distribution, the feed-forward network (FFN) consists of two layers with ReLU activation and is trained with the rest of the model, and $\bm{Y}=\{\bm{y}_i\}_{i=1}^M$ denotes the set of tokens that is input to the decoder. As introduced in [@rahimi2007random], each row of $\bm{B}$ projects the coordinates $\bm{\rho}_i$ into a different direction, which is chosen randomly in order to approximate a shift-invariant kernel with the inner product of two encoded points. We name the locations $\bm{\rho}_i\in \mathbb{R}^3$ *anchor locations* because the queries computed from them serve as a prior in the search for objects. However, the model is not restricted by them and can find objects at a distance from their anchor. Besides this, the anchor locations are different from anchor boxes, which are commonly used in other detection approaches such as [@lang_pointpillars:_2019].
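+
+ A small PyTorch-style sketch of this anchor-query encoding: the random projection and the two-layer ReLU FFN follow the description above, while the class name and the unit scale of $\bm{B}$ are our assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class AnchorQueryEncoder(nn.Module):
+     """Fourier-encode 3D anchor locations into d-dimensional object queries."""
+     def __init__(self, d=256):
+         super().__init__()
+         # Random projection B of shape (d/2, 3), fixed after initialization.
+         self.register_buffer("B", torch.randn(d // 2, 3))
+         self.ffn = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))
+
+     def forward(self, rho):              # rho: (M, 3) anchor locations
+         proj = rho @ self.B.t()          # (M, d/2) random directions
+         feats = torch.cat([proj.sin(), proj.cos()], dim=-1)  # (M, d)
+         return self.ffn(feats)           # object queries y_i
+
+ queries = AnchorQueryEncoder(d=256)(torch.rand(128, 3))      # 128 sampled anchors
+ ```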
43
+
44
+ In our model, the anchor locations change with every frame, since they are drawn from the data in the present input frame. Farthest point sampling is a commonly used, fast strategy to select locations within a point cloud that are spread out well over the data. For lidar data, location-bound queries are superior to learnt queries, because the point cloud can be very sparse. Otherwise, the model would need to waste resources on finding locations that actually contain data.
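+
+ For reference, a naive $O(M\cdot N)$ NumPy sketch of farthest point sampling as used here to pick the anchor locations (illustrative only, not the authors' implementation):
+
+ ```python
+ import numpy as np
+
+ def farthest_point_sampling(points, M):
+     """Greedily pick M points that are spread out over an (N, 3) point cloud."""
+     N = points.shape[0]
+     selected = [np.random.randint(N)]                 # arbitrary starting point
+     dist = np.full(N, np.inf)
+     for _ in range(M - 1):
+         # distance of every point to the nearest already-selected point
+         d_new = np.linalg.norm(points - points[selected[-1]], axis=1)
+         dist = np.minimum(dist, d_new)
+         selected.append(int(dist.argmax()))           # farthest remaining point
+     return points[selected]                           # M anchor locations
+
+ anchors = farthest_point_sampling(np.random.rand(5000, 3), M=128)
+ ```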
45
+
46
+ Now, each decoder query token $\bm{y}_i$, i.e. anchor-based object query, serves as slot for a possible object. From every *output* token $\bm{y}'_i$ of the decoder with anchor $\bm{\rho}_i$, a feed-forward network is used to compute the parameters $$\begin{equation}
47
+ \label{eq_box_params}
48
+ \bm{d}_i = (\Delta x,\Delta y,\Delta z,w,l,h,\gamma, v_x,v_y, {\tt cls}),
49
+ \end{equation}$$ $i=1,\dots , M$, with location $(x,y,z)=\bm{\rho}_i+(\Delta x,\Delta y,\Delta z)$, dimensions $(w,l,h)$, heading $\gamma$, velocity $(v_x,v_y)$ and class identifier `cls`.
50
+
51
+ During training, those $\bm{y}'_i$ with closest box parameters to ground truth are matched with the Hungarian algorithm in terms of $\ell_1$-distance, while all others are assigned a 'no-object' class id. An $\ell_1$-loss is then used to train the model. We use a pre-trained PointPillars backbone and refine it in combination with our model.
52
+
53
+ A model overview of our proposed tracker is pictured in Fig. [2](#fig_tracker_large){reference-type="ref" reference="fig_tracker_large"}, (a) and (b). To initialize the tracks, the detector as introduced above is run on the first frame at timestep $t$, with queries $\bm{Y}_t=\{\bm{y}_{t,i}\}_{i=1}^M$, obtaining decoder output $\bm{y}'_{t,i}$ and (merely as a by-product) detections $\bm{d}_{t,i}$. The high-dimensional vectors $\bm{y}'_{t,i}\in\mathbb{R}^d$, $i=1,\dots , M$ each contain *encoded* information about an object, such as its dimension, location and object type as well as additional latent space. This makes them usable to carry tracking information over to the next timestep [@meinhardt_trackformer:_2021]. Therefore, we only use these $d$-dimensional vectors as tracker state, instead of the low-dimensional bounding boxes $\bm{d}_{t,i}$.
54
+
55
+ With lidar data, the ego vehicle can move a significant distance between two frames, making an ego-motion correction (EMC) necessary: $$\begin{equation}
56
+ \label{eq_EMC}
57
+ \bm{y}''_{t,i}=\textrm{EMC}(\bm{y}'_{t,i}, \bm{\rho}_i, \bm{p}),
58
+ \end{equation}$$ where $\bm{p}$ is the ego pose change. Our EMC module is introduced in the following subsection. The transformed output tokens $\bm{y}''_{t,k}$, $k=1,\dots,K$ that belong to an object are used as input to the transformer decoder in the next timestep, denoted as track queries, where $K$ is the number of detected objects in frame $t$. Since it is always possible for a new track to spawn, the object queries $\bm{y}_{t+1,i}$, computed as in Eq. [\[eq_query_encoding\]](#eq_query_encoding){reference-type="ref" reference="eq_query_encoding"} from newly sampled anchor locations $\bm{\rho}_{t+1,i}$, are passed into the decoder as before to serve as slots for new objects, in addition to the track queries. The set of decoder tokens is therefore $\bm{Y}_{t+1}=\{\bm{y}''_{t,k}\}_{k=1}^K\cup\{\bm{y}_{t+1,i}\}_{i=1}^M$. Due to the transformer design [@vaswani_attention_2017], the number of tokens that is passed into the decoder at a time, i.e. track and object queries, does not impede its performance: An additional token merely adds an additional participant in attention and therefore an additional output token.
59
+
60
+ During training, pairs of consecutive frames are selected, where up to $n$ intermediate frames can be skipped, $n\in\mathbb{N}$. For the first frame of the pair, detections and corresponding decoder output $\bm{y}_i$ are computed. Those that were assigned closest to ground truth objects are then used as track queries for the second frame. From ground truth it is known which tracks are to be continued, which are occluded or terminated and which newly spawn. Those that shall continue are assigned to their respective track queries' slots. Those estimates belonging to track queries whose track is occluded or terminated are assigned to the 'no-object' class. Ground truth objects belonging to new tracks, on the other hand, are assigned to the closest estimate that stems from an object query using the Hungarian algorithm. As for the detector training, the $\ell_1$-loss is applied on the difference between the estimated box parameters and the ground truth.
61
+
62
+ At inference time, a track is considered to be continued if its corresponding track query results in a detection in the next frame with a confidence above a certain threshold $\lambda_{\textrm{track}}$. If, however, it results in a detection that is assigned to the 'no-object' class, the track is either terminated or occluded. To allow for recognition after occlusion, such track queries are carried over for some frames until the track is finally either terminated or continued. A new track, on the other hand, is spawned when one of the object queries detects a new object with a confidence above a certain threshold $\lambda_{\textrm{detect}}$. We found that a non-maximum suppression, which favours boxes that stem from track queries over those originating from object queries, improves the tracking results.
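+
+ The track-management rules above can be summarized in a short schematic; the threshold values, the patience counter for occluded tracks, and all names are our illustrative assumptions, and the 'no-object' check is collapsed into a simple confidence test:
+
+ ```python
+ import itertools
+
+ LAMBDA_TRACK, LAMBDA_DETECT, PATIENCE = 0.4, 0.7, 3   # assumed values
+ _new_id = itertools.count()
+
+ def update_tracks(tracks, track_outputs, object_outputs):
+     """tracks: {id: {'query', 'missed'}}; each output carries a confidence score and a query vector."""
+     alive = {}
+     for tid, out in track_outputs.items():                     # estimates from track queries
+         if out["score"] >= LAMBDA_TRACK:                       # track is continued
+             alive[tid] = {"query": out["query"], "missed": 0}
+         elif tracks[tid]["missed"] + 1 < PATIENCE:             # keep occluded track for a while
+             alive[tid] = {"query": tracks[tid]["query"],
+                           "missed": tracks[tid]["missed"] + 1}
+         # otherwise the track is terminated
+     for out in object_outputs:                                 # estimates from object queries
+         if out["score"] >= LAMBDA_DETECT:                      # a new track is spawned
+             alive[next(_new_id)] = {"query": out["query"], "missed": 0}
+     return alive
+ ```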
63
+
64
+ We utilize track augmentation methods, following [@meinhardt_trackformer:_2021]: Since the event that new tracks spawn occurs rather rarely, it is simulated by removing each track query with a certain probability. A track query can also randomly spawn an additional, previously discarded, track query, which is meant to simulate occluded or terminated tracks.
65
+
66
+ In Fig. [3](#fig_AttnViz){reference-type="ref" reference="fig_AttnViz"}, the cross-attention in the decoder between one track query and the input birds-eye-view grid is visualized, where dark purple denotes low, and brighter colors high attention. The object that belongs to this track query (yellow) moved between the previous and this frame. This is why, in the first decoder layer, the query seems to search for its object in the 'wrong' place, but, due to its broad field of view in attention, it is able to shift its attention until the last decoder layer. Note that we do not explicitly model the motion of tracked agents, which might be a beneficial addition to the model in the future. We do, however, correct the motion of the ego-vehicle between frames, since this ego-motion would require too large attention shifts to be corrected by the transformer. The proposed EMC module for this is introduced in the following subsection.
67
+
68
+ <figure id="fig_AttnViz" data-latex-placement="!tbp">
69
+
70
+ <figcaption>Attention visualization between one track query and each input grid cell. The tracked vehicle (yellow box) moved between the previous and this frame. The birds-eye-view grid cells are colored according to their attention score with the tracked vehicle’s query, where yellow denotes larger values, and blue small ones. An attention shift is visible: In layer <span class="math inline">0</span>, the track query seemingly expects the vehicle in the ’wrong’ place, but it is able to correct its focus until the last decoder layer. In black, the lidar data of the current frame is visible, as well as a second vehicle nearby (for which the attention is not pictured).</figcaption>
71
+ </figure>
72
+
73
+ <figure id="figEMC_module" data-latex-placement="t">
74
+ <embed src="figures/emc_matrix_less_wide.pdf" />
75
+ <figcaption>Ego-motion compensation (EMC) module overview. The ego-pose change consists of a translation and a rotation and it is converted into a transformation matrix through a feed-forward network. The feature vector that is to be ego-motion corrected is brought to the same length and the transformation is applied with a dot product, similar to a transformation in 3D space. A residual connection between the transformed and original feature vector is intended to prevent loss of information during the ego-motion correction.</figcaption>
76
+ </figure>
77
+
78
+ An ego-vehicle can travel a significant distance between two lidar frames, making an ego-motion correction necessary. Since the tracker state is encoded in feature vectors rather than explicit parameters, such as bounding boxes, its transformation with the known pose change is non-trivial. Note that image-based trackers, such as [@meinhardt_trackformer:_2021], do not require such a correction, since a moving ego-vehicle does not impact the position of objects in image space as much as it does in lidar space.
79
+
80
+ An alternative to correcting the internal state of the tracker may be to transform the input data into a certain global coordinate frame. However, after processing many consecutive frames, the area occupied by the measured data will become very large. This is problematic since the positional encoding at the transformer input would need to have an arbitrarily high resolution to cover the space.
81
+
82
+ Therefore, the only option for ego-motion compensation with our combined tracker and detector is shifting the internal tracker state according to the pose change. To achieve this task, we develop a novel ego-motion compensation (EMC) module as introduced in Eq. [\[eq_EMC\]](#eq_EMC){reference-type="ref" reference="eq_EMC"}, which is pictured in Fig. [4](#figEMC_module){reference-type="ref" reference="figEMC_module"}. Our model is inspired by a transformation in 3D space: We compute a feature-agnostic transformation matrix $\bm{T}\in\mathbb{R}^{k\times k}$ from the known pose change $\bm{p}$ by using a feed-forward network (FFN) and reshaping the output, where $\bm{p}=(t_1,t_2,t_3, q_0,q_1,q_2,q_3)$ contains a translation as well as a rotation in quaternion form. With a second FFN, the feature vectors $\bm{y}'_i$ to be transformed are reduced to length $k$, after which the transformation takes place with a dot product. The result of the transformation is scaled back up to length $d$ with a FFN and a residual connection is added that is meant to prevent information loss. The overall model design serves two purposes: Firstly, the transformation is performed in a lower dimension than the full feature length to encourage only transforming necessary parts of the vector. Besides, $\bm{T}$ is computed without access to the feature vectors, aiming to make it independent of the location of the objects to be transformed.
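+
+ A PyTorch-style sketch of such an EMC module follows; the layer widths, the internal dimension $k$, and the omission of the anchor input are our assumptions, not the exact implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class EgoMotionCompensation(nn.Module):
+     """Transform d-dim track-query features by the ego-pose change (translation + quaternion)."""
+     def __init__(self, d=256, k=64):
+         super().__init__()
+         self.k = k
+         self.pose_to_T = nn.Sequential(nn.Linear(7, 128), nn.ReLU(), nn.Linear(128, k * k))
+         self.down = nn.Linear(d, k)      # reduce feature to length k before transforming
+         self.up = nn.Linear(k, d)        # scale the transformed vector back to length d
+
+     def forward(self, y, pose):          # y: (K, d) track features, pose: (7,) = (t1,t2,t3,q0..q3)
+         T = self.pose_to_T(pose).view(self.k, self.k)   # feature-agnostic transformation matrix
+         z = self.down(y) @ T.t()                        # apply the transform via a dot product
+         return y + self.up(z)                           # residual connection prevents info loss
+ ```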
83
+
84
+ For realistic training of the EMC module, nuScenes [@caesar_nuscenes:_2020] data is used. Each frame is first input into the transformer detector, which has previously been trained and is now used with fixed weights, to obtain the decoder output $\bm{y}'_i$. These vectors are input into the EMC module, together with the pose change $\bm{p}$ w.r.t. the following frame that is obtained from nuScenes meta data. To obtain ground truth data for training, the detector's FFN is used to compute box parameters $\bm{d}_i$ as defined in Eq. [\[eq_box_params\]](#eq_box_params){reference-type="ref" reference="eq_box_params"} from the vectors $\bm{y}'_i$. With the pose change information, the expected location, heading and velocity of each box after transformation for the following frame is computed, using standard transformations in 3D space, resulting in parameters $(x',y',z'), \gamma', (v'_x,v'_y)$ for each object. Meanwhile, the size and class identifier shall be unchanged. Now, the change in location is implemented into a new, transformed anchor $\bm{\rho}'_i=(x',y',z')$, while the other parameters serve as ground truth for the EMC module: $$\begin{equation}
85
+ \bm{d}'_i = (0, 0, 0,w,l,h,\gamma', v'_x,v'_y, {\tt cls}),
86
+ \end{equation}$$ where $\Delta x,\Delta y$ and $\Delta z$ are set to zero to align them with the new anchor location $\bm{\rho}'_i$. To allow for large pose changes and to be prepared for possible frame skips in practice, the frame pairs used for training can have up to $n$ discarded frames between them, $n \in \mathbb{N}$.
2207.13440/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2207.13440/paper_text/intro_method.md ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Scene graphs allow for structured understanding of objects and their interactions within a scene. A *scene graph* is a graph where nodes represent objects within the scene, each detailed by the class label and spatial location, and the edges capture the relationships between object pairs. These relationships are usually represented by a <subject, predicate, object> triple. Effectively generating such graphs, from either images or videos, has emerged as a core problem in computer vision [13, 34, 38, 44, 48, 50, 52]. Scene graph representations can be leveraged to improve performance on a variety of complex high-level tasks like VQA [23, 45], Image Captioning [16, 51], and Image Generation [25].
4
+
5
+ The task of scene graph generation involves estimating the conditional distribution of the relationship triplets given an image. Naively modelling this distribution is often infeasible as the space of possible relationship triplets is considerably larger than the space of possible subjects, objects, and predicates. To circumvent this issue, existing methods factorize the aforementioned distribution into easy-to-estimate conditionals. For example, *two-stage* approaches, like [10, 45, 53], follow the graphical model image → [subject, object] → predicate, wherein the subjects and objects are independently obtained via a pre-trained detector like Faster-RCNN [40]. These are then consumed by a downstream network to estimate predicates. Any such factorization induces conditional dependencies (and independencies) that heavily influence model characteristics. For example, in the aforesaid graphical model, errors made during the estimation of the subjects and objects are naturally propagated towards the predicate distribution, which makes the estimation of predicates involving classes with poor detectors challenging. Furthermore, the assumed fixed factorization might not always be optimal. Having information about predicates in an image can help narrow down the space of possible subjects/objects, *e.g*., predicate wearing makes it likely that the subject is a person.
8
+
9
+ Additionally, due to the *two-stage* nature of most existing scene graph approaches (barring very recent methods like [13, 30, 34]), the image feature representations obtained from a pre-trained task-oblivious detector might not be optimally catered towards the scene graph generation problem. Intuitively, one can imagine that the information required to accurately localize an object *might not* necessarily be sufficient for predicate prediction, and by extension, accurate scene graph generation. Moreover, two-stage approaches often suffer from efficiency issues as the detected objects are required to be paired up before predicate assignment. Doing so naively, by pairing all possible objects [48], results in a quadratic number of pairs that need to be considered. Traditional approaches deal with this using heuristics, such as IoU-threshold-based pairing [45, 53].
10
+
11
+ In this work, we aim to alleviate the previously mentioned issues arising from a fixed factorization and potentially limited object-centric feature representations by proposing a *general* framework wherein the subject, objects, and predicates can be inferred jointly (*i.e*., depend on one another), while simultaneously avoiding the complexity of exponential search space of relational triplets. This is achieved by performing message passing within a Markov Random Field (MRF) defined by components of a relational triplet (Figure 1(a)). Unrolling this message passing is equivalent to an *iterative* refinement procedure (Figure 1(b)), where each message passing stage takes in the estimate from the previous step. Our proposed framework models this iterative refinement strategy by first producing a scene graph estimate using a traditional factorization, and then systematically improving it over multiple refinement steps, wherein each refinement is conditioned on the graph generated in the previous iteration.
12
+
13
+ ![](_page_1_Figure_3.jpeg)
14
+
15
+ Figure 1: Iterative Refinement for Scene Graph Generation. By unrolling the message passing in a Markov Random Field (a), our proposed approach essentially models an iterative refinement process (b) wherein each modification step is conditioned on the previously generated graph estimate.
16
+
17
+ This conditioning across refinement steps compensates for the conditional independence assumptions and lets our framework jointly reason over the subjects (s), objects (o), and predicates (p).
18
+
19
+ **Contributions.** To realize the aforementioned iterative framework, we propose a novel and intuitive transformer [46] based architecture. On a technical level, our model defines three separate multi-layer multi-output synchronized decoders, wherein each decoder layer is tasked with modeling either the subject, object, or predicate components of a relationship triplet. Therefore, the combined outputs from each layer of the three decoders generate a scene graph estimate. The inputs to each decoder layer are conditioned to enable joint reasoning across decoders and effective refinement of previous layer estimates. This conditioning is achieved *implicitly* via a novel joint loss, and *explicitly* via cross-decoder and layer-wise attention. Additionally, each decoder layer is also conditioned on the image features, which are provided by a shared encoder. As our proposed model is end-to-end trainable, it addresses the limitation of two-stage approaches, allowing image features to directly adapt to the scene graph generation task. Finally, to tackle the long-tail nature of the scene graph predicate classes [10], we employ a loss weighting strategy to enable a flexible trade-off between dominant (head) and underrepresented (tail) predicate classes in the long-tail distribution. In contrast to data sampling strategies [10, 31], this has the benefit of not requiring additional fine-tuning of models with sampled data after training. We illustrate that our proposed architecture achieves state-of-the-art performance on two benchmark datasets – Visual Genome [29] and Action Genome [24]; and thoroughly analyze the effectiveness of the approach as a function of the refinement steps and design choices employed, and as a generic add-on to an existing architecture, MOTIF [53].
20
+
21
+ # Method
22
+
23
+ For a given image $\mathbf{I}$, a scene graph $\mathbf{G}$ can be represented as a set of triplets $\mathbf{G} = \{\mathbf{r}_i\}_{i \leq n} = \{(\mathbf{s}_i, \mathbf{p}_i, \mathbf{o}_i)\}_{i \leq n}$, where $\mathbf{r}_i$ denotes the *i*-th triplet $(\mathbf{s}_i, \mathbf{p}_i, \mathbf{o}_i)$, and n denotes the total number of triplets. The subject $\mathbf{s}_i$ denotes a tuple $(\mathbf{s}_{i,c}, \mathbf{s}_{i,b})$, where $\mathbf{s}_{i,c} \in \mathbb{R}^{\eta}$ is the one-hot class label, and $\mathbf{s}_{i,b} \in \mathbb{R}^4$ is the corresponding bounding box coordinates. $\eta$ is the total number of possible entity classes in the dataset. The object $\mathbf{o}_i$ and predicate $\mathbf{p}_i$ can similarly be represented as tuples $(\mathbf{o}_{i,c}, \mathbf{o}_{i,b})$ and $(\mathbf{p}_{i,c}, \mathbf{p}_{i,b})$ respectively. Note that $\mathbf{p}_{i,b}$ corresponds to the box formed by the centers of $\mathbf{s}_{i,b}$ and $\mathbf{o}_{i,b}$ as the diagonally opposite coordinates. Additionally, $\mathbf{p}_{i,c} \in \mathbb{R}^v$ represents the corresponding one-hot predicate label between the pair $(\mathbf{s}_i, \mathbf{o}_i)$, where v is the total number of possible predicate classes in the dataset. Then the task of scene graph generation can be thought of as modelling the conditional distribution $\Pr(\mathbf{G} \mid \mathbf{I})$. This distribution can be expressed as a product of conditionals,
24
+
25
+ $$\Pr\left(\mathbf{G} \mid \mathbf{I}\right) = \Pr\left(\left\{\mathbf{s}_{i}\right\} \mid \mathbf{I}\right) \cdot \Pr\left(\left\{\mathbf{o}_{i}\right\} \mid \left\{\mathbf{s}_{i}\right\}, \mathbf{I}\right) \cdot \Pr\left(\left\{\mathbf{p}_{i}\right\} \mid \left\{\mathbf{s}_{i}\right\}, \left\{\mathbf{o}_{i}\right\}, \mathbf{I}\right)$$
26
+ (1)
27
+
28
+ where $\{.\}$ denotes a set. For brevity, we omit explicitly mentioning the total number of set elements n throughout the paper. Existing approaches model this product of conditionals by making some underlying assumptions. For example, [10, 45, 53] assume conditional independence between $\mathbf{s}_i$ and $\mathbf{o}_i$ as they rely on heuristics to obtain the entity pairs. However, modelling the conditional in Equation 1 (or any other equivalent factorization) in such a "one-shot" manner makes certain assumptions on the flow of information, which, in this case, is from $\mathbf{s}_i \to \mathbf{o}_i \to \mathbf{p}_i$. Therefore, any errors made during the estimation of $\mathbf{s}_i$ are naturally propagated towards the estimation of $\mathbf{o}_i$ and $\mathbf{p}_i$. Additionally, the subject (or object) estimation procedure $\Pr\left(\{\mathbf{s}_i\} \mid \mathbf{I}\right)$ is completely oblivious to the estimated predicate $\mathbf{p}_i$.
29
+
30
+ ![](_page_3_Figure_0.jpeg)
31
+
32
+ Figure 2: **Transformer Architecture for Iterative Refinement.** For a given image, the model extracts features via a convolutional backbone and a transformer encoder. The individual components of a relationship triplet are generated using separate subject, object, and predicate multi-layer decoders. The inputs to each layer of the decoder is appropriately conditioned. For example, for the predicate decoder, the positional embeddings are conditioned on the outputs generated by the subject and object decoders (blue Attn module) and the queries are conditioned on the previously generated graph estimate (orange Attn module). The model is additionally implicitly conditioned and trained in an end-to-end fashion using a joint matching loss.
33
+
34
+ Having access to such information can help the subject (or object) predictor update its beliefs and significantly narrow down the space of feasible entity pairs.
35
+
36
+ Contrary to existing works, our proposed formulation moves away from the "one-shot" generation ideology described in Equation 1. We instead argue for modelling the task of scene graph generation as an iterative refinement procedure, wherein the scene graph estimate at step t, $\mathbf{G}^t$ , is dependent on the previous estimates $\{\mathbf{G}^{t'}\}_{t' < t}$ . Formally, our aim is to model the conditional distribution $\Pr\left(\mathbf{G}^t \mid \{\mathbf{G}^{t'}\}_{t' < t}, \mathbf{I}\right)$ . Assuming Markov property holds, this can be conveniently factorized as,
37
+
38
+ $$\Pr\left(\mathbf{G}^{t} \mid \mathbf{G}^{t-1}, \mathbf{I}\right) = \underbrace{\Pr\left(\left\{\mathbf{s}_{i}^{t}\right\} \mid \mathbf{G}^{t-1}, \mathbf{I}\right)}_{\text{Subject Predictor}} \cdot \underbrace{\Pr\left(\left\{\mathbf{o}_{i}^{t}\right\} \mid \left\{\mathbf{s}_{i}^{t}\right\}, \mathbf{G}^{t-1}, \mathbf{I}\right)}_{\text{Object Predictor}} \cdot \underbrace{\Pr\left(\left\{\mathbf{p}_{i}^{t}\right\} \mid \left\{\mathbf{s}_{i}^{t}\right\}, \left\{\mathbf{o}_{i}^{t}\right\}, \mathbf{G}^{t-1}, \mathbf{I}\right)}_{\text{Predicate Predictor}}$$
40
+ (2)
41
+
43
+
44
+ Note that even though we assume the flow of information to be from $\mathbf{s}_i^t \to \mathbf{o}_i^t \to \mathbf{p}_i^t$ for $\mathbf{G}^t$ , conditioning on the previous graph estimate $\mathbf{G}^{t-1}$ allows the subject, object, and predicate predictors to jointly reason and update their beliefs, leading to better predictions. Additionally, the framework described in Equation 2 is model agnostic and can be implemented using any of the existing architectures.
45
+
46
+ As described in Section 3, our proposed iterative scene graph generation formulation is agnostic to the architectural choices used to realise the subject, object, and predicate predictors. In this section we provide a realization of the proposed formulation in Equation 2 using Transformer networks [46]. The choice of using transformer networks is motivated by their natural tendency to model iterative refinement behaviour under the Markov property, wherein each layer of the transformer decoder takes as input the output of the previous layer. Our novel end-to-end trainable transformer based iterative generation architecture builds on top of the DETR [2] framework, which is shown to be effective for the task of object detection. Our proposed model architecture is shown in Figure 3.
47
+
48
+ Given an image, our proposed approach first obtains corresponding image features using a combination of a convolutional backbone and a multi-layer transformer encoder, akin to DETR [2]. These image features are used as inputs to the subject, object, and predicate predictors, each implemented as a multi-layer Transformer decoder. To generate a scene graph estimate $\hat{\mathbf{G}}^t$ at step t in accordance with Equation 2, the queries used in each predictor decoder are appropriately conditioned. For example, in the case of the predicate decoder, its input queries are conditioned on the subject $(\{\hat{\mathbf{s}}_i^t\})$ and object $(\{\hat{\mathbf{o}}_i^t\})$ estimates at step t. Additionally, the input to each predictor decoder is infused with all decoder estimates $\{(\hat{\mathbf{s}}_i^{t-1}, \hat{\mathbf{p}}_i^{t-1}, \hat{\mathbf{o}}_i^{t-1})\}$ from the previous step t-1 via a structured attentional mechanism.
49
+
50
+ The entire model is trained end-to-end, with a novel joint loss applied at each step t to ensure the generation of a valid scene graph at every level. This section describes these components in detail.
51
+
52
+ Similar to DETR [2], for each image $\mathbf{I}$ , our proposed architecture uses a deep convolutional network (like ResNet [21]) to obtain image level spatial map $\bar{\mathbf{I}} \in \mathbb{R}^{c \times w \times h}$ , where c is the number of channels, and w,h correspond to the spatial dimensions. A multi-layer encoder $f_e$ then transforms $\bar{\mathbf{I}}$ into a position-aware flattened image feature representation $\mathbf{Z} \in \mathbb{R}^{d \times wh}$ , where d < c.
53
+
54
+ Our approach models each of the subject, object, and predicate predictors using multi-layer transformer decoders [2], denoted by $f_{\mathbf{s}}$, $f_{\mathbf{o}}$, and $f_{\mathbf{p}}$ respectively. The t-th layer of each decoder, denoted as $f_{\mathbf{x}}^t$; $\mathbf{x} \in \{\mathbf{s}, \mathbf{o}, \mathbf{p}\}$, is tasked with generating the step t scene graph $\mathbf{G}^t$. Therefore, at each step t, the decoders output sets of feature representations $\{\mathbf{q}_{\mathbf{x},i}^t\}$; $\mathbf{x} \in \{\mathbf{s}, \mathbf{o}, \mathbf{p}\}$, which are transformed into a set of triplet estimates $\{(\hat{\mathbf{s}}_i^t, \hat{\mathbf{p}}_i^t, \hat{\mathbf{o}}_i^t)\}$ via fully-connected feed forward layers.
55
+
56
+ Specifically, for a decoder $f_{\mathbf{x}}$ ; $\mathbf{x} \in \{\mathbf{s}, \mathbf{o}, \mathbf{p}\}$ , an arbitrary layer t takes as input a set of queries $\{\mathbf{q}_{\mathbf{x},i}^{t-1}\}$ and a set of learnable positional encodings $\{\mathbf{p}_{\mathbf{x},i}\}$ , where $\mathbf{q}_{\mathbf{x},i}^{t-1}$ ; $\mathbf{p}_{\mathbf{x},i} \in \mathbb{R}^d$ . The output representations $\{\mathbf{q}_{\mathbf{x},i}^t\}$ are then obtained via a combination of self-attention between the input queries, and encoder-decoder attention across the encoder output $\mathbf{Z}$ . These attention modules allow the decoder to jointly reason across all queries, while simultaneously incorporating context from the input image.
57
+
58
+ At a given step t, naively using the inputs $\{\mathbf{q}_{\mathbf{x},i}^{t-1}\}$ and $\{\mathbf{p}_{\mathbf{x},i}\}$ to generate $\mathbf{G}^t$ forgoes leveraging the compositional property of relations. As described in Equation 2, for any arbitrary step t, our proposed formulation entails two types of conditioning for better scene graph estimation. The first involves conditioning decoders on the step t outputs, specifically the object decoder $f_{\mathbf{o}}^t$ on the subject decoder $f_{\mathbf{s}}^t$, and the predicate decoder $f_{\mathbf{p}}^t$ on both $f_{\mathbf{s}}^t$ and $f_{\mathbf{o}}^t$. The second requires all three decoder layers at step t to be conditioned on the outputs generated at step t-1. To effectively implement this design, we modify the inputs to each decoder layer $f_{\mathbf{x}}^t$. Specifically, the positional encodings $\{\mathbf{p}_{\mathbf{x},i}\}$ are modified to condition them on step t outputs, and the queries $\mathbf{q}_{\mathbf{x},i}^{t-1}$ are updated to incorporate information from the previous step t-1. Modifying the positional encoding and queries separately allows the model to easily disentangle and differentiate between the two conditioning types.
59
+
60
+ **Conditional Positional Encodings.** At a particular step t, the conditional positional encodings for the three decoders are obtained as,
61
+
62
+ $$\begin{aligned} \{\widehat{\mathbf{p}}_{\mathbf{s},i}^t\} &= \{\mathbf{p}_{\mathbf{s},i}\} \\ \{\widehat{\mathbf{p}}_{\mathbf{o},i}^t\} &= \{\mathbf{p}_{\mathbf{o},i}\} + \text{FFN}\left(\text{MultiHead}\left(\{\mathbf{p}_{\mathbf{o},i}\}, \{\widetilde{\mathbf{q}}_{\mathbf{s},i}^t\}, \{\mathbf{q}_{\mathbf{s},i}^t\}\right)\right) \\ \{\widehat{\mathbf{p}}_{\mathbf{p},i}^t\} &= \{\mathbf{p}_{\mathbf{p},i}\} + \text{FFN}\left(\text{MultiHead}\left(\{\mathbf{p}_{\mathbf{p},i}\}, \{\widetilde{\mathbf{q}}_{\mathbf{s},i}^t \oplus \widetilde{\mathbf{q}}_{\mathbf{o},i}^t\}, \{\mathbf{q}_{\mathbf{s},i}^t \oplus \mathbf{q}_{\mathbf{o},i}^t\}\right)\right) \end{aligned}$$
63
+ (3)
64
+
65
+ where MultiHead(Q, K, V) is the Multi-Head Attention module introduced in [46], FFN(.) is a fully-connected feed forward network, and $\oplus$ is the concatenation operation. Additionally, $\widetilde{\mathbf{q}}_{\mathbf{x},i}^t = \mathbf{q}_{\mathbf{x},i}^t + \mathbf{p}_{\mathbf{x},i}^t; \mathbf{x} \in \{\mathbf{s}, \mathbf{o}, \mathbf{p}\}$ is the position-aware query.
66
+
67
+ **Conditional Queries.** Similarly, for a step t, the conditional queries for the subject decoder are defined as,
68
+
69
+ $$\{\widehat{\mathbf{q}}_{\mathbf{s},i}^{t-1}\} = \{\mathbf{q}_{\mathbf{s},i}^{t-1}\} + \mathtt{FFN}\left(\mathtt{MultiHead}\left(\{\widetilde{\mathbf{q}}_{\mathbf{s},i}^{t-1}\}, \{\widetilde{\mathbf{q}}_{\mathbf{s},i}^{t-1} \oplus \widetilde{\mathbf{q}}_{\mathbf{o},i}^{t-1} \oplus \widetilde{\mathbf{q}}_{\mathbf{p},i}^{t-1}\}, \{\mathbf{q}_{\mathbf{s},i}^{t-1} \oplus \mathbf{q}_{\mathbf{o},i}^{t-1} \oplus \mathbf{q}_{\mathbf{p},i}^{t-1}\}\right)\right) \tag{4}$$
70
+
71
+ $\widehat{\mathbf{q}}_{\mathbf{o},i}^{t-1}$ and $\widehat{\mathbf{q}}_{\mathbf{p},i}^{t-1}$ are defined identically. For a decoder layer $f_{\mathbf{x}}^t; \mathbf{x} \in \{\mathbf{s}, \mathbf{o}, \mathbf{p}\}$ we use the conditioned positional encodings $\{\widehat{\mathbf{p}}_{\mathbf{x},i}^t\}$ and queries $\{\widehat{\mathbf{q}}_{\mathbf{x},i}^{t-1}\}$ as input.
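The conditioning in Equations 3 and 4 can be realized with off-the-shelf attention modules. The sketch below shows the last line of Equation 3 (conditioning the predicate positional encodings on the step-t subject and object queries); the hidden size, number of queries, and the choice to treat $\oplus$ as feature-wise concatenation handled through the `kdim`/`vdim` arguments are assumptions made purely for illustration.

```python
import torch
import torch.nn as nn

d, n = 256, 100  # hidden size and number of queries (illustrative values)

# Attention for Eq. 3: queries live in R^d, keys/values are the feature-wise
# concatenation of subject and object representations (R^{2d}).
attn = nn.MultiheadAttention(embed_dim=d, num_heads=8, kdim=2 * d, vdim=2 * d,
                             batch_first=True)
ffn = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))

# Step-t subject/object decoder outputs and their positional encodings (dummy tensors).
q_s, q_o = torch.randn(1, n, d), torch.randn(1, n, d)
p_s, p_o = torch.randn(1, n, d), torch.randn(1, n, d)
p_p = torch.randn(1, n, d)  # learnable predicate positional encodings

key = torch.cat([q_s + p_s, q_o + p_o], dim=-1)  # position-aware queries, concatenated
val = torch.cat([q_s, q_o], dim=-1)
ctx, _ = attn(p_p, key, val)
p_p_hat = p_p + ffn(ctx)  # conditioned predicate positional encodings (Eq. 3, last line)
```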
72
+
73
+ Our proposed transformer-based refinement architecture can be trained in an end-to-end fashion. To ensure that a valid scene graph is generated at every level, we propose a novel joint loss that is applied at each step t. Therefore, the combined loss $\mathcal{L}$ can be expressed as $\mathcal{L} = \sum_t \mathcal{L}^t = \sum_t \left(\mathcal{L}^t_{\mathbf{s}} + \mathcal{L}^t_{\mathbf{o}} + \mathcal{L}^t_{\mathbf{p}}\right)$ , where $\mathcal{L}^t_{\mathbf{x}}$ ; $\mathbf{x} \in \{\mathbf{s}, \mathbf{o}, \mathbf{p}\}$ represents the loss applied to the t-th layer of the decoder $f_{\mathbf{x}}$ . Our approach generates a fixed-size set of n triplet estimates $\{\hat{\mathbf{r}}^t_i\} = \{(\hat{\mathbf{s}}^t_i, \hat{\mathbf{p}}^t_i, \hat{\mathbf{o}}^t_i)\}$ at each step t, where n is larger than the number of ground truth relations for a given image. Therefore, in order to effectively optimize the proposed model, we obtain an optimal bipartite matching between the predicted and ground truth triplets. Note that, contrary to the matching algorithm in [2], our proposed matching is defined over
74
+
75
+ triplets rather than individual entities. Additionally, instead of independently computing the loss over each decoder layer as in [2], our loss computes a joint matching across all refinement layers.
76
+
77
+ Let $G = \{\mathbf{r}_i\} = \{(\mathbf{s}_i, \mathbf{p}_i, \mathbf{o}_i)\}$ denote the ground truth scene graph for an image I. Note that, as the number of ground truth relations is less than n, we convert G to an n-sized set by padding it with $\emptyset$ (no relation). The goal then is to find a bipartite matching between the ground truth graph G and the set of all graph estimates $\{\hat{G}^t\}$ that minimizes the *joint matching cost*. Specifically, assuming $\sigma$ to be a valid permutation of n elements,
78
+
79
+ $$\widehat{\sigma} = \underset{\sigma}{\operatorname{arg\,min}} \sum_{t} \sum_{i=1}^{n} \mathcal{L}_{\text{rel}} \left( \mathbf{r}_{i}, \widehat{\mathbf{r}}_{\sigma(i)}^{t} \right)$$
80
+ (5)
81
+
82
+ where the pair-wise relation matching cost $\mathcal{L}_{rel}$ is defined as,
83
+
84
+ $$\mathcal{L}_{\text{rel}}\left(\mathbf{r}_{i}, \widehat{\mathbf{r}}_{\sigma(i)}^{t}\right) = -\mathbb{1}_{\left\{\mathbf{r}_{i} \neq \varnothing\right\}} \Big[ \widehat{\mathbf{s}}_{\sigma(i),c}^{t} \cdot \mathbf{s}_{i,c} - \mathcal{L}_{\text{box}}\left(\widehat{\mathbf{s}}_{\sigma(i),b}^{t}, \mathbf{s}_{i,b}\right) + \widehat{\mathbf{o}}_{\sigma(i),c}^{t} \cdot \mathbf{o}_{i,c} - \mathcal{L}_{\text{box}}\left(\widehat{\mathbf{o}}_{\sigma(i),b}^{t}, \mathbf{o}_{i,b}\right) + \widehat{\mathbf{p}}_{\sigma(i),c}^{t} \cdot \mathbf{p}_{i,c} - \mathcal{L}_{\text{box}}\left(\widehat{\mathbf{p}}_{\sigma(i),b}^{t}, \mathbf{p}_{i,b}\right) \Big]$$
85
+ (6)
86
+
87
+ where $\cdot$ denotes the vector dot product, and $\mathcal{L}_{\text{box}}$ is a combination of the L1 and generalized IoU losses. Please refer to Section 3 for clarification on the notation. The optimal permutation $\hat{\sigma}$ can then be computed using the Hungarian algorithm. The loss $\mathcal{L}_{\mathbf{s}}^{t}$ is then defined as,
88
+
89
+ $$\mathcal{L}_{\mathbf{s}}^{t} = \sum_{i=1}^{n} \left[ -\log \left( \widehat{\mathbf{s}}_{\hat{\sigma}(i),c}^{t} \cdot \mathbf{s}_{i,c} \right) + \mathbb{1}_{\left\{ \mathbf{r}_{i} \neq \emptyset \right\}} \mathcal{L}_{\text{box}} \left( \widehat{\mathbf{s}}_{\hat{\sigma}(i),b}^{t}, \mathbf{s}_{i,b} \right) \right]$$
90
+ (7)
91
+
92
+ $\mathcal{L}_{\mathbf{o}}^t$ and $\mathcal{L}_{\mathbf{p}}^t$ are defined identically. Note that as we use the same permutation $\hat{\sigma}$ for all refinement layers t, it induces strong *implicit* dependencies between the subject, object, and predicate decoders. The potency of the aforementioned implicit conditioning is highlighted in the experiment section.
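A minimal sketch of the joint matching of Equation 5, assuming classification scores only (the box terms of Equation 6 are omitted for brevity) and using SciPy's Hungarian solver; the array shapes and argument names are illustrative, not the authors' implementation.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def match_triplets(pred_probs_per_step, gt_labels, pad_mask):
    """Joint matching across all refinement steps (Eq. 5), classification cost only.

    pred_probs_per_step: list over steps t of dicts with 's', 'p', 'o' arrays of
                         shape (n, num_classes) holding softmax scores.
    gt_labels: dict with 's', 'p', 'o' integer arrays of shape (n,) (padded with 0s).
    pad_mask: boolean array (n,), True where the ground truth is a real relation.
    """
    n = pad_mask.shape[0]
    cost = np.zeros((n, n))
    for probs in pred_probs_per_step:            # sum the cost over all steps t
        for key in ("s", "p", "o"):
            # entry [i, j] = -score of prediction i for the class of ground truth j;
            # padded slots contribute zero cost.
            cost -= probs[key][:, gt_labels[key]] * pad_mask[None, :]
    pred_idx, gt_idx = linear_sum_assignment(cost)  # Hungarian algorithm
    return pred_idx, gt_idx
```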
93
+
94
+ Due to the inherent long-tail nature of the scene graph generation task, using an unbiased loss often leads to the model prioritizing the most common (a.k.a. head) predicate classes, like *has* and *on*, which have abundant training examples. To afford our proposed model the flexibility to trade off between head and tail classes, we integrate a loss-reweighting scheme into the model training procedure. Note that, contrary to existing methods that do this post-hoc via fine-tuning the final layer of the trained network (see Section 2), we instead train the model with this weighting to allow the internal feature representations to reflect the desired trade-off. To the best of our knowledge, our paper is the first to illustrate the effectiveness of such a strategy for the task of scene graph generation. For a particular predicate class $c \in [1, v]$ , we define the class weight $w_c$ as $\max\left\{(\alpha/f_c)^\beta, 1.0\right\}$ , where $f_c$ is the frequency of the predicate class c in the training set, and $\{\alpha, \beta\}$ are scaling parameters. Note that this weighting scheme is similar to the data sampling strategy described in [18, 31]. However, instead of modifying the training set, we scale the loss of each class by the factor $w_c$ when computing the predicate classification loss $\mathcal{L}_{\mathbf{p}}^t$ . Therefore, $\mathcal{L}_{\mathbf{p}}^t$ can be defined similarly to Equation 7,
95
+
96
+ $$\mathcal{L}_{\mathbf{p}}^{t} = \sum_{i=1}^{n} \left[ -w_{c} \log \left( \widehat{\mathbf{p}}_{\widehat{\sigma}(i),c}^{t} \cdot \mathbf{p}_{i,c} \right) + \mathbb{1}_{\left\{ \mathbf{r}_{i} \neq \varnothing \right\}} \mathcal{L}_{\text{box}} \left( \widehat{\mathbf{p}}_{\widehat{\sigma}(i),b}^{t}, \mathbf{p}_{i,b} \right) \right]. \tag{8}$$
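A small sketch of the re-weighting factor described above; the example frequencies and the alpha, beta values are made up purely for illustration.

```python
import numpy as np

def predicate_class_weights(class_frequencies, alpha, beta):
    """w_c = max((alpha / f_c)**beta, 1.0): rare (tail) predicates get weights > 1,
    frequent (head) predicates keep weight 1. alpha and beta are scaling knobs."""
    f = np.asarray(class_frequencies, dtype=np.float64)
    return np.maximum((alpha / f) ** beta, 1.0)

# Illustrative frequencies for three predicate classes (e.g. "on", "has", "riding").
weights = predicate_class_weights([50000, 30000, 200], alpha=1000, beta=0.5)
# weights[c] multiplies the cross-entropy term of L_p^t in Eq. 8,
# e.g. loss_c = -weights[c] * log(prob of the ground-truth predicate class).
```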
2209.00465/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-08-07T07:02:07.353Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36" etag="GQ7PtN_Kp5w1S_dNn9dl" version="20.0.2" type="device"><diagram id="7Nq-QcSDTZuC8Fw2rsQU" name="Page-1">7Vxbc5s4FP41ntl9SIa77cc0SbsP3Taz2dm2jzLINlsZeYUcO/31K4GE0cUOccA4cV4SOAiBzuU7N+GBf73YfCJgOf8TJxANPCfZDPybgee5wchl/zjlsaSM/HFJmJE0EYO2hPv0FxRER1BXaQJzZSDFGNF0qRJjnGUwpgoNEILX6rApRupTl2AmnuhsCfcxQNAY9i1N6Fyswhtu6X/AdDaXT3Yjsb4FkIPFxPkcJHhdI/m3A/+aYEzLo8XmGiLOPMmX8r6PO65WL0ZgRpvcAP+5cr6mDx9X8wg5X2Nn6V18uYj8cpoHgFZixeJt6aNkAUwYR8QpJnSOZzgD6HZL/UDwKksgf47DzrZjPmO8ZESXEf+FlD4K8YIVxYw0pwskrk5xRsVFN2Ln5trEcnO8IjHctyCpJIDMIN03UCyUr672CMG7TxAvICWPbACBCND0QdUHINRqVo3bcp4dCOY/RxDu04LYspnzbD1PKbxfgoIda2Z9KksrdeMimSGQ5+I4Afm8mqQx4x8goXCzl1HyqhMJjRcm7znifL01INcXtHnNeAJJbJ27wbhl7jZmqCIGzsSUYcsVSmcZo00wpXjREttdlethaHJ9ZOP6qCumh6ZK5zEmkJHwtOA9XAYm2iDEsB12peDTFKFrjDApnuZPp1MvjvlslOCfsHYliSZR2JpJjDThRKZwxhbZRJ3JxsL41wTzQdQQ5oPRScG8fO8zAqLA7RuIpA7sAyL/PIEo8PoGIgvjXxUQ+U2BKDwtIGoQ+L8xIPL7B6LwaSCy5F/nAER+30Ak6xKvFYj81gMdcesdTtm77JacF2oiKaFQ3KZJpXqPFwjK9OZvHbm83pHLVp2IEOUcSx/Y4YwWKy9J+RJkijyi/1a8ylUw9CIvOHrFBixgkq4W28v6PBOiU9i7l5MbZMtrdPBm93/f3rHLbjnq7xXhs4FC2/hB8XcN0E/27/Lykv0dDBm/HXGbV952l8Z8wGppH+WLycFP7heAcAvVWAYtyt3Frbdfbg5iEV82f4Asu8r1+s8USjmPJL/EGPvxRp5qbyPT3LzAYm6u05W9eYa93RGmkzFNcWYwmK2SapGAwq0MZ1BjrSABAV4x4x0kFlRbpEmCdoUeqveroaXntCSXoZatNCwkdldH3AmD0pKeQhY3Wm5M49qLJCAmmDkolmPMOSIQzNyMHELLG0mxdt4aYX+mXEgCLrqwUE3OhsXCaIfFDscTpy3N0CzWs4SPVpPtLpHtSjUMP1AIudQFBKeV2FHpGADdXgULPp4jhqAkBOY5JDU/clZaYqt2HFdLbOW/NrSkHlOUoo65v+AzxoAkqhZQMEH8f8pJU1I+uEhAy+tLBDJ6Rmphyz2PqxYyvm/ahoVogtf1RLQgsAtzTNJfjMcAcWKWXPEWOHfwPMdJY1UY+/033KT0u7jGj3/w48tQnN1sapduHuVJxrjxvX5Su4ufbm8rzuR9O4X6ZNrbtN0rPPeT2XFNBUKLBkjaC5PowNHcl55Dl+s2cmhzolCbSI9IdyTjTC/AY23Ykg/Id7+w1tJ1x8peA3ZQTthqou9a+odLlFKrZXwGE4hU9W4e3DKvmP4qQLFUR8ENNnn4YRDeaKWAsVVh99u1jk3VhhXx1EF9T4gNsy6cy+FwGClSkMr/QmWUyYacdaxOgKfTHHZSyXHNLKd/1DsQwQ5By+5RLzop1NMKh7o3bYp5VconMW94FMyTXOgW82wZZt8mcZKq3bSddhzV1nXSDVpSbv84yu07x1Du9xy55WRIaVfstLnDMyPbPqUjZ0YNNszU4E3UNRW+1DjO6XeAsnAwKyhFtV3wV27v9fYBYtEHqgCxAsEfYv4mmdE2lPihRBJtZ0ZhQyB1TwtJ9dQo0DOaQ1Mj/8h9SndoaG6Ml5yRzIw5mxb4AdZwKKcVfOmdFo5dEQcq/0M2yZdVbiNvnsm6MSUrJiLdPsz+QJupEW9qiwbSc1oPHew81sHLsu+4GqPsO+4MvMxtx3/BKWRLjOFv+e8NBPVGGjmRo2auVWOnt06O1IOuWjn1qEM0ZERdtizHmkZeC0FivIrn/cQcIBknoS3mADBwfa8ddTDKX70XYD2z1tSBOlwXywGJFqCWIWhalz/JmBUX40+ww9eLjvTfu5GR5zGaN0bP5twgof+ejGfbZ3YUD/HGRR2N1GDgBNDf9nlYG6L+Vu7gMKsRsAjXaz1XMx4o1MCqINxhnJF+9I/8flfB4tNt+7OOE3VNsDuF4Jia0FkJs3QKyi6N4nXMAuUUYUzesNT1UKD/ImQYGWx9Xd8GNP1aUn6yeyIfKfm7t0s13cltt7brFVVtCtXTK7gBizSze2ULStdNUd16/epNUQfg/k0xMBP1GKB4xdSRC2byaIpsxvSUS5q/QBrnhljOpTw63rVXqF6Ecy3C7Kw+GpgZtfE9nGvI6yy+h7MlRcf9MNfSvng3tIMMLejd0EKzlvEuzMOE6fcvTLNa8S7Mw4QZ2r4oOqow5c9Q7XKBYkf+7o+/GntDnZ1qNNrQV6q+EbpJCIc23ziOhj5o7eu8Yaia4Cg0pWZr7Hb3GYe5TQ+wBfHfkjtX03LlnvBKSmPTtoYWIXnPFxI73f5uXrlfY/vrg/7t/w==</diagram></mxfile>
2209.00465/paper_text/intro_method.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Pre-trained language models (LMs) demonstrate exceptional proficiency in a wide range of natural language processing (NLP) tasks such as question answering, machine translation, and summarization. They indeed capture some commonsense knowledge about our physical world such as "birds can fly". However, the question of whether LMs can exhibit reasoning abilities within a grounded, realistic setting remains an open issue. This is because LMs lack the sensory experiences and physical interactions with the environment that enable human beings to grasp the nuances of real-life situations and plan for completing tasks.
4
+
5
+ Embodied robotics learning is a growing field that seeks to create artificial intelligence agents capable of navigating and performing tasks within real-world environments, typically simulated through physical engines such as AI2THOR (Kolve et al. 2017). The ALFRED benchmark (Shridhar et al. 2020) represents one of the pioneering datasets that bridges the gap between NLP and robotics, providing a platform for investigating language-directed agents.
6
+
7
+ ![](_page_0_Picture_14.jpeg)
8
+
9
+ ![](_page_0_Picture_16.jpeg)
10
+
11
+ - (1) Turn right and walk to the stove.
+ - (2) Pick up the tea pot on the left side of the stove.
+ - (3) Turn left and walk to the shelves on the right.
+ - (4) Place the tea pot on the middle shelf to the left of the glass container.
15
+
16
+ Figure 1: The task of grounded planning for embodied tasks (G-PlanET). The input to the LMs is a goal with a specific environment, and the output is a step-by-step plan that can guide a robot to complete the task.
17
+
18
+ The objective of these studies is to design and test agents that can translate language instructions into sequences of low-level actions that enable the agent to manipulate objects within an environment and achieve a desired outcome (*e.g.,* cleaning an object and placing it elsewhere).
19
+
20
+ However, the primary emphasis of the ALFRED benchmark and related datasets is on the comprehension of preestablished plans, rather than the ability to reason and independently plan within a realistic environment. Prior research focuses on the capacity of agents to comprehend and execute step-by-step plans, but not on their capacity for decomposing tasks and generating such plans, which represents a more advanced skill. Additionally, the role of LMs has received limited examination in the context of these benchmarks, where they are mainly used as encoders for embedding token sequences, rather than for planning or reasoning.
21
+
22
+ Prior studies have explored the planning capability of LMs, with Huang et al. (2022) demonstrating that GPT-3 and similar models are capable of generating general plans for executing everyday tasks. However, these plans lack grounding in a realistic environment, as LMs are not environment-specific. As a result, these plans are not necessarily executable by agents. For instance, in the context of an ALFRED task to "move a teapot from the stove to a shelf," embodied agents require knowledge of the location of the teapot and the path to reach it. Humans, on the other hand, can readily observe the location of the teapot on the stove and their current position in the kitchen, allowing them to formulate a grounded plan that starts with "turn right and walk to the stove." This highlights the need for generating detailed, step-by-step action sequences for robotic agents to use in their execution processes.
+
+ <sup>1</sup> University of Southern California <sup>2</sup> Fudan University <sup>3</sup> Sea AI Lab
+
+ <sup>\*</sup> The first two authors contributed equally.
+
+ <sup>1</sup> Project website: *https://yuchenlin.xyz/g-planet/*
33
+
34
+ *Can LMs also learn grounded planning ability? How should we evaluate and improve LMs for grounded planning?* In this paper, we address the question of whether LMs can also learn grounded planning abilities. To this end, we propose a study on the ability of language models for grounded planning for embodied tasks (G-PlanET). Our approach involves providing LMs with two inputs: a high-level task description and a realistic environment in the form of an object table. The output is a plan consisting of executable, step-by-step actions. We formulate G-PlanET as a language generation task and focus on encoder-decoder language models such as BART (Lewis et al. 2020).
35
+
36
+ In order to establish a dataset and evaluation protocol for G-PlanET, we leveraged the ALFRED data by developing a suite of data conversion programs. They extract the object information from the environment and format it into data tables, thereby enabling models to access observations from realistic scenarios. Additionally, we formulated a new evaluation metric, referred to as KAS, that is more appropriate for the task than existing ones for text generation. As regards the methodology of G-PlanET, we suggest flattening an object table into a sequence of tokens and appending it to the task description as input to the model. The base LMs are then fine-tuned with these seq2seq data to learn to generate plans. Furthermore, we propose a simple yet effective decoding strategy that iteratively generates subsequent steps by incorporating the previous generation into the input. Our empirical results and analysis indicate that incorporating object tables into inputs and the proposed iterative decoding strategies are both crucial for enhancing the performance of language models in G-PlanET.
37
+
38
+ To summarize, our main contributions are:
39
+
40
+ - The task of G-PlanET: To the best of our knowledge, this is one of the first studies to investigate the ability of LMs for embodied planning in realistic environments. G-PlanET is crucial for advancing the grounded generalization of large LMs and bridging the gap between NLP and embodied intelligence. (Sec. 2)
41
+ - A comprehensive evaluation protocol: We put significant effort to convert the ALFRED and AI2THOR data into data tables to support the evaluation of G-PlanET. We also created a new evaluation metric, KAS, to effectively assess the plans generated by the LMs.
42
+ - Improving LMs for G-PlanET: We present two simple but effective components for enhancing the grounded planning ability of LMs - flattening object tables and an iterative decoding strategy. Our experiments show that these components lead to notable performance gains. (Sec. 3) Also, through extensive experimentation and in-depth analysis, we have gained a deeper understanding of the behavior of LMs for G-PlanET and present a series of non-trivial findings in our study.
45
+
46
+ # Method
47
+
48
+ Here we present the background knowledge, the problem formulation and the data sources for G-PlanET.
49
+
50
+ Embodied tasks. The ALFRED benchmark (Shridhar et al. 2020) is among the first benchmarks focusing on embodied tasks in realistic environments, although most of the examples are *household* tasks. It aims to test the ability of agents to execute embodied tasks in real-world scenarios. Specifically, the agents need to understand language-based instructions and output a sequence of actions to interact with an engine named AI2-THOR (Kolve et al. 2017), such that the given tasks can be achieved.
51
+
52
+ Language instructions. Language instructions play an important role in the ALFRED benchmark. The embodied tasks are annotated with a high-level goal and a low-level plan (*i.e.,* a sequence of executable actions for robots) in natural language, which are both inputs to the agents. The agents need to understand such language instructions and parse them into action templates. Note that the agents do not need to *plan* for the task, as they already have the step-by-step instructions to follow.
53
+
54
+ Task planning. Prior works show that large pre-trained language models (LMs) such as GPT-3 (Brown et al. 2020) can generate general procedures for completing a task. However, such plans are not aligned with the particular environment in which we are interested. This is because these methods never encode the environment as part of the inputs to LMs for grounding the plans to the given environment. Therefore, such non-grounded plans are hardly useful in guiding agents to work in real-world situations.
55
+
56
+ As discussed in Sec. 2.1, the ALFRED benchmark does not explicitly test the *planning* ability, while prior works on planning with LMs have not considered *grounding* to a specific environment. In this work, we focus on evaluating and improving the ability to generate *grounded plans* for *embodied tasks* with LMs, which we dub as G-PlanET. It has been an underexplored open problem for both the robotics and NLP communities.
57
+
58
+ Task formulation. The task we aim to study in this paper is essentially a language generation problem. Specifically, the input is two-fold: 1) a high-level goal G and 2) a specific environment E that the agents need to ground to. The expected output is a sequence of actionable plans $S = \{S_1, S_2, \cdots\}$ to solve the given goal in the specific environment step-by-step. The goal G and the plan S are in the form of natural language, while the environment E can be viewed as a data table consisting of the object information in a room. Figure 2 shows an illustrative example and we will discuss more details in Section 3.2.
+
+ ![](_page_2_Figure_0.jpeg)
+
+ Figure 2: The overall workflow of the proposed methods. First, we extract the object table from the realistic environment. Then we flatten the table into a sequence of tokens E (Sec. 3.2). We provide two learning methods for generating plans: 1) generate the whole plan $S_1, S_2, \cdots, S_T$ and 2) iteratively decode $S_{t+1}$ (Sec. 3.3).
65
+
66
+ To build a large-scale dataset for studying the G-PlanET task, we re-use the goals and the plans of ALFRED and extract object information from AI2THOR for the aligned environment. The ALFRED dataset uses the AI2THOR engine to provide an interactive environment for agents with an egocentric vision to perform actions. However, the dataset does not contain explicit data about objects in the environment (*e.g.,* their coordinates, rotation, and spatial relationships with each other).
67
+
68
+ We develop a suite of conversion programs for using AI2THOR to re-purpose the ALFRED benchmark for evaluating the methods shown in Section 3. We managed to get a structured data table to describe the environment of each task in the ALFRED dataset. We explore the AI2THOR engine and write conversion programs such that we can get full observations of all objects: properties (movable, openable, etc.), positions (3D coordinates & rotation), sizes, and spatial relationships (*e.g.,* object A is on the top of object B). We believe our variant of the ALFRED data will be a great resource for the community to study G-PlanET and future directions in grounded reasoning.
69
+
70
+ Herein, we introduce the methods that we adopt or propose to address the G-PlanET problem. First of all, we present the base language models, which are encoder-decoder architectures. Then, we show in detail how we encode the environment data and integrate them with the seq2seq learning framework. Finally, we propose an iterative decoding strategy that significantly improves performance.
71
+
72
+ Pretrained encoder-decoder language models, such as BART (Lewis et al. 2020) and T5 (Raffel et al. 2020), have achieved promising performance in many well-known language generation tasks such as summarization and question answering. They also show great potential for general commonsense reasoning tasks such as CommonsenseQA (Talmor et al. 2019), suggesting that these large LMs have common sense to some extent. As the G-PlanET can be also viewed as a text generation problem, we use these LMs as the backbone for developing further planning methods, hoping that their common sense can be grounded in real-world situations for embodied tasks.
73
+
74
+ Vanilla baseline methods. As shown in many papers, BART and T5, when their sizes are similar, show comparable performance in many generation tasks. Thus, we use BART-base and BART-large as the two selected LMs for evaluation. The simplest and most straightforward baseline method of using such LMs to solve G-PlanET is to ignore the environment and use the goal as the sole input. Then, we fine-tune the base LMs with the training data and expect them to directly output the whole plan as a single sequence of tokens (including special separator tokens). This simple method does not allow the LMs to perceive the environment, although training on the large-scale data can still teach the LMs some general strategies for planning. Therefore, we see this as an important baseline method to analyze.
75
+
76
+ To enable the LMs to perceive an environment, we need to encode the object tables described in Sec. 2.2. Following prior works in table-based NLP tasks (Chen et al. 2020; Liu et al. 2022b), we flatten a table into token sequences row by row, thus creating a linearized version of an object table. Then, we append the flattened table after the goal to form a complete input sequence. Thus, the input side of the encoder-decoder finally has the environment information for generating a grounded plan.
77
+
78
+ Considering the max sequence limit, we only choose to encode objects by their type, position, rotation, and receptacle parent. The object type not only tells what an object is but also implies commonsense affordance (e.g., a microwave can heat up something, a knife can slice something), which is very important for planning. The position information is essential for agents to navigate and find objects, thus playing an important part in planning. The rotation is also useful for some objects that can only be used with a certain orientation (e.g., a refrigerator can only be opened when the agent is in front of it). An object and its receptacle have a close spatial connection (e.g., a pen is on a desk; an apple is in a fridge). Every object has a unique identifier such that objects of the same type can be referred to precisely when they are receptacles of others. In addition, the agent is represented as a special object.
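A rough sketch of what such a row-by-row flattening could look like; the column names, separators, and example objects are assumptions made for illustration, not the exact serialization format used for the dataset.

```python
def flatten_object_table(rows):
    """Linearize an object table row by row into a single token sequence,
    in the spirit of table-to-text encoders. Format is illustrative only."""
    cells = []
    for r in rows:
        cells.append(
            f"{r['id']} | type: {r['type']} | pos: {r['position']} "
            f"| rot: {r['rotation']} | in/on: {r['receptacle']}"
        )
    return " [ROW] ".join(cells)

env = flatten_object_table([
    {"id": "TeaPot_1", "type": "TeaPot", "position": "(1.2, 0.9, -0.4)",
     "rotation": "0", "receptacle": "StoveBurner_2"},
    {"id": "Agent", "type": "Agent", "position": "(0.0, 0.9, 0.0)",
     "rotation": "90", "receptacle": "-"},
])
model_input = "Goal: put the tea pot on the shelf. Environment: " + env
```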
79
+
80
+ Adding the flattened table of object information to the input sequences indeed improves the LMs in terms of their perception of the realistic environments, which forms the foundation of grounded planning. However, the thinking process is still limited by the conventional seq2seq learning framework, which assumes LMs should output a complete plan by a single pass of decoding. We argue that a thoughtful planning process should carefully handle the coherence of each step, otherwise errors accumulate and cause a failed plan.
81
+
82
+ Therefore, we propose a simple yet effective decoding strategy that learns to iteratively generate a plan step by step. Specifically, we append the previously generated steps up to the current step t to the input sequence (i.e., Input = $[G+S_1+\cdots+S_t(+E)]$ ) for generating the next step (i.e., Output = $S_{t+1}$ ). This iterative decoding process continues until the LM generates the special token END. In the training stage, we use the ground-truth references for $S_{\leq t}$ ; in the inference stage, we do not have such references, so we use the model predictions as $S_{\leq t}$ .
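The loop below sketches this iterative decoding with the Hugging Face `transformers` API; the `facebook/bart-base` checkpoint stands in for whichever fine-tuned model is used, and the literal `END` stop string and the step limit are assumptions for illustration.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-base")

def iterative_plan(goal, flat_env, max_steps=10):
    """Generate a plan one step at a time, re-encoding previous steps each round."""
    steps = []
    for _ in range(max_steps):
        prompt = goal + " " + " ".join(steps) + " " + flat_env  # [G + S_1..S_t (+ E)]
        ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids
        out = model.generate(ids, max_new_tokens=64)
        step = tokenizer.decode(out[0], skip_special_tokens=True)
        if step.strip() == "END":
            break
        steps.append(step)
    return steps
```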
83
+
84
+ Notably, in contrast to the conventional seq2seq learning process, the iterative decoding strategy needs to run the encoder-decoder model N+1 times to generate a plan with N steps. The additional computation cost for re-encoding is worthwhile, however. Imagine a human planning a task in a room: it is natural to come up with the plan step by step, and the most useful information for generating different steps is likely to concern different objects. Therefore, a temporally dynamic attention mechanism is favorable in planning with LMs. Our iterative decoding strategy encourages the encoder-decoder architecture to learn such an ability.
85
+
86
+ **Pretrained table encoders.** Since we use environmental information in a tabular format and BART has not been pretrained on tabular input, BART may not be able to use this part of the information well. Therefore, we employ TAPEX (Liu et al. 2022b), the state-of-the-art pre-trained language model for tabular data. Using SQL execution as the only pre-training task, TAPEX achieves better tabular reasoning capability than BART, and thus we expect TAPEX to make full use of the environmental information represented by the table in our task.
87
+
88
+ **In-context few-shot learning with GPT-J.** Finally, to explore whether large-scale language models can master the task with few-shot examples, we also experimented with few-shot performance on a larger language model GPT-J 6B.
2210.07920/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-05-25T08:52:30.016Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36" etag="aYp9nz46n9pJni7ABZKS" version="18.1.2" type="google"><diagram id="iybBiewkQJGjLnTZbJjj" name="Page-1">5VZbc6pIEP41Pu4WCBh95Ga8REQTk/i0NcIwjg4MwijGX396EBViUuec3X3YqrUqpPubr7tn+jLQ0uz4+JihdD3hIWatthIeW5rTarfVXk+BfxL5qBBV0c8IyWhYYTfgmZ5wBVaGZE9DnDeIgnMmaNoEA54kOBANDGUZL5q0iLNm1BSRKqJyA54DxPAd7Y2GYn1Gu0aNPcCUrMX1fNVKjC7kCsjXKORFDdLclmZnnIuzFB9tzGT2Lnk52/W/Wb1uLMOJ+BWDNyHM/URzPpxVkFPlLfW2f/1ReTkgtq8OXG1WfFwyAPtOpUjjMlXWAWeCQoKe0Aozn+dUUJ7A+ooLwWMgMLlgoWBLMr5PQpsznsF6iCO0Z6LmwWSUSEvBU0BRnp4LGNEjhj1bZUDzgioXRLpCArU086y2+2lCWm2bvlrTeaGMHwk34ec9L9bugoA0lapDbLP8AX++WfRAssYuc2evcz2Zau/5/NGcWY6bD6NW28I6CuyDPWWeypwDDoIwNHwOCwL+HjJ/Ouyr5j53VRLLR5I/QldbwUjvvdm4eIntsHB2Vjh7yixv5VndzsmC9RrfNUgS2B1pZBm9Bn85kg/v2NVPx06NbXbI+6I0WTpg4tTZowdruvTUru4cHxrs4uXNRh8rxzgmTq/GHWnmlPfBYHB6mLg1rnzgYrUximSjNrjleXNlcMomgzq3PO1TZhSDpN3gns+qDU4bf7CrcauTGsQHA6+ZR1cnPlQosHf+oc/eoxHfFWXZnhev0/nYsJfDoWxLzaoaF1oJH7+dCPU6Z3BDYR5jkX0ApTLQL1NcXU7ti17cJh1YZ2xdm/IriKrbhVx93wYQhGoGf2Me2//jeTRpfR5nW3NinszcPwEjdPLBfr0fC3/rnoxEiGIJfmyYxv5mAQ/pSxmvQTrNuqXSYVIhsnmGWgf6oF9AbMuEIe7rqi8Vz+ZRV/FNKU7TqKtPgGx7csqj7hDk2Rh60fJepOLMdgMw9ZxCKi+7d1Cmjtz1Jsreqy249qa+o+0Tdchk0sNpPOuUbWyN5gvDzbYjQkjZxv9SJz/85xpZ+/VGjhg+mvJVDbnASViJTsBQntMAwLWIIbajglh2sOzGsv/OLnFIPr+sf5q3WmIuuarn5YJlmCFBD033X+WqiuBzCoGvZdH0Zl2gEf80mk5yvs8CXNnV396fXOnqT10JlBEs7lyV5bse/e9XVL+r6ATBLjlTO3elhU4WzcLlIuNbfLl7Ep7IqyuijH2CUHUHBVBBnH1xOcU0DGUYq1hTgZ9TFMiYBXx+ftMe/2iwNF1rpr2r3k1W54sG0pTfnitQbx+D56Ldvqk19wc=</diagram></mxfile>
2210.07920/main_diagram/main_diagram.pdf ADDED
Binary file (8.56 kB). View file
 
2210.07920/paper_text/intro_method.md ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Image segmentation and object detection are today mature and essential components in vision-based systems with applications in a wide range of fields including automotive [1], agriculture [2], and medicine [3], just to name a few. A major challenge in building and deploying such components at scale is that they require costly and time-consuming human annotation. This has motivated efforts in self-supervised learning (SSL) [4–6]. The aim of SSL is to learn general-purpose image representations from large unlabeled datasets that can be fine-tuned to different downstream tasks with small annotated datasets. While SSL methods have also been fine-tuned for image segmentation since their inception, it is only with the recent state of the art (SotA) methods, such as DINO [4] and Dense Contrastive Learning [6], that a clear and strong link to object segmentation has been observed. This has led to several methods for salient object detection built on top of SSL features [7–12].
4
+
5
+ Most prior work based on SSL features defines some form of clustering by either using attention maps [7–9] or similarity graphs [10–12]. In this work, we take a quite different direction. Rather than directly clustering the features, we train a network to map them to a segmentation mask. As supervision signal we use the *movability* of objects, *i.e.*, whether they can be locally shifted in a realistic manner. We call our method MOVE. This property holds for objects in the foreground, as they occlude all other objects in the scene. This basic idea has already been exploited in prior work with relative success [13–19]. Nonetheless, here we introduce a novel formulation based on movability that yields a significant performance boost across several datasets for salient object detection.
6
+
7
+ In our approach, it is not necessary to move objects far from their initial location or to other images [14, 16] and thus we do not have to handle the context mismatch. It is also not necessary to employ models to generate entire scenes [15, 20], which can be challenging to train. Our working principle exploits observations also made by [17–19]. They point out that the correct mask maximizes the inpainting error both for the background and the foreground. However, rather than using the reconstruction error as a supervision signal, we rely on the detection of artifacts generated through shifting, which we find to provide a stronger guidance.
+
+ ![](_page_1_Figure_0.jpeg)
+
+ Figure 1: Exploiting inpainting and movability. (a) Input image. (b) Examples of predicted segmentation masks: correct (top), larger (middle) and smaller (bottom). (c) Inpainted backgrounds in the three corresponding cases. (d) Composite image obtained by shifting the foreground object in the three cases. (e) It can be observed that when the mask is incorrect (it includes parts of the background or it does not include all of the background), the background inpainting combined with shifting reveals repeated patterns and mismatching background texture, when compared to the original input image or composite images obtained without shifting.
14
+
15
+ Suppose that, given a single image (Figure 1 (a)), we predict a segmentation mask (one of the 3 cases in Figure 1 (b)). With the mask we can remove the object and inpaint the background (Figure 1 (c)). Then, we can also extract the foreground object, randomly shift it locally, and paste it on top of the inpainted background (Figure 1 (d)). When the mask does not accurately follow the outline of a foreground object (e.g., as in the middle and bottom rows in Figure 1), we can see duplication artifacts (of the foreground or of the background). We exploit these artifacts as supervision signal to detect the correct segmentation mask. As inpainter we use a publicly available Masked AutoEncoder (MAE) [21] trained with an adversarial loss. Our segmenter uses a pre-trained SSL ViT as backbone (e.g., DINO [4] or the MAE encoder [21]). We then train a neural network head based on an upsampling Convolutional Neural Network (CNN). Following [12], we also further refine the segmenter by training a second segmentation network (SelfMask [12]) with supervision from pseudo-masks generated by our trained segmenter. Even without these further refinements MOVE shows a remarkable performance on a wide range of datasets and tasks. In particular, in unsupervised single object discovery on VOC07, VOC12 and COCO20K it improves the SotA CorLoc between 6.1% and 9.3%, and in unsupervised class agnostic object detection on COCOval2017 it improves the $AP_{50}$ by 6.8% (a relative improvement of 56%), the $AP_{75}$ by 2.3% (relative 55%) and the AP by 2.7% (relative 49%).
16
+
17
+ # Method
18
+
19
+ Our objective is to train a segmenter to map a real image $x \in \mathbb{R}^{H \times W \times 3}$ , with H the height and W the width of the image, to a mask $m \in \mathbb{R}^{H \times W}$ of the foreground, such that we can synthesize a realistic image for any small shift of the foreground. The mask allows us to cut out the foreground from x and to move it arbitrarily by some shift $\delta \in \mathbb{R}^2$ (see Figure 2, top-left). However, when the shifted foreground is copied back onto the background, missing pixels remain exposed. Thus, we inpaint the background with a *frozen* pre-trained MAE<sup>2</sup> and obtain $\hat{b}$ (see Figure 3). Moreover, there is a difference between the texture of $\hat{b}$ , which is generated from a neural network, and the texture of the cut-out foreground from x, which is a real image. To ensure more similarity between these two textures, we synthesize $\hat{x}_{\delta}$ by extracting the foreground from the autoencoding (AE) of the input image x shifted by $\delta$ , which we call $\check{x}_{\delta}$ , and by pasting it onto the background $\hat{b}$ .
+
+ https://github.com/facebookresearch/mae/blob/main/demo/mae\_visualize.ipynb
+
+ ![](_page_2_Figure_0.jpeg)
+
+ Figure 2: Synthetic and real images used to learn how to segment foreground objects. We obtain the predicted mask and inpainted background from our segmenter and MAE respectively. We train the segmenter in an adversarial manner so that the composite image with a shifted foreground (left, top row) looks real. A discriminator is trained to distinguish two types of real (right) from two types of fake (left) images. The fake images consist of the composite image with a shift and a copy-paste image, obtained by placing the shifted foreground on top of the input image. The set of real images consists of composite images without a shift and the real images. The real images are first autoencoded with MAE to match the artifacts of the inpainted background.
28
+
29
+ We enforce the realism of the synthesized images $\hat{x}_{\delta}$ by using adversarial training, i.e., by training the segmenter against a discriminator that distinguishes two sets of *real* (Figure 2, right hand side) from two sets of *fake* images (Figure 2 left hand side). The synthetic *real* image $\hat{x}_{\delta=0}$ is obtained by composing a zero-shifted foreground with the inpainted background; the second *real* image $\check{x}$ is instead simply the AE of x. The two *fake* images are obtained by composing a $\delta$ -shifted foreground with either the inpainted background $\hat{b}$ or $\check{x}$ , and obtain $\hat{x}_{\delta}$ and $\tilde{x}_{\delta}$ respectively.
30
+
31
+ We introduce all the above synthetic images so that the discriminator pays attention only to artifacts due to incorrect masks from the segmenter. Ideally, the segmenter should generate masks such that the fake image $\hat{x}_{\delta}$ looks as realistic as $\check{x}$ for any small $\delta$ . However, the discriminator might distinguish these two images because of the background inpainting artifacts and not because of the artifacts due to an incorrect segmentation (which are exposed by random shifts). To avoid this undesired behavior, we also introduce the real image $\hat{x}_{\delta=0}$ . This image has no segmentation artifacts for any mask, but has the same background inpainting artifacts as the fake images (although there is no shift in $\hat{x}_{\delta=0}$ , the background inpainting creates artifacts beyond the boundaries of the segmentation mask). Finally, to guide the discriminator to detect repeated patterns (as those caused by incorrect masks, see Figure 1), we also add a fake image $\tilde{x}_{\delta}$ , where the background has the original foreground.
32
+
33
+ The segmenter is trained only through the backpropagation from $\hat{x}_{\delta}$ . The details of the segmentation network, the inpainting network and the adversarial training are explained in the following sections.
34
+
35
+ Following the recent trend of methods for unsupervised object segmentation [7–12, 22], we build our method on top of SSL features, and, in particular, DINO [4] or MAE [21] features. Thus, as a backbone, we adopt the Vision Transformer (ViT) architecture [23]. Following the notation in [10], we split an image $x \in \mathbb{R}^{H \times W \times 3}$ into tiles of size $P \times P$ pixels, for a total of $N = HW/P^2$ tiles (and we assume that H and W are such that H/P and W/P are integers). Each tile is then mapped through a trainable linear layer to an embedding of size d, and an additional CLS token is included in the input set (see Figure 3 left).
+
+ <sup>2</sup> The MAE [21] we use is based on a ViT architecture and has been pre-trained in an adversarial fashion (as opposed to the standard training with an MSE loss) to output more realistic-looking details.
+
+ ![](_page_3_Figure_0.jpeg)
+
+ Figure 3: (Left) The segmenter is built on top of SSL features from a *frozen* encoder. To define the inpainting region for the background, the predicted mask is shifted and combined with the unshifted mask (bottom left). For better visualization purposes we highlight the edge of the shifted mask, but this does not appear in the actual union of the masks. This mask union is then downsampled to the size of the tile grid via max pooling and denoted $\hat{m}$ . (Right) The inpainter is based on a *frozen* MAE. First, it takes all the tiles from the input image and feeds them to the MAE encoder. Second, it takes a convex combination between the encoder embeddings and the MSK learned embedding (but now frozen), where the convex combination coefficients are based on the downsampled mask $\hat{m}$ . Finally, this combination is fed to the MAE decoder to generate the inpainted background.
44
+
45
+ The *segmenter* network is a CNN that takes SSL features as input (e.g., from a pre-trained DINO or MAE encoder), upsamples them and then outputs a mask for the original input image. The final output is generated by using a sigmoid to ensure that the mask values are always between 0 and 1. We also ensure a minimum size of the support of the predicted mask by using
46
+
47
+ $$\mathcal{L}_{\min} = \frac{1}{n} \sum_{i=1}^{n} \max \left\{ \theta_{\min} - \sum_{p} \frac{m^{(i)}[p]}{HW}, 0 \right\}$$
48
+ (1)
49
+
50
+ where n is the number of images in the training dataset, $m^{(i)}$ is the predicted mask from image $x^{(i)}$ , p is a pixel location within the image domain, and $\theta_{\min}$ is a threshold for the minimum mask coverage percentage (in the range [0,1], where 0 implies that the mask is empty and 1 implies that the mask covers the whole image). Since masks should only take binary values to clearly indicate a segment, we use a loss that encourages $m^{(i)}$ to take either 0 or 1 values
51
+
52
+ $$\mathcal{L}_{bin} = \frac{1}{n} \sum_{i=1}^{n} \frac{1}{HW} \sum_{p} \min \left\{ m^{(i)}[p], 1 - m^{(i)}[p] \right\}.$$
53
+ (2)
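A compact PyTorch sketch of the two regularizers in Equations 1 and 2 for a batch of predicted masks; the batch layout and the default value of $\theta_{\min}$ are illustrative assumptions.

```python
import torch

def mask_losses(masks, theta_min=0.1):
    """L_min and L_bin from Eqs. (1)-(2).

    masks: (B, H, W) tensor with values in [0, 1] (after the sigmoid).
    theta_min: assumed minimum fraction of the image the mask should cover.
    """
    coverage = masks.flatten(1).mean(dim=1)                    # sum_p m[p] / (H*W)
    l_min = torch.clamp(theta_min - coverage, min=0.0).mean()  # penalize too-small masks
    l_bin = torch.minimum(masks, 1.0 - masks).flatten(1).mean(dim=1).mean()
    return l_min, l_bin
```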
54
+
55
+ The main task of MOVE is to predict a segmentation mask that can be used to synthesize a realistic image, where the foreground object is shifted on top of the inpainted background (see Figure 1 (e) top and Figure 2 top left). Figure 3 shows how we use the predicted high resolution mask for inpainting with MAE. Since MAE performs inpainting by masking or retaining entire patches of $P' \times P'$ pixels, it is necessary to also split the segmentation mask into a grid of tiles of $P' \times P'$ pixels and to map each tile to a single scalar between 0 and 1. We do that by using a max pooling operation within each tile and obtain a low-res mask $\hat{m}$ , such that $1-\hat{m}$ does not contain any part of the predicted mask. To regularize the predicted mask m, the mask losses $\mathcal{L}_{\min}$ , $\mathcal{L}_{\text{bin}}$ are also computed on max
56
+
57
+ pool $\hat{m}$ and average pool downsampled masks (at a scale 1/P' of the original image resolution; for more details see the supplementary material). Then, we feed the entire set of image tiles to the MAE encoder and obtain embeddings $\xi_1,\ldots,\xi_N$ . Next, for $j=1,\ldots,N$ , we compute the convex combination between the embeddings $\xi_j$ and the learned MSK (masked) token from MAE by using the low res mask $\hat{m}$ as $\hat{\xi}_j = \hat{m}[j] \cdot \xi_{\text{MSK}} + (1-\hat{m}[j]) \cdot \xi_j$ . Finally, we feed the new embeddings $\hat{\xi}_j$ in the MAE decoder and reassemble the output tiles back into the inpainted background image $\hat{b}$ (see Figure 3 bottom-right). Notice that we feed all the tiles as input to obtain a differentiable mapping that we can backpropagate on. Interestingly, we found that when no tile is masked at the input of the MAE encoder, the embeddings $\xi_j$ do not store significant information about their neighbors (see the supplementary material). This is in contrast to the typical use of MAE, where only the subset of "visible" tiles is fed as input to the encoder. However, such tile selection operation would make the inpainting not differentiable.
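The differentiable inpainting path can be sketched as follows; `mae_encoder`, `mae_decoder`, and `msk_token` are placeholders for the frozen MAE components, and the patch size is an assumed value.

```python
import torch
import torch.nn.functional as F

def inpaint_background(mae_encoder, mae_decoder, msk_token, image_tokens, mask, patch=16):
    """Sketch of the differentiable inpainting: downsample the mask union to the patch
    grid with max pooling, blend encoder embeddings with the (frozen) MSK token, decode.
    """
    # mask: (B, 1, H, W) in [0, 1]  ->  m_hat: (B, N) with N = (H/patch) * (W/patch)
    m_hat = F.max_pool2d(mask, kernel_size=patch).flatten(1)
    xi = mae_encoder(image_tokens)                 # (B, N, d): all tiles stay visible
    xi_hat = m_hat.unsqueeze(-1) * msk_token + (1.0 - m_hat.unsqueeze(-1)) * xi
    return mae_decoder(xi_hat)                     # reassembled inpainted background
```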
58
+
59
+ Figure 2 shows how we create the images used in the adversarial training. First, we mask the input image with the predicted mask and compose with the inpainted background image, obtaining
60
+
61
+ $$\hat{x}_{\delta}[p] = m_{\delta}[p]\check{x}[p+\delta] + (1-m_{\delta}[p])\hat{b}[p], \tag{3}$$
62
+
63
+ where $m_{\delta}[p] = m[p+\delta]$ , $\delta \in [-\Delta W, \Delta W] \times [-\Delta H, \Delta H]$ is a 2D shift, with $\Delta$ the maximum shift range (relative to the image size). To make the inpainting artifacts in the no-shift composite image $\hat{x}_{\delta=0}$ more comparable to those in the shifted composite image, we define the background inpainting region as the union between the predicted mask and its shifted version (see Figure 3). Thus,
64
+
65
+ $$\hat{m} = \mathsf{maxpool}_{P}(1 - (1 - m) \odot (1 - m_{\delta})). \tag{4}$$
66
+
67
+ To improve the discriminator's ability to focus on repeated patterns artifacts, we additionally create *fake* images with a predicted shifted foreground pasted on top of the autoencoded image, obtaining $\tilde{x}_{\delta} = \check{x}_{\delta} \odot m_{\delta} + \check{x} \odot (1 - m_{\delta})$ .
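Putting Equations 3 and 4 together, the composites and the mask union reduce to a few element-wise operations; the tensor names and shapes below are illustrative only.

```python
import torch

def make_composites(x_ae, x_ae_shifted, m, m_shifted, b_inpainted):
    """Sketch of the composites: x_hat_delta pastes the shifted foreground onto the
    inpainted background (Eq. 3); x_tilde_delta pastes it onto the autoencoded input.
    All image tensors are (B, C, H, W); m and m_shifted are (B, 1, H, W) masks.
    """
    x_hat_delta = m_shifted * x_ae_shifted + (1.0 - m_shifted) * b_inpainted
    x_tilde_delta = m_shifted * x_ae_shifted + (1.0 - m_shifted) * x_ae
    # Mask union used to define the inpainting region (Eq. 4), before max pooling.
    mask_union = 1.0 - (1.0 - m) * (1.0 - m_shifted)
    return x_hat_delta, x_tilde_delta, mask_union
```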
68
+
69
+ The adversarial loss for the discriminator can be written as
70
+
71
+ $$\mathcal{L}_{\text{advD}} = -\mathbb{E}_{x_R} \min\{0, D(x_R) - 1\} - \mathbb{E}_{x_S} \min\{0, -D(x_S) - 1\}$$
72
+ (5)
73
+
74
+ where samples for "real" images $x_R$ are the set $\{\check{x}^{(i)}_{\delta}\}_{i=1,\dots,n} \cup \{\hat{x}^{(i)}_{\delta=0}\}_{i=1,\dots,n}$ and samples for synthetic images $x_S$ are the set $\{\hat{x}^{(i)}_{\delta}\}_{i=1,\dots,n} \cup \{\tilde{x}^{(i)}_{\delta}\}_{i=1,\dots,n}$ , with uniform random samples $\delta \sim \mathcal{U}_2([-\Delta W, \Delta W] \times [-\Delta H, \Delta H])$ and $\mathbb{E}$ denotes the expectation. To speed up the convergence, we also use the projected discriminator method [24]. For the segmenter, we use instead the standard loss computed on the composite shifted images
75
+
76
+ $$\mathcal{L}_{\text{advS}} = -\mathbb{E}_{\hat{x}_{\delta}} D(\hat{x}_{\delta}). \tag{6}$$
77
+
78
+ Finally, with $\lambda_{\min}$ , $\lambda_{\text{bin}}$ nonnegative hyperparameters, our optimization is the adversarial minimization
79
+
80
+ $$S^* = \arg\min_{S} \mathcal{L}_{advS} + \lambda_{min} \mathcal{L}_{min} + \lambda_{bin} \mathcal{L}_{bin}$$
81
+ (7)
82
+
83
+ subject to
84
+ $$D^* = \arg\min_{D} \mathcal{L}_{\text{advD}}.$$
85
+ (8)
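The hinge-style losses of Equations 5-7 translate to a few lines of PyTorch; the lambda values are placeholders and the discriminator outputs are assumed to be raw (unbounded) scores.

```python
import torch

def discriminator_loss(d_real, d_fake):
    """Hinge loss of Eq. (5): d_real are discriminator scores on the 'real' set
    (autoencoded inputs and zero-shift composites), d_fake on the shifted composites."""
    return (-torch.clamp(d_real - 1.0, max=0.0).mean()
            - torch.clamp(-d_fake - 1.0, max=0.0).mean())

def segmenter_loss(d_fake_shifted, l_min, l_bin, lam_min=1.0, lam_bin=1.0):
    """Eqs. (6)-(7): fool the discriminator on the shifted composites, plus the mask
    regularizers; the lambda values here are placeholders, not tuned settings."""
    return -d_fake_shifted.mean() + lam_min * l_min + lam_bin * l_bin
```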
2211.10435/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-11-18T18:19:15.032Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" etag="Ijgx4jAhnA0cl4ftk6ut" version="20.5.3" type="device"><diagram id="GgyPG4EAwuddIE4trvRh" name="intro_example">7XxXk9u4tu6vcdXet8q7KIpBemQOCsxUeDnFnIMYxPDrDyCp250843GYPWfu2K4mCRALwArfCmD705IpRqFx6nhX+UH+CUX88dOS/YSiCwwlwAW2TPcWAlvcG6Im8R8vfWkwkjl4NCKP1j7xg/bVi11V5V1Sv270qrIMvO5Vm9M01fD6tbDKX89aO9FjRuRLg+E5efDutUPid/G9dYWSX9rFIInip5kXxPreUzhPLz8It7HjV8OLpiX3ack0VdXd74qRCXLIvCe+3MfxX+l9XlgTlN23DKAwGeFWuzN5Wh9YRBtWmxz5vFjeyVydvH/s+BNK5IAgHVaALlh2Nz14QVz66qnjc3uTFAVeWCxqIG36Sz+4i+CVuvc/0Wtrp/yQnut4WdRUfel/9qq8au7Dmsj914q4jWfgLOvl8z2K4/9+P50ZB6DPdbKgaW/zOE0X+OBuSIAkwCgEDEfyyrkCfXqsCXDrvqw7iadmt3nb8h2LR5fIlxW/vv9g9W+4/qDFvnxxGSCLAHdeNiGh471eD1P1TRKA9SP7YPj6NO5Tw50f/wPZBnl1o3Vn1TOH3LeDQdt9mb/Hta+w9zs14VkNnnXgI60DajBB4tDQUeSmNQmcrbupR1E1ZVJGEBxK2L9cv+53wi5oyqoqf80WvlmZ/wye/p9SUCjP//kivbueQjb+Bp++VXG/hgIf8vMtE1HszrXlg3tr5N+PnUOYDJ0iyadnIHSKGnR+hROvOu80fkd4z5J7FttHFvGSgy/0+8FDaAFf59ifplZ/GfC4RSKVFzQ3FOmqBrY0Qdc35Q0hiWcn8p+/ixf5iuW9IfZHLfmhdi9Yd1c44jdF/hX7/E5TvsvTKdvhts6k/evI5tvZ+YNifN79ncIdZUq3rX82WH4jP78ZQH8K+P2oEn8fN358838Z7/E6Pvz8Gxr0N2fEVwKRjwDlDYf+4doHwQf6AaN+MVN+BaC8X84r6P5hp/k96PNfDeR+KPT4AxHBL4u4vmz0xd13bhhBwvCWS78fe1f9b3hC6iYpu3/d3fi/32+VxL6fWd9Y3fnq3t/NiL6iid4YHMAC1AJ0D3HSBUZ9N4mhceAm467IH91OnkQluM+D8Ca6rqmygHlm63KFwL/wxcZ7FApXjwU/HheLu8g9gM1mBciz+JeG7Y0su/7Soj/qdi+a6KrrqgK0Ic/beVlhexTdrkHTBeOLpkfFTQiqIuhumcOjd0k8qn+P8icGwQk+D1+KiUsEv7fFLwuJ60dlznkUMKNn2l9qfODmUeb7IyW/n1Pxe6cT2r1Tr6IbvMUOrMKBnSFdUJYw8gb4m+cwa0LEW6Wun2DjrVZ6z7I8oOPgUoUfDuIcL76/9Ex9+TH1aoA0nXJ61w2ufgXLf8gtMYgB2IJLCcuy/M+0ix+h8SeXTZ/k9a5e+rHo0Ddyei8CcA3uorq1ER+SwZ9CAISAjx/kaYA735Jg/zclddf410HMmyq0Aw3qXlqkAwAbOdRJMbmziYZvTY/XIedfl6jvDHabwIF9t6hpV5W+M316rmM+ePe24PlM4fcqnx+9+BylQeJw879bDunLx+TPdZHXNvjxhvzEv885fbHEO/T/ZUzx/4ybW6Df6+c+L3+So3vt5xYr4gM/h7z3c+jTSdpP93PoOz/HxE5Sfq7Cz2Zc9XAFKPKvQ5BAwIL3Tv6fB1AiKPrvd0IGjOheS/K16MqqDKCcgHG/aXqSuAfYC/BtSUO2Jp6TU4+OIvF9OM2HqvNFuZAnNXgs6oMjwD8sNxJ/LTds/U5uz2exL+X2JO+fLjb8ndiksu67H7S5Z1PCkde2tH4jMmCAvhOsQu8j0yS8VeCGP8dg8DcGQ75nPPYB3xfYL+L76p+w8NM/YeH3hIVfidL+8+l1dexHmfcHDx5/RaXjx08z75z7nyetu5+I4L9VuPmNw/Yf5eiv1qE/miv88mD/b6FC7i12eaNCkNX/787iv6UqfTPifHTi92GV+R99+vB8EHmLULck/bXO/Vc07McTdeSf3Pyf3Py/kJuTn/5aNeiPc/MPatC/Ljdfv0s21KaKGqf47CT+TXf1wGmrh1H8q4u/hAdf9TQfqcjLzxhfeoqharL/XzJ8DHkj/o9SfPQD8f+yFP+pnvC3z/EBXvy1kvwF8Y7zT5/sK333J4sAD1Y+9iHKou6SIH6NCLAF+U4E5J8qAvKPF1p+VTXgj3wU+W0f1n8l6PmeL7AhKaP6Eoa8pHePSm9D71HrYom+XcO3REdfBrycKXa+BHyfn4l/foy5T0h8me8pNvq2MshXP0wEJP/z1aDmpYL84W8m/uSIaEH8Xkj0eYF9b0yE/SRY+JZj+Y9gAfllx/Lvzyv+5tCM/9WQ+eslcDABSOURL3aaNuheIWzfhbB2/ptp/puEBkW/8htTvwvnXyN0/97v8T7sKaumcPIXfcODg7ATRoS3njzoANh+ftjY+5EwIP78QADqCea+9CRA/coHTeRpJbeergHQFgJKTzRvUTKUbdX4r+d7HugnbZ07j88VkzJPnsaEAGa7N4TecY5DP63ZTyvmq1XEj2P937GaF2YCNfn2zD++qmTFIL8GMMp/dLwwE/72B7Tnjhvk9LMTfpM23HsBS4LmTc9PMK8F9sa+Vqv3cf8vOtljLLUYpEV/Ori5c16vqtINPjiQfSeVNnZqeJsUt1/LfE6jtpBRatUmXVJBb+Q+3MHX+OsHodPf9OBtItZBB0Q7bX3/ZdEwGSGC0rcJqadW5KkFknKA6S+p+yPK1zB0YRKbVvQB2QhRRYE/e8OKOSsCd4IGftAZQ53gdcBq24I3Cpqz2oKWNWQXWaJ8PRd5ewavbmQ+PQmUYTBDNQl8fBJYaTbaminlpVUlUsrLUbjaUozktKw8yZNn8WnE0WpG5SxLOfxxIulck2nZX2gHt2PZtMFH8zJ7K2uzSsPm0E1svlszPb0xjIVCdzLYQ8RGde8wVSwJ9E4TJTvBrhuNGQ8Y3WbGJqowthUMGlUqY023uhI1lBrtDxzNZixqB5QriVm60yxJbeVsg5oxYwRpawho1DIijdFM5haXU8aVXEVsuKLbUKI+XzM+tw5nQs+ApyEpjqvt1W7cLrq9vmOlYVfQESVZS109lkBPaXo4y7qkVUnOp7tBoFc0k1bJyMrhbmbbY5tSEmNQHK9cabGu95EK1IdPWz6xLCmdgf3QpylXdrPDtGKRCXR8gi/MUIPhPzoDF2rYkTsJtpXX61MPH+yeb8E/s9hHJxVjFkorUht7
PPINsbP5CxS7Itgn43Demxp1e2aApGM0o2j4QHHwh0Pd/zAVJb1op7OK2r16vr82wsWILbjDZsPSaVuMaNJu5QNv50JDHHFNYqI+sjleQynLtizR5GhTa0xfCLbsfls6Qe3Spd76MtpY5dY5gq06kz1ccpIbL1Mpnmti1Llo3+Y6xWeWdbA0nqfss64wQWCvVjWZ5UcFn7DtxkvB4BCYNQ8EwuOAQ/Q6XPbwmZjzY23L3MmiY03gqe+5DpsKcJreiAjr0ehA8drIVbs1x+AXjHI0mh1lTuabHUvpsn6muERBpHKchmhNcUcn0bjFlGfMkQqcUhEziE1XDdlaWxMQFQizmZbihktIj6GEVXOiZg3XikVjjRNAMn5D7Ig9cmUkfettwQD2lFeBFnfUvhHsrRP0Gc4di0Y6CjuvQN2WPCN6G3LbRuiHOdp7Fo4wkCG1tL7oUx9EYkhZB2rLbBofPdTZ0ZoZSxw3HiVpKnUScGnltFej0qvlGUzvnI4rLZ/YqzZn+aw1w7jCPU4wNhsNslo9xUySnKgQUSLSX23UygPNPkegqHXc9b2VE/opOCRpmLrZBcEkN8noK1Bp2thGiLLbH6n1yVd7nQpDMd2i+kIdAQGSTHW12G5OkknPh/4c5v6YcI3Nk1pOiTgFF8bkO2xNzqfISd2Y9rQSYVo9CxHcKgqgdCeTI3tMY93UYUJK5gd3lyJLc5nKbb8VV72mkx5u832wF05qCefsB22O0JBihawFaRW/Q4wCLNRbncXdgWgDDvMJw7yYGVs5l8mX3GVPymypecMq66GC3LzNGcSGtOhWEaIv7NEplwILUk8aoBm99ZjAL7pZubgBQWBKr9lcY8gAYknlctqHZRoInq3yYysGmq+UZ/xc7VcL65ZD0i7SE3u234ku5SeXvXviOX212keJ2a4zF2cxk85XZuIipnTsN7q/9UVBUrsUE8HgiFlgh/bINKc+TYjDTonWElpppGgnZXy+ck3vaguM96zFFo3EWSYOBK0e54G0eaxjj0tfAUTypeIUuzxJ5g3rHf1pog1kPWUGnfmRbgJb5AqvaxcQuJdtuZfSpTxtIXwJBurTgejsRM1uWUEvFFeFiAXCAtqcmixDV2Qcnc+OMkIjPmUFLo77imWrtjREfUhI08e2vHTsijNNLc4iciXp827j7G2mWfj4WK2IrWNOQqCxkSwAqpNPBeIoKTZxMAXoDS/VQGCxmo5cziPOpuiaNSt1mjo010OfAl0XnOVZGQ9Ctc7rwzFQ46DVwTjENs94hKyVSjMvS6/I1TVmddWpzExHVRijrEgdccgWgMHYDZSicBswSlHmDgZ2C9u5zvKGk9cYaafTDHWjNS+gJ6ZaYcoFSkDM2pHJpSlvepKmgtTU98tI2NajXwVNr+yl4mzXZrMhmq013pG+pWnWdqaqqPSBz6n6WLe2v7dw4FxblQMAe43JuowVwRCa3XSZmW6nbpfTlWkJzS561/J1a2DkcuCBeI+tlE4rFJrU6XTJGtZOVwMtCrjOpM61XF+RE7nGcC620WbFKJJ9oq0oWaU1x4VWsuvXIsambWcqV248DMnk1Rm/PY+u2881X8jhviEQx7msjyLfT7tAdaBJXQ2cR4Q0zKgrQuhmoHltE22XUeQBOKDbZgg1liiY/brcbgLawOljUp6NngGmw6BQE4FO81h09DcYNaPsrqIjsdwS0VhsleAijUvcO2f1dMg5MuKNWu86UZTt0d0yC/YEhAqRrW9nftEVU4Soh/ViOyXn9XS8riaDsSSxSTNOtYpBLWrqTPu4UsVQieJzNu+8TsABytFXEdmNtETVTTAxfAs3JtLHONoKrt7aVebNSQrR8ApXXO6BFtIJbfiD7uuivD1T6ysnePGxa/f89upVnS42YoTsw5AIthxR9Qomt/xVpbvaUGURN6aJKpdXNNEy1y3y+Dy7wMPRAdmmkTJOUjiTbEGtIFrQ+/iAB228W+Sb1PeunGJzaroGPXzM+dAvbvaOe4Cgkq6vK1deXeNtHDvJNXa7YX+hi4lIQfLKG0OLg7d8p78GIXhem7vSBdcty0JMw87qykSUUxIjO6y7SvmVaK65Ii6sCgEISjd1rS8JBKUO5WUeeNxWUA6n86w+bridMjK0NzQVIqsQGyPCI4NT42bXa7noxsDcXRxJ0EdRASHIPu1Ybljm3i7yN7LC7Zf1Phlme7uAMDIRtjpuG/EYCpucCeKQVZesSVPHVcgoCoyX1AstCAePxYhwNsyzSssHbe1PaHg4huc1VpxbBs8oGIH4Ut0cG6w4tGWcsckhCW3QvI9nanmVmhPgLg+xnJ88TgJYRQ97n2IRSgm05LxXlgw1nvcbUtRRkcf2PMJVoXjKoN12zSqqThCbIpHanoyJHKt0yVtVeKoaeb/Ur2Gf4/0lkLBVRA4hhEbU3Ogz6sUxMakUkhyWAIV5+cSG1HZtmqqji4V4WklRgq3DfqfablnW/jHSDqZ87OQkWMTFAStytueFZpMfgz1CDF2nye4yrS7Sqsr2YBZOPBZSPsYGfJA6IYgH0p9Ya3+YHXueaS7GbvoLfhDX2ZiCzVhl0tksgIS8qduKluR6S94ZD8clIH1d8jCuiWYfW89rUYLawjsHE9b6/FM3W5G611Rp4Q2gpQq9iETWherZuzxYn2C4e5lLYEySX3rc8eAfk862GGNEprSx0dVVlPZB7VMLfMFCp33FTMrVEaSf+RS1K6aReTW39Qir2umSKYeZxruLKSKtqyu9yGZrwzXzrDwdBrFULsWm1ib5HJA+tO5m1fqLrkkPiEoTgcSOZ/zEg6woR8oTHVJHFx2HRIWxFFG0pZ4c6quABcA/HTK39OPFZuxktL0kx+hIOd7WWkBDO0vdSotVhqxn60Log+PsJTcNNvYSqlXF748zc3Iw3k937kCnOIPrbUBLWi/GAjLs41QWMm86LnyxgUFcv65GxIaBt3yl54oTNrJGbHK+SYS1sp8WiT4yobhgFziKT9lpr0men2IsSdHskJaNnTTYDH9VmC8uHhPX0MMVS7c7ilCX123lq02q2oONUH2wYxBG5KzW22X6ZTwLEHOBbwzdKjuQFbrvB1NZ6tx1kadh7EJf7hjwx4o5wOuwLHb4tVIbtTRZIVLwveE2RNxdEPncXTyblXI78z0QnJ1wTpEPU9YfJS6OZ3LAdKF0ARIggd+CMA4V91I44Lke9YICUWzTbeesr3pgmvxB5Mrj6YDI4yyujmt64O2TOu1CdWImRw+KULTGVb6o9UN62hFrRJwjFe4L6QiDsp3dEGAgbKMZpctCOi/RUV6QaxdGcom7NcH2XCdKeXSOgFGf1AUjQsg5rYctPKHiBzF1aOKIlBAP1QIpohotuegSNFaspzrGHjgFG1C38HkDaI8dWo2uplnrBdUEDSA329xrTYvedrzMXG7unc0pdH/hm45pKHOithpVHq38ILWbuI4JV2PDuk752A4IvNxsuyW2Ovh+1hxTU5jzisij8QwNFsJ7VEEdpNYTTq8d29YMS1zV8lquHBSKXgyzEw5inX7Jq0dETDGnPOn
xhCnBYFNCTG1Caj711UoyVOEsmsjOcxSOXPGmuOiOl7UklymFMUcafj3EH9bxRdwIyOFQaTOCXGPEW6q87EwX5bLOvOvFMLVQt7AhuEauUs7a4YrEiUnsB6oIrgF9jfhJzwfoMb2zV2uyPrpXljNFcscPZ1zpwRvRWol1ixDbjXiM9iVl5rhVToWsunxxHrapHmXC5jihw6qA7vjIqO0QRhaIQHa3ShAw0LmaQRSYpdAdp6i8QKo+HfeLfBJ3u52roGk9sXZJkAHrDceCL6CUI7RRjlRpHoVx60HGRSLB9At+543Xousw72AVl80U+exYx1gB/etMb+5h+3bYTIkKqNBILh0uRGfi5V69huNeubnq7UkstlOMtFqABVJ6uHJnAdf2e/p0FY6TyUPDnGWj3pmH4cLkni2cCKnjVXspJTDVuqSiJeQ9vSi8dYtbaszQGsNiV9yXLlGjz8x4Va09RUpwMeO0XGe2XSrFng5Huz0KiI+NG5boWapnNv2Sq4y8P2k7oTtU2aY3WhhhbKLmQgTKEga2R20F0oxDZsYcQBf9BIzRGNZkHFerYJWdITriO1LtxBjJz+jFG9p5N678pDue9enSy2vcVs8MmqZs1w+x1R1w7NyqxJZNGDOFXFJrTzNC6DZyxFytRrgALXQ11B+mcEAQpci2hrvAszQSK2oybJ9chuFl5SoyKiDichvlsDSxmgUS3w8xLqARG8SLaca9Ag+P+3qLAe0Oexqz6OroFTbFCaVRau6+Y0LmfNrMrajgLjny4R4YX8oc0EVdds7kwrM8sZUgG+jiRJW1faZdUqFssoZYvhmcQ7nVxR1Pro6mkNQXReVnao5MQqXcQXYdNMbnRCTPtiHHspZ2JjHjS6gmRQ8Q61pt6pVTmeO5WMSHo0HX1FoCaMdbKnsxwrIlY2FZxrsuGErPFPtiu5g9dLE3YDBraeE9N9iXS+C4xbCkDghT8kWLxZI+QqiJSU2hz9a6TkX8slgljV/guHFcZDNzzbUyCquSwzrSREVCVuxY68iTIKfk8RBeujmhhUUbbCBSRhcBQ5jUOhPkKaQRkEXjeK1tx2jPicXJnboC+Dh9yfiJx2gYB5Nf0TYqrupI0dquhy7Sis7oWXsMUbsYAE66I84k0HMki7ZumqUOkpDcuJkPiFe3A6OsMlevBz8xYtXIxmgpy/G8qRpo0rpUEP0oFIIuXWclP/htjEvN1XJjS9XCU7msl2si2axZVU62XgVjnPLMweiDDh2REmGgteFjizxZsuhRcb0ZXGxZKc32HLQ86amXJKuwFbT6bK1ZuUxcjB3ibqdzq7VJWhhYKnpWLKvaaUf1WHzJz8UIQUOzu6NViyzNInqbA17F5VHoqWQlycGhDiSwGzHbs+uBniLqFPAUBquJqzQk+6o7rnAAv/50KHg4M0gAL2N+NA3C357yQUbrhFYnyJ9roRkL+kKSe9EtyUHrA052k6gJNCgqbG1QUr+Uoq0Bko3J2+/CtpRRbEFQM0i4aNKO1NqI1nR9SswkrKJVl+nOLA1cWx2WW0leOC0IbBAx2nnXDhNTFdsZu/UhMardhhoqlom++eqLiAChM7xmlzFl91ERDbCSSMu6hXNNJkdRBGvr8N/POFXA3nxPgZPvT+0++qQF++OnCjB4fP7fEW99L/6PySX3vw==</diagram></mxfile>
2211.10435/main_diagram/main_diagram.pdf ADDED
Binary file (46.7 kB). View file
 
2211.10435/paper_text/intro_method.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Until as recently as two years ago, reasoning was considered to be one of the most significant challenges that large language models (LLMs) had not yet overcome (Marcus, 2018; 2020; Garcez & Lamb, 2020). Recently, LLMs have shown impressive success on a wide range of tasks, including commonsense (Wei et al., 2021; Sanh et al., 2021; Madaan et al., 2022), mathematical (Lewkowycz et al., 2022; Wu et al., 2022; Mishra et al., 2022), and symbolic reasoning (Yao et al., 2022; Ahn et al., 2022), using few-shot prompting (Brown et al., 2020).
4
+
5
+ This process has been accelerated by methods that require LLMs to generate their explicit reasoning steps, such as "chain-of-thought" (Wei et al., 2022), "scratchpads" (Nye et al., 2021), and "least-to-most" (Zhou et al., 2022) prompting. In particular, the widely used chain-of-thought (COT) method presents the model with the explicit intermediate steps that are required to reach the final answer. Then, the model is expected to apply a similar decomposition to the actual test example, and consecutively reach an accurate final answer (Ling et al., 2017; Amini et al., 2019). Nevertheless, while LLMs can decompose natural language problems into steps and perform *simple* arithmetic operations, their performance falls dramatically when dealing with complex arithmetic (Hendrycks et al., 2021; Madaan & Yazdanbakhsh, 2022) or large numbers (Nogueira et al., 2021; Qian et al., 2022). In fact, even when fine-tuning a PaLM-based model on 164B tokens of explicit mathematical content, its two most common failures are reportedly "incorrect reasoning" and "incorrect calculation" (Lewkowycz et al., 2022).
6
+
7
+ In this paper, we propose Program-Aided Language model (PAL): a novel method that uses an LLM to read natural language problems and generate *programs* as reasoning steps, but offloads the *solution* step to a Python interpreter, as illustrated in Figure 1. This offloading leverages an LLM that can decompose a natural language problem into programmatic steps, which is fortunately available using contemporary state-of-the-art LLMs that are pre-trained on both natural language and programming languages (Brown et al., 2020; Chen et al., 2021a; Chowdhery et al., 2022). While natural language understanding and decomposition
8
+
9
+ <sup>\*</sup>The first three authors contributed equally. <sup>1</sup>Language Technologies Institute, Carnegie Mellon University, USA <sup>2</sup> Inspired Cognition, USA.
10
+
11
+ Input
12
+
13
+ Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?
14
+
15
+ A: Roger started with 5 tennis balls. 2 cans of 3 tennis balls each is 6 tennis balls. 5 + 6 = 11. The answer is 11.
16
+
17
+ Q: The bakers at the Beverly Hills Bakery baked 200 loaves of bread on Monday morning. They sold 93 loaves in the morning and 39 loaves in the afternoon. A grocery store returned 6 unsold loaves. How many loaves of bread did they have left?
18
+
19
+ Model Output
20
+
21
+ A: The bakers started with 200 loaves. They sold 93 in the morning and 39 in the afternoon. So they sold 93 + 39 = 132 loaves. The grocery store returned 6 loaves. So they had 200 - 132 - 6 = 62 loaves left.
22
+
23
+ The answer is 62.
24
+
25
+ ![](_page_1_Picture_10.jpeg)
26
+
27
+ Input
28
+
29
+ Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?
30
+
31
+ A: Roger started with 5 tennis balls.
32
+
33
+ tennis balls = 5
34
+
35
+ 2 cans of 3 tennis balls each is
36
+
37
+ bought\_balls = 2 \* 3
38
+
39
+ tennis balls. The answer is
40
+
41
+ answer = tennis balls + bought balls
42
+
43
+ Q: The bakers at the Beverly Hills Bakery baked 200 loaves of bread on Monday morning. They sold 93 loaves in the morning and 39 loaves in the afternoon. A grocery store returned 6 unsold loaves. How many loaves of bread did they have left?
44
+
45
+ ![](_page_1_Figure_21.jpeg)
46
+
47
+ Figure 1: A diagram illustrating PAL: Given a mathematical reasoning question, Chain-of-thought (left) generates intermediate reasoning steps of free-form text. In contrast, Program-aided Language models (PAL, right) generate intermediate steps and Python code. This shifts the role of running the reasoning steps from the language model to the Python interpreter. The final answer is obtained by running the generated reasoning chain. Chain-of-thought reasoning is highlighted in blue; PAL steps are highlighted in gray and pink; the Python interpreter run is highlighted in black and green.
48
+
49
+ require LLMs, solving and reasoning can be done with the external solver. This bridges an important gap in chain-of-thought-like methods, where reasoning chains can be correct but produce an incorrect answer.
50
+
51
+ We demonstrate the effectiveness of PAL across 13 arithmetic and symbolic reasoning tasks. In all these tasks, PAL using Codex (Chen et al., 2021a) outperforms much larger models such as PaLM-540B using chain-of-thought prompting. For example, on the popular GSM8K benchmark, PAL achieves state-of-the-art accuracy, surpassing PaLM-540B with chain-of-thought by an absolute 15% in top-1 accuracy. On questions with large numbers, a dataset we call GSM-HARD, PAL outperforms CoT by an absolute 40%. We believe that this seamless synergy between a neural LLM and a symbolic interpreter is an essential step towards general and robust AI reasoners.
52
+
53
+ Few-shot prompting leverages the strength of large-language models to solve a task with a set of $k$ examples that are provided as part of the test-time input (Brown et al., 2020; Liu et al., 2021; Chowdhery et al., 2022), where $k$ is usually a number in the low single digits. These input-output examples $\{(x_i,y_i)\}_{i=1}^k$ are concatenated in a prompt $p \equiv \langle x_1 \cdot y_1 \rangle \parallel \langle x_2 \cdot y_2 \rangle \parallel \ldots \parallel \langle x_k \cdot y_k \rangle$, where "·" denotes the concatenation of an input and output, and "$\parallel$" indicates the concatenation of different examples. During inference, a test instance $x_{test}$ is appended to the prompt, and $p \parallel x_{test}$ is fed to the model, which thereby generates an answer $y_{test}$. Note that such few-shot prompting does not modify the underlying LLM.
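+
+ As a concrete illustration, the following is a minimal sketch of this prompt construction (the example strings and the helper name are illustrative placeholders, not the paper's actual prompts):
+
+ ```python
+ # Minimal sketch of few-shot prompt construction (illustrative only).
+ # Each <x_i . y_i> pair is realised as "question\nanswer", and "||" between
+ # examples is realised as a blank line.
+ examples = [
+     ("Q: Roger has 5 tennis balls. He buys 2 more cans of 3 balls each. "
+      "How many tennis balls does he have now?",
+      "A: The answer is 11."),
+     # ... k such examples in total
+ ]
+
+ def build_prompt(examples, x_test):
+     parts = [f"{x}\n{y}" for x, y in examples]  # <x_i . y_i>
+     parts.append(x_test)                        # append the test instance
+     return "\n\n".join(parts)                   # p || x_test
+
+ prompt = build_prompt(examples, "Q: A new test question ...")
+ # `prompt` is then fed to the frozen LLM, which generates y_test.
+ ```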
54
+
55
+ Wei et al. (2022) additionally augment each in-context example with *chain of thought* (COT) intermediate steps. Specifically, each in-context example in the COT setup is a triplet $\langle x_i, t_i, y_i \rangle$, where $x_i$ and $y_i$ are the input-output pair as before, and $t_i$ is a natural language description of the steps that are needed to arrive at the output $y_i$ from the input $x_i$. See Figure 1 for an example. With the additional "thoughts" $t_i$, the prompt is set to $p \equiv \langle x_1 \cdot t_1 \cdot y_1 \rangle \parallel \langle x_2 \cdot t_2 \cdot y_2 \rangle \parallel \ldots \parallel \langle x_k \cdot t_k \cdot y_k \rangle$.
56
+
57
+ During inference, the new question $x_{test}$ is appended to the prompt as before and supplied to the LLM. Crucially, the model is tasked with generating *both* the thought $t_{test}$ and the final answer $y_{test}$. This approach of prompting the model to first generate a reasoning process $t_{test}$ improves the accuracy of the answer $y_{test}$ across a wide range of tasks (Wang et al., 2022a; Wei et al., 2022; Zhou et al., 2022; Wang et al., 2022b).
58
+
59
+ In a Program-aided Language model, we propose to generate the thoughts $t$ for a given natural language problem $x$ as interleaved natural language (NL) and programming language (PL) statements. Since we delegate the solution step to an interpreter, we do not provide the final answers to the examples in our prompt. That is, every in-context example in PAL is a *pair* $\langle x_i, t_i \rangle$, where $t_i = [s_1, s_2, \ldots, s_N]$ with each $s_j \in \text{NL} \cup \text{PL}$, a sequence of tokens in either NL or PL. The complete prompt is thus $p \equiv \langle x_1 \cdot t_1 \rangle \parallel \langle x_2 \cdot t_2 \rangle \parallel \ldots \parallel \langle x_k \cdot t_k \rangle$.
60
+
61
+ Given a test instance $x_{test}$, we append it to the prompt, and $p \parallel x_{test}$ is fed to the LM. We let the LM generate a prediction $t_{test}$, which contains both the intermediate steps *and* their corresponding programmatic statements.
62
+
63
+ ```
64
+ A: Roger started with 5 tennis balls.
65
+ tennis_balls = 5
66
+ 2 cans of 3 tennis balls each is
67
+ bought_balls = 2 * 3
68
+ tennis balls. The answer is
69
+ answer = tennis_balls + bought_balls
70
+ ```
71
+
72
+ Figure 2: A close-up of a single example from a PAL prompt. Chain-of-thought reasoning is highlighted in blue, and PAL programmatic steps are **highlighted in gray and pink**.
73
+
74
+ Example A close-up of the example from Figure 1 is shown in Figure 2. While chain-of-thought only decomposes the solution in the prompt into natural language steps such as Roger started with 5 tennis balls and
75
+
76
+ 2 cans of 3 tennis balls each is 6, in PAL we also augment each such NL step with its corresponding programmatic statement such as **tennis_balls = 5** and **bought_balls = 2 \* 3**. This way, the model learns to generate a *program* that will provide the answer for the test question, instead of relying on the LLM to perform the calculation correctly.
77
+
78
+ We prompt the language model to generate NL intermediate steps using comment syntax (e.g. "# ..." in Python) such that they will be ignored by the interpreter. We pass the generated program $t_{test}$ to its corresponding solver, run it, and obtain the final result $y_{test}$. In this work we use a standard Python interpreter, but this can be any solver, interpreter, or compiler.
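+
+ A minimal sketch of this post-hoc execution step is shown below (the harness here is illustrative, not the paper's exact code; `generation` stands in for the model output $t_{test}$):
+
+ ```python
+ # Hand the generated program to the Python interpreter and read back `answer`.
+ generation = """
+ # Roger started with 5 tennis balls.
+ tennis_balls = 5
+ # 2 cans of 3 tennis balls each is
+ bought_balls = 2 * 3
+ # tennis balls. The answer is
+ answer = tennis_balls + bought_balls
+ """
+
+ namespace = {}
+ exec(generation, namespace)     # the interpreter performs the calculation
+ y_test = namespace["answer"]    # final result: 11
+ print(y_test)
+ ```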
79
+
80
+ Crafting prompts for PAL In our experiments, we leveraged the prompts of existing work whenever available, and otherwise randomly selected the same number (3-6) of examples as previous work for creating a fixed prompt for every benchmark. In all cases, we augmented the free-form text prompts into PAL-styled prompts, leveraging programming constructs such as for loops and dictionaries when needed. Generally, writing PAL prompts is easy and quick.
81
+
82
+ We also ensure that variable names in the prompt meaningfully reflect their roles. For example, a variable that describes the *number of apples in the basket* should have a name such as **num_apples_in_basket**. This keeps the generated code linked to the entities in the question. In Section 6 we show that such meaningful variable names are critical. Notably, it is also possible to incrementally run the PL segments and feed the execution results back to the LLM to generate the following blocks. For simplicity, in our experiments, we used a single, post-hoc execution.
83
+
84
+ This work focuses on COT-style reasoning chain, but in Appendix I we show that PAL also improves Least-to-Most (Zhou et al., 2022) prompts, which introduce reasoning chains that decompose a question into sub-questions.
85
+
86
+ # Method
87
+
88
+ Finally, we compare PAL and COT on algorithmic reasoning. These are tasks where a human programmer can write a deterministic program with prior knowledge of the question. We experiment with two algorithmic tasks: OBJECT COUNTING and REPEAT COPY. OBJECT COUNTING involves answering questions about the number of objects belonging to a certain type. For example, as shown in Figure 5: *"I have a chair, two potatoes, a cauliflower, a lettuce head, two tables, ... How many vegetables do I have?"*. REPEAT COPY requires generating a sequence of words according to instructions. For example, as shown in Appendix J.6: *"Repeat the word duck four times, but halfway through also say quack"*.
2302.10145/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2022-08-06T16:54:39.078Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6 Safari/605.1.15" version="20.2.3" etag="pQ9iq59WC3EueJG_gLiR" type="device"><diagram id="pcH-AAIj-e2PYwS15DwG">7Vtbd9o4EP41PJZj2Vg2j4FA92G7J2ezl/ZRsQVoa1usEAX2169kSb7JBAoG0oY8JNZ4JEvzfZoZjZ2eN063HxlaLj7RGCc914m3Pe+x57pg4ELxR0p2ShI6jhLMGYm1Uil4Jv9hLTRqaxLjVU2RU5pwsqwLI5plOOI1GWKMbupqM5rUn7pEc/1EpxQ8RyjBltrfJOYLvQq/ov0LJvOFeTIw60uRUdaC1QLFdFMReZOeN2aUcnWVbsc4kcYzdlH9pnvuFhNjOOPHdHBVh28oWeu16XnxnVkso+ssxlIf9LzRZkE4fl6iSN7dCHiFbMHTRN+e0YxPUUoSiewfJBUouc5veCN+/05TlGkVDamYnGiTJBnThDIhyGgm5KMVZ/QrNsKe64XuyINQ3InRalHMRakZBITtRiStmt4dCBFikX4akE/T68WM4+1em4ECCUFhTFPM2U6o6A4e9FUXzd4AaqNtSi74BvFFjQeh5qDm37wYu4RIXGiU2hHzWhCDCdeGrUEH/11Tc+PDKjfCg1AQa93mljD35RZQgJayNuwqHeBc/v0rn4TrTNdZxAnNZIcnulwnSLbMvMSK1NRUpzMJVmGLIEbs4zAetFPmRVGmC8TDYb+BeRBamAPYgrmRnQP54A75DSAHTcjD4HqQ+5bJcCxCkG5Sxhd0TjOUTErpqDSqdHSlzq+ULrUp/8Gc77Q7RGtO64bGW8I/V66/yKH6Yi6q+bjVQ+eNnWlkYmmfqw3VzTfNslveMv2+L1IIz6nabUFhxRHjDzKuV0OIkE2JNLB+eGw0XhIafVUirdAeTJq8QzicRW28g1GIX2YF7yRSNdat6JrpvWYyHzG7OTZE9I4mJ8Nyn32rD38Oz+CPEf/hOJyMpofjfyfR3W1sfAiG1sYvUtF6fO9g5wdvyNk/0YREu6s49fbNpfnQBayBBSu0YQ1bUB10AGp4S3deceFfKrcOufM+FI6p6tIPOPS89YSZIBbH7H17+aHt5W/o5IdvyKU088efwrkM7Jjh284FtJ0Ju0gWTdjZD3BUGKPEwxDagugR42We1Svn73xkKCY4x+YoqIQdeR2PVru3xH2UkLlwCY+ReJp0ISOJColQ8qBvpCSOc+fYRoC6wzwhE7lMmDE7v8qEYQsTvC6YACw0rhpoTogzoO84Qf3oAJ3hu4g1sxmGUWusiYPhi1NS8tVYY44P1WADbxdsgH2CeOsUFJ0Ct0rBD07fcQ8dYO8cfJ2DILghCe0q6Sec0vxho7VYM7vpMXe/3bvLSSBsxiFox6HQv8x5B9xLljcoWQZes2Q5bMH8UiVL4J+UhWoLHIXaM0YsWtzz0CN2v3kXad6wDuws1G07j3SShcLbpgDADStJQBHLj0kEgiLqF/nDD13Dluu0cpQTndPrGUBgZwDD7hOAvKswAtpVFJaUZHxVGflJCiqbwbysM27Rb7wUb+j73qv64kLNoNwMxVKO2x9vqsbLcEx0KcaZCHqx1Q1DYof5j9+MhS1vbC9V7zUv+s9AGIQWwgqGZ04ZvsfAwwzwrbf2ENgc8MGFguD5Vde9FBAena/ZC9J50Z0KBwu0Dmymxu71qFB87PUG3uYfmQnJCgio1UREWgWKOt1PXRI5PSHqPNOxU5NB06eFoKC2GUblZbrnK3kO9Ad9PxiWP/bAbn1kld1ZI5+QA5mlX8I9TuSg33W4f8++0febXzqFwC4bmEpC577RvWCmhNJlck+VjuBA4NTLBTAcXC86nv9F614G/LmMEb8z4Khk2W3kR86lGCCa5QfuKnSU/ybgTf4H</diagram></mxfile>
2302.10145/main_diagram/main_diagram.pdf ADDED
Binary file (30.9 kB). View file
 
2302.10145/paper_text/intro_method.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Deep Policy Gradient (PG) methods achieved impressive results in numerous control tasks [\(Haarnoja](#page-9-0) [et al., 2018\)](#page-9-0). However, these methods deviate from the underlying theoretical framework to compute gradients tractably. Hence, the promising performance of Deep PG algorithms suggests a lack of rigorous analysis to motivate such results. [Ilyas et al.](#page-9-1) [\(2020\)](#page-9-1) investigated the phenomena arising in practical implementations by taking a closer look at key PG primitives (e.g., gradient estimates, value predictions). Interestingly, the learned value networks used for predictions (*critics*) poorly fit the actual return. As a result, the local optima where critics get stuck limits their efficacy in the gradient estimates, driving policies (*actors*) toward sub-optimal performance.
4
+
5
+ Despite the lack of investigations to understand Deep PG's results, several approaches have been proposed to improve these methods. Ensemble learning [\(Lee et al., 2021;](#page-9-2) [He et al., 2022\)](#page-9-3), for example, combines multiple learning actors' (or critics') predictions to address overestimation and foster diversity. These methods generate different solutions that improve exploration and stabilize the training, leading to higher returns. However, the models used at training and inference time pose significant challenges that we discuss in Section [5.](#page-7-0) Nonetheless, popular Deep Reinforcement Learning (RL) algorithms (e.g., TD3 [\(Fujimoto et al., 2018\)](#page-9-4)) use two value networks, leveraging the benefits of ensemble approaches while limiting their complexity. To address the issues of ensembles, gradient-free methods have been recently proposed [\(Khadka & Tumer, 2018;](#page-9-5) [Marchesini & Farinelli,](#page-9-6) [2020;](#page-9-6) [Sigaud, 2022\)](#page-10-0). The idea is to complement Deep PG algorithms with a search mechanism that uses a population of perturbed policies to improve exploration and to find policy parameters with higher payoffs. In contrast to ensemble methods that employ multiple actors and critics, gradientfree population searches typically focus on the actors, disregarding the value network component of Deep PG. Section [5](#page-7-0) discusses the limitations of policy search methods in detail. However, in a PG context, critics have a pivotal role in driving the policy learning process as poor value predictions lead to sub-optimal performance and higher variance in gradient estimates [\(Sutton & Barto, 2018\)](#page-10-1). Such issues are further exacerbated in state-of-the-art Deep PG methods as they struggle to learn good value function estimation [Ilyas et al.](#page-9-1) [\(2020\)](#page-9-1). For this reason, we propose a novel gradient-free population-based approach for critics called Value Function Search (VFS), depicted in Figure [1.](#page-1-0) We aim to improve Deep PG algorithms by enhancing value networks to achieve (i) a better fit of the actual return, (ii) a higher correlation of the gradients estimate with the (approximate) true gradient, and (iii) reduced variance. In detail, given a Deep PG agent charac-
6
+
7
+ ![](_page_1_Figure_2.jpeg)
8
+
9
+ <span id="page-1-0"></span>Figure 1: Overview of VFS.
10
+
11
+ terized by actor and critic networks, VFS periodically instantiates a population of perturbed critics using a two-scale perturbation noise designed to improve value predictions. Small-scale perturbations explore *local* value predictions that only slightly modify those of the original critic (similarly to gradient-based perturbations [\(Lehman et al., 2018;](#page-9-7) [Martin H. & de Lope, 2009;](#page-10-2) [Marchesini &](#page-9-8) [Amato, 2022\)](#page-9-8)). Big-scale perturbations search for parameters that allow escaping from local optima where value networks get stuck. In contrast to previous search methods, evaluating perturbed value networks require computing standard value error measures using samples from the agent's buffer. Hence, the Deep PG agent uses the parameters with the lowest error until the next periodical search. Crucially, VFS's critics-based design addresses the issues of prior methods as it does not require a simulator, hand-designed environment interactions, or weighted optimizations. Moreover, our population's goal is to find the weights that minimize the same objective of the Deep PG critic.
12
+
13
+ We show the effectiveness of VFS on different Deep PG baselines: (i) Proximal Policy Optimization (PPO) [\(Schulman et al., 2017\)](#page-10-3), (ii) Deep Deterministic Policy Gradient (DDPG) [\(Lillicrap et al.,](#page-9-9) [2016\)](#page-9-9), and (iii) TD3 on a range of continuous control benchmark tasks [\(Brockman et al., 2016;](#page-9-10) [Todorov et al., 2012\)](#page-10-4). We compare over such baselines, an ensembled Deep RL method, SUNRISE [\(Lee et al., 2021\)](#page-9-2), and the policy search Supe-RL [\(Marchesini et al., 2021a\)](#page-10-5). In addition, we analyze key Deep PG primitives (i.e., value prediction errors, gradient estimate, and variance) to motivate the performance improvement of VFS-based algorithms. Our evaluation confirms that VFS leads to better gradient estimates with a lower variance that significantly improve sample efficiency and lead to policies with higher returns. Our analysis highlights a fundamental issue with current state-ofthe-art Deep PG methods, opening the door for future research.
14
+
15
+ PG methods parameterize a policy $\pi_\theta$ with a parameter vector $\theta$ (typically the weights of a Deep Neural Network (DNN) in a Deep PG context), and $\pi_\theta(a_t|s_t)$ models the probability of taking action $a_t$ in a state $s_t$ at step $t$ in the environment. These approaches aim to learn $\theta$ by following the gradient of an objective $\eta_\theta$ over such parameters [\(Sutton & Barto, 2018\)](#page-10-1). Formally, the primitive gradient estimate on which modern Deep PG algorithms build has the following form [\(Sutton et al., 1999\)](#page-10-6):
16
+
17
+ <span id="page-1-1"></span>
18
+ $$\nabla \eta_{\theta} = \mathbb{E}_{(s_t, a_t) \in \tau \sim \pi_{\theta}} \left[ \nabla_{\theta} \log \pi_{\theta}(a_t | s_t) Q_{\pi_{\theta}}(s_t, a_t) \right] \tag{1}$$
19
+
20
+ where $(s_t, a_t) \in \tau \sim \pi_\theta$ are states and actions that form the trajectories sampled from the distribution induced by $\pi_\theta$, and $Q_{\pi_\theta}(s_t, a_t)$ is the expected return after taking $a_t$ in $s_t$. However, Equation [1](#page-1-1) suffers from a high-variance expectation, and different baselines have been used to mitigate the issue. In particular, given a discount value $\gamma \in [0, 1)$, the state-value function $V_{\pi_\theta}(s)$ is an ideal baseline as it leaves the expectation unchanged while reducing variance [\(Williams, 1992\)](#page-10-7):
21
+
22
+ <span id="page-1-3"></span>
23
+ $$V_{\pi_{\theta}}(s) = \mathbb{E}_{\pi_{\theta}} \left[ G_t := \sum_{t=0}^{\infty} \gamma^t r_{t+1} | s_t = s \right]$$
24
+ (2)
25
+
26
+ where $G_t$ is the sum of future discounted rewards (*return*). Despite building on the theoretical framework of PG, Deep PG algorithms rely on several assumptions and approximations for designing feasible updates for the policy parameters. In more detail, these methods typically build on surrogate objective functions that are easier to optimize. A leading example is TRPO [\(Schulman](#page-10-8) [et al., 2015\)](#page-10-8), which ensures that a surrogate objective updates the policy locally by imposing a trust region. Formally, TRPO imposes a constraint on the Kullback–Leibler (KL) divergence between successive policies $\pi_\theta, \pi_{\theta'}$, resulting in the following optimization problem:
27
+
28
+ <span id="page-1-2"></span>
29
+ $$\max_{\theta} \mathbb{E}_{\pi_{\theta}} \left[ \frac{\pi_{\theta}(a_t|s_t)}{\pi_{\theta'}(a_t|s_t)} A_{\pi_{\theta}}(s_t, a_t) \right] \text{s.t. } D_{KL}(\pi_{\theta}(\cdot|s)||\pi_{\theta'}(\cdot|s)) \le \delta$$
30
+ (3)
31
+
32
+ where $\delta$ is a hand-tuned divergence threshold, and A(s,a) = Q(s,a) - V(s) is the advantage function. In practical implementations, Equation 3 is tractably optimized with additional tricks: (i) a second-order approximation of the objective, (ii) a mean KL of the current trajectory instead of the actual KL divergence. Given TRPO's computational demands, Schulman et al. (2017) further "relaxes" the optimization landscape by replacing the constraint with the following clipped objective, providing a significantly faster (i.e., less demanding) yet better-performing alternative over TRPO:
33
+
34
+ $$\max_{\theta} \mathbb{E}_{\pi_{\theta}} \left[ \min \left( \frac{\pi_{\theta}(a_t|s_t)}{\pi_{\theta'}(a_t|s_t)} A(s_t, a_t), \operatorname{clip} \left( \frac{\pi_{\theta}(a_t|s_t)}{\pi_{\theta'}(a_t|s_t)}, 1 - \epsilon, 1 + \epsilon \right) A(s_t, a_t) \right) \right] \tag{4}$$
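+
+ A minimal numpy sketch of this clipped surrogate, assuming the probability ratios and advantage estimates have already been computed:
+
+ ```python
+ import numpy as np
+
+ def clipped_surrogate(ratio, advantage, eps=0.2):
+     """Per-batch clipped objective (to be maximised).
+
+     `ratio` holds pi_theta(a|s) / pi_theta'(a|s) and `advantage` holds
+     A(s, a); both are assumed to be precomputed arrays of the same shape.
+     """
+     unclipped = ratio * advantage
+     clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps) * advantage
+     return np.mean(np.minimum(unclipped, clipped))
+ ```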
35
+
36
+ Similarly, Lillicrap et al. (2016) uses the deterministic PG theorem (Silver et al., 2014) for learning deterministic policies $\mu_{\theta}(s)$ . DDPG and further optimizations (e.g., TD3) are tightly related to standard Q-learning (Watkins & Dayan, 1992) and assume having a differentiable action-value function over actions. Deterministic PG algorithms learn the value parameters as in standard Double DQN. In contrast, policy parameters are learned under the $\max_{\theta} \mathbb{E}_{\pi_{\theta}} \left[ Q_{\phi}(s, \mu_{\theta}(s)) \right]$ optimization problem, where $\phi$ are the parameters of the critic. Moreover, TD3 addresses the overestimation of DDPG with additional tricks, e.g., learning two value functions, being pessimistic about the action value to use for the target computation, and delaying the policy updates.
37
+
38
+ In summary, Deep PG algorithms build their success on learning value functions for driving the learning of the policy parameters $\theta$ . However, significant research efforts have been devoted to improving policies, while the attention to improving critics appears marginal. Hence, to our knowledge, VFS is the first gradient-free population-based approach that works on top of Deep PG to improve critics without facing the complexities of learning ensembles or policy searches.
39
+
40
+ Policy search approaches generate a population of perturbed versions of the agent's policy $\pi_{\theta}$ to explore variations with higher returns (Khadka & Tumer, 2018; Marchesini & Farinelli, 2022; Marchesini et al., 2021b; Colas et al., 2018; Sigaud, 2022). To this end, gradient-informed perturbations (or variations) have been employed for limiting detrimental effects on $\pi_{\theta}$ , exploring small changes in the population's action sampling process (Lehman et al., 2018; Marchesini et al., 2022). In more detail, prior methods express the average divergence of a policy at state s over a perturbation $\omega$ as:
41
+
42
+ $$d(\pi_{\theta}, \omega) = \|\pi_{\theta}(\cdot|s) - \pi_{\theta+\omega}(\cdot|s)\|_{2} \tag{5}$$
43
+
44
+ and use the gradient of such divergence to approximate the sensitivity of $\theta$ over the policy decisions, which is used to normalize Gaussian-based perturbations. Formally, following Lehman et al. (2018):
45
+
46
+ $$\nabla_{\theta_a} d(\pi_{\theta}, \omega) \approx \nabla_{\theta} d(\pi_{\theta}, 0) + H_{\theta}(d(\pi_{\theta}, 0)) \omega \tag{6}$$
47
+
48
+ where $H_{\theta}$ is the Hessian of divergence with respect to $\theta$ . Although simpler first-order approximations achieve comparable results, gradient-based perturbations add significant overhead to the training process. Prior work introduced gradient-based variations because evaluating the population against detrimental changes is hard. As such, the quality of perturbed policies is assessed over many trajectories to ensure that they effectively achieve higher returns over arbitrary initializations (Marchesini et al., 2021a). In contrast, we focus on a critics-based search where high-scale perturbations may help the learning process to escape from local optima, achieving better predictions. Hence, VFS relies on a two-scale perturbation approach to maintain similar value predictions to the original value function while exploring diverse parameters to escape from local optima.
49
+
50
+ We introduce a gradient-free population-based search with two-scale perturbations to enhance critics employed in Deep PG. Ultimately, we aim to show that value functions with better predictions improve Deep PG primitives, leading to better sample efficiency and policies with higher returns.
51
+
52
+ Algorithm 1 shows the general flow of the proposed Value Function Search framework. During a standard training procedure of a Deep PG agent, VFS periodically generates a population $\mathcal{P}$ of perturbed value networks, starting from the agent's current critic parametrized by $\phi$ . We first instantiate
53
+
54
+ <sup>&</sup>lt;sup>1</sup>We do not consider the unconstrained penalty-based PPO due to the better performance of the clipped one.
55
+
56
+ n copies of $\phi$ and two Gaussian noise distributions $\mathcal{G}_{\min,\max}$ with $\sigma_{\min,\max}$ standard deviation for the two-scale variations (lines 3-4). Hence, we sample from the two distributions to apply each perturbation to half of the population (lines 5-6). In contrast to single-scale or gradient-based perturbations of prior work (Khadka & Tumer, 2018; Marchesini et al., 2021a; Sigaud, 2022), the two scales approach for value networks has the following advantages:<sup>2</sup> (i) $\mathcal{G}_{\min}$ maintains similar predictions to those of the unperturbed critic, similarly to gradient-based variations. Sampling from $\hat{N}(0, \sigma_{\min})$ translates into small changes in the value prediction process to find a better set of parameters within a local search fashion. (ii) $\mathcal{G}_{\max}$ is a Gaussian with a greater standard deviation $\mathcal{N}(0,\sigma_{\max})$ used to explore diverse value predictions to escape from local optima. (iii) Gaussian-based perturbations do not introduce significant overhead as gradient-based variations. After generating the population, VFS samples a batch b of trajectories from the agent's buffer B. There are different strategies for sampling the required experiences from the agent's memory based on the nature of the Deep PG algorithm. When using an on-policy agent (e.g., TRPO, PPO), we apply VFS after updating the policy and value networks. In this way, we reuse the same on-policy data to evaluate whether a perturbed set of weights has better value prediction (i.e., lower error). Off-policy agents (e.g., DDPG, TD3) could follow a similar strategy. Still, the reduced size of the mini-batches typically employed by off-policy updates would result in noisy evaluations. For this reason, we sample a larger batch of experiences to provide a more robust evaluation. In practice, to compute the prediction errors for each value network in the population (line 7), we use the typical objective used to train PG critics, the Mean Squared Error (MSE) (Sutton & Barto, 2018), whose general formulation is:
57
+
58
+ $$mse_{\phi_i}(b) = \frac{1}{|b|} \sum_{s \in b} \left[ V_{\pi}(s) - V_{\pi_{\phi_i}}(s) \right]^2$$
59
+ (7)
60
+
61
+ <span id="page-3-1"></span>where $V_{\pi}(s)$ is the true value function. However, it is typically unfeasible to access the true $V_{\pi}(s)$ and, as in standard training procedures of Deep PG algorithms, we rely on noisy samples of $V_{\pi}(s)$ or bootstrapped approximations. In more detail, on-policy updates are particularly interesting, as Deep PG algorithms typically learn critics with Monte-Carlo methods without bootstrapping the target value. Hence, performing a supervised learning procedure on $(s_t, G_t)$ pairs, where the return $G_t$ is an unbiased, noisy sample of $V_{\pi}(s)$ that replaces the expectation of Equation 2 with an empirical average. We then refer to a perturbed critic as a *local* improvement of the original unperturbed one when it achieves a lower error on b. In our context, given a value network parametrized by $\phi$ , a batch of visited trajectories b, and a perturbation $\omega$ , we define the value network parametrized by $\phi + \omega$ to be a local improvement of $\phi$ when $mse_{\phi+\omega}(b) \leq mse_{\phi}(b)$ . Moreover, in the limit where b samples all the trajectories, $G_t \equiv V_\pi(s_t)$ , and $mse_{\phi+\omega}(b) \leq mse_{\phi}(b)$ means that $\phi+\omega$ is a better or equal fit of the actual value function with respect to the original $\phi$ . Crucially, Monte-Carlo value approximation converges to a local optimum even when using non-linear approximation (Sutton & Barto, 2018). Hence, on-policy VFS potentially maintains the same convergence guarantees of the baseline.<sup>3</sup> To this end, it would be possible to anneal the perturbation scales to zero out the impact of VFS over the training phase. Hence, providing the diversity benefits of VFS early in the training while maintaining the convergence guarantee of the baseline algorithm within the limit.
62
+
63
+ # Method
64
+
65
+ - 1: Given: (i) a Deep PG agent with a value network parametrized by $\phi$ at training epoch e; (ii) a buffer B of visited trajectories; (iii) periodicity $e_s$ for VFS and population size n; (iv) scale $\sigma_{\min,\max}$ for the two-scale Gaussian noise $\mathcal{G}$ .
66
+ - 2: **if** $e \% e_s = 0$ **then**
67
+ - $\mathcal{P} \leftarrow \{\phi_0, \dots, \phi_n\}$ copies of $\phi$
68
+ - $\mathcal{G}_{\min,\max} \leftarrow \mathcal{N}(0,\sigma_{\min,\max})$
+ - $\phi_{1:\frac{|P|}{2}} \leftarrow \phi_i + \mathcal{G}_{\min}, \quad \phi_{\frac{|P|}{2}:n} \leftarrow \phi_i + \mathcal{G}_{\max}$
69
+ - $b \leftarrow$ Sample a batch of trajectories from B
70
+ - $mse_{\mathcal{P}} \leftarrow \text{Evaluate } \mathcal{P} \text{ over } b \text{ as Equation } 7$
71
+ - $\phi \leftarrow \min(mse_{\mathcal{P}})$
72
+ - <span id="page-3-0"></span>9: end if
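+
+ For concreteness, the search step above can be sketched as follows. This is a minimal illustration, not the authors' implementation; the critic interface (`predict_values`) and the buffer method (`sample_batch`) are assumed helper names.
+
+ ```python
+ import numpy as np
+
+ def vfs_step(phi, buffer, predict_values, n=10, sigma_min=0.01, sigma_max=0.1):
+     """One periodic Value Function Search step (sketch of Algorithm 1).
+
+     `phi` is a flat numpy vector of critic weights, `buffer.sample_batch()`
+     returns (states, returns), and `predict_values(params, states)` runs the
+     critic with the given weights (both assumed helpers).
+     """
+     states, returns = buffer.sample_batch()             # b <- trajectories from B
+
+     population = [phi]                                  # keep the unperturbed critic,
+     for i in range(n):                                  # so phi is only replaced when
+         sigma = sigma_min if i < n // 2 else sigma_max  # a perturbation has lower error
+         noise = np.random.normal(0.0, sigma, size=phi.shape)
+         population.append(phi + noise)
+
+     # Evaluate every candidate with the MSE of Equation 7 and keep the best.
+     errors = [np.mean((returns - predict_values(p, states)) ** 2)
+               for p in population]
+     return population[int(np.argmin(errors))]
+ ```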
73
+
74
+ <sup>&</sup>lt;sup>2</sup>We support claims on our perturbation approach with additional experiments in Section 4.
75
+
76
+ <sup>&</sup>lt;sup>3</sup>Due to bootstrapping, the same result generally does not apply to temporal difference targets.
77
+
78
+ Nonetheless, as discussed in [Ilyas et al.](#page-9-1) [\(2020\)](#page-9-1), Deep PG algorithms deviate from the underlying theoretical framework. We performed additional experiments in Section [4](#page-4-0) to show that maintaining VFS through the entire learning process results in better performance over zeroing out the perturbations. Moreover, in a worst-case scenario where the population search never improves the value prediction locally, VFS matches the same local optima of the original approach. However, following the insights of [Fujimoto et al.](#page-9-4) [\(2018\)](#page-9-4), practical Deep PG algorithms can not theoretically guarantee to reduce estimation errors or improve predictions outside the batch used for computing the update.
79
+
80
+ VFS addresses most limitations of previous policy search approaches and minimizes the same objective of the Deep PG critics, leading to the following crucial advantages:
81
+
82
+ - VFS does not require additional environment interactions or hand-designed evaluations for selecting the best set of parameters in the population.
83
+ - MSE is a widely adopted objective for learning value networks, providing an established evaluation strategy. Moreover, batch computations and parallelization make this process particularly efficient. Nonetheless, VFS is not explicitly designed for MSE, and extending it to different error measures is straightforward.
84
+ - At the cost of adding overhead, the population evaluation may scale to an arbitrarily large number of samples to improve robustness.
85
+
86
+ Finally, the parameters with the lowest error replace the current agent's critic until the next population search (line 8). Many Deep RL algorithms employ target networks parametrized by $\phi_{tg}$ to reduce overestimation. In this scenario, we also update the target weights toward the new ones by using standard Polyak averaging with an $\alpha \in (0, 1)$ weight transfer value: $\phi_{tg} = \alpha\phi + (1 - \alpha)\phi_{tg}$.
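+
+ A minimal sketch of this soft target update, assuming the weights are stored as dicts of numpy arrays (an illustrative representation, not a specific framework's API):
+
+ ```python
+ def polyak_update(phi, phi_tg, alpha):
+     """Soft update of the target critic: phi_tg <- alpha*phi + (1-alpha)*phi_tg."""
+     return {name: alpha * phi[name] + (1.0 - alpha) * phi_tg[name]
+             for name in phi_tg}
+ ```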
87
+
88
+ Limitations. Here we highlight the limitations of VFS: (i) batched computations and parallelization do not avoid a marginal overhead induced by the population search. (ii) In an off-policy setting, VFS requires tuning the batch size for the error computation to balance the trade-off between computational demands and the evaluation quality. (iii) There are no guarantees that small-scale perturbations produce a local variation to the original predictions. However, they have been previously shown effective in doing so [\(Martin H. & de Lope, 2009\)](#page-10-2), and we confirm this result in Section [4.](#page-4-0)
2303.10752/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2303.10752/paper_text/intro_method.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Depth estimation is a fundamental task in computer vision. Predicting depth from RGB images shows great potential in applications like autonomous cars or robots. It has also been used as a powerful pretext training task in unsupervised downstream tasks including visual feature extraction [@jiang2018self; @ren2018cross], semantic segmentation [@hoyer2021three; @cardace2022plugging], *etc*. However, collecting sufficiently diverse datasets with per-pixel depth ground-truth for supervised learning is challenging. To overcome this limitation, self-supervised depth estimation works exploit the consistency of 3D geometries within stereo pairs [@garg2016unsupervised; @godard2017unsupervised] or monocular videos [@ji2021monoindoor; @godard2019digging], and have shown impressive results. Nevertheless collecting synchronized multi-view images or videos is resource-consuming.
4
+
5
+ <figure id="fig:head" data-latex-placement="!t">
6
+ <embed src="figure/dfd_fsf.pdf" />
7
+ <figcaption>(a) A sparse focal stack from the NYUv2 dataset <span class="citation" data-cites="Silberman:ECCV12"></span>. (b) The ground-truth depth map and all-in-focus (AIF) image. (c) The depth map and AIF image estimated by our self-supervised framework from the sparse focal stack.</figcaption>
8
+ </figure>
9
+
10
+ Another clue for depth estimation is defocus. Defocus blur is a measurable property of images that is associated with the geometry of the camera lens and the changes in depth. Previous works [@favaro2003observing; @7298972] prove that depth can be recovered from a set of images with different focus distances, *i.e.*, a focal stack, by observing their amounts of blur. With the help of deep learning methods, supervised DFD works [@yang2022deep; @9157368] estimate depth in a data-driven way. Recent DFD methods [@8954440; @Wang-ICCV-2021; @9464709] claim they are freed from being supervised by the depth ground-truth. However, these works utilize the all-in-focus (AIF) image in model training, which is more of a theoretical concept than an image captured by a real camera lens [@kingslake1989history]. Existing works generally treat images taken with small apertures as the AIF image. Such approximations inevitably contain regional blurriness and suffer from underexposure at short range. Thus, current DFD methods have drawbacks in practical applications.
11
+
12
+ In order to close the gap between theory and real-world applications, we design a more realistic setting for the DFD tasks: only focal stacks are provided in model training, while depth and AIF image ground-truth is unavailable. This is a challenging task because we no longer have a direct or indirect optimization goal for the model. To tackle this challenge, we propose a self-supervised DFD framework, which constrains the predicted depth map and AIF image to be accurate by precisely reconstructing focal stacks. Specifically, our framework consists of a neural model, **D**epth**AIF**-**Net** (**DAIF-Net**), which predicts the AIF image and the depth map from the focal stack, and a physically realistic optical model that reconstructs the input from the predicted AIF image and depth map. Since all images in a focal stack share the same depth and AIF image, and the physics model can deterministically map them to focal stacks, accurate depth maps and AIF images are a necessary and sufficient condition for precisely reconstructing the input. Therefore, by assuring the consistency between the input and the reconstructed focal stack, the prediction of the depth map and AIF image is in turn improved. To the best of our knowledge, this is the first self-supervised DFD work that relieves the need for AIF images and depth ground-truth. Such improvements over previous supervised and indirectly-supervised works make our method more applicable in real scenarios.
13
+
14
+ Extensive experiments are conducted on both synthetic and real datasets. Results show that the proposed framework is on par with the state-of-the-art supervised/indirectly supervised depth-from-focus/defocus methods and has convincing visual quality, as shown in [1](#fig:head){reference-type="ref+Label" reference="fig:head"}. Additionally, we apply our model to the data in the wild, and demonstrate the ability of our framework to be applied in real scenarios. Finally, we extend the training paradigm of our framework so that our model has the ability to be transferred between focal stacks with an arbitrary number of images.
15
+
16
+ Our contribution is three-fold:
17
+
18
+ - We design a more realistic and challenging scenario for the Depth-from-Defocus tasks, where only focal stacks are available in model training and evaluation.
19
+
20
+ - We propose the first completely self-supervised framework for DFD tasks. The framework predicts depth and AIF images simultaneously from a focal stack and is supervised by reconstructing the input.
21
+
22
+ - Our framework performs favorably against the supervised state-of-the-art methods, providing a strong baseline for future self-supervised DFD tasks.
23
+
24
+ # Method
25
+
26
+ In this section, we introduce our framework for depth estimation from sparse focal stacks. The framework is visualized in [2](#fig:ss-dfd){reference-type="ref+Label" reference="fig:ss-dfd"}. In the training stage, the DAIF-Net estimates AIF images and depth maps from the focal stacks. The optical model then reconstructs the inputs from the predicted depth and AIF image by simulating the physical process of defocus generation. Losses and regularization are applied to encourage the detailed reconstruction, and consequently improve the quality of the estimated depth and AIF image. In the inference stage, we estimate the depth map and AIF image directly from the trained DAIF-Net without the rendering process.
27
+
28
+ In the following, we first elaborate on the optical model behind the generation of defocus blur ([3.1](#sec:optical){reference-type="ref+label" reference="sec:optical"}). Then, we introduce our DAIF-Net and the intuition behind it ([3.2](#sec:funet){reference-type="ref+label" reference="sec:funet"}). Finally, we describe our supervision strategy that supports the self-supervised learning framework ([3.3](#sec:self_supervise){reference-type="ref+label" reference="sec:self_supervise"}).
29
+
30
+ ![**Left**: Illustration of the thin-lens equation. See text for symbol definitions. **Right**: The response curve of the $CoC$ radius, $\sigma$, at different scene depth, with different focus distance.](figure/defocus.pdf){#fig:defocus width="\\columnwidth"}
31
+
32
+ Defocus blur is a well-studied phenomenon that naturally exists in optical imaging systems. To quantitatively measure the defocus blur in an image, we introduce the defocus map. Given an optical system, the defocus map can be calculated from the depth map once we establish the relationship between depth and defocus. As illustrated in the left of Figure [7](#fig:defocus){reference-type="ref" reference="fig:defocus"}, when a point light source is out-of-focus, the light rays will converge either in front of or behind the image plane, and form a blurry circle on the image plane. The circle of confusion ($CoC$) measures the diameter of such a blurry circle. If the point light source is in focus, it will form an infinitely small point on the image plane, making it the sharpest projection with the minimum $CoC$. Therefore, $CoC$ describes the level of blurriness, in other words, the amount of defocus. Deriving from the thin-lens equation, the relationship between $CoC$, scene depth and camera parameters is well established: $$\begin{equation}
33
+ \centering
34
+ CoC = A\frac{|d_o-F|}{d_o}\frac{f}{F-f},
35
+ \end{equation}$$ where $A$ is the aperture diameter, $d_o$ is the object distance, or depth, $F$ is the focus distance, $f$ is the focal length. In practice, the f-number, $N=f/A$, is commonly used to describe the aperture size. Since all of the parameters are in units of meters, we also need to convert $CoC$ into units of pixels. Finally, we need the radius, $\sigma$, of the circle of confusion in focal stack rendering. The resulting equation becomes: $$\begin{equation}
36
+ \label{eq:thin_lens}
37
+ \sigma = \frac{CoC}{2\cdot p} = \frac{1}{2p}\frac{|d_o-F|}{d_o}\frac{f^2}{N(F-f)},
38
+ \end{equation}$$ where $p$ is the CMOS pixel size. With [\[eq:thin_lens\]](#eq:thin_lens){reference-type="ref+label" reference="eq:thin_lens"} and a set of camera parameters, we can easily convert a depth map into a defocus map. By varying the focus distance $F$, we can produce the defocus maps for a focal stack. The right of Figure [7](#fig:defocus){reference-type="ref" reference="fig:defocus"} shows how defocus responds to changes in depth at different focus distances. Note that with larger focus distances, it is generally harder to distinguish the amount of defocus, especially when two focus distances are close. Since training our framework heavily relies on reconstructing the focal stack, indistinguishable defocus undermines the effectiveness of our method. Therefore, we avoid this issue by properly selecting the camera parameters, *i.e.*, the f-number $N$, the focal length $f$ and the focus distance $F$.
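+
+ As a minimal numpy sketch of this conversion (the camera parameters used here are illustrative placeholders, not the dataset's actual values):
+
+ ```python
+ import numpy as np
+
+ def defocus_map(depth, focus_dist, f=0.05, N=2.0, p=5.6e-6):
+     """Per-pixel CoC radius sigma (in pixels) from the thin-lens relation above.
+
+     depth      : array of object distances d_o in metres
+     focus_dist : focus distance F in metres
+     f, N, p    : focal length, f-number and CMOS pixel size (metres)
+     """
+     coc = np.abs(depth - focus_dist) / depth * f ** 2 / (N * (focus_dist - f))
+     return coc / (2.0 * p)   # sigma = CoC / (2 p), converted to pixels
+ ```
+
+ Calling it once per focus distance yields the stack of defocus maps used for rendering.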
39
+
40
+ Given the defocus map and the AIF image, we can explicitly model the generation process of the defocus image. Taking advantage of the deterministic relationship, our predicted depth and AIF image can be supervised by reconstructing the input focal stack. To render a defocus image, we convolve the AIF image with the point spread function (PSF). PSF describes the pattern of a point light source transmitting to the image plane through the camera lens. In practice, we calculate the defocus blur using a simplified disc-shaped PSF, $i.e.$, a Gaussian kernel, following previous works [@8099990; @8954440]. We also consider the radius of the pixel-level circle of confusion $\sigma$ as the standard deviation for the Gaussian kernel. Given the defocus map $\Sigma$ calculated in Sec. [3.1.1](#sec:defocus){reference-type="ref" reference="sec:defocus"}, the PSF center at $x$, $y$ with $\sigma = \Sigma_{x, y}$ can be written as: $$\begin{equation}
41
+ \mathcal{F}_{x,y}(u, v) = \frac{1}{2\pi\Sigma_{x, y}^2}\exp\Big(-\frac{u^2+v^2}{2\Sigma_{x, y}^2}\Big),
42
+ \end{equation}$$ where $u$, $v$ is the location offset to the kernel center. Let $I$ be the AIF image and $J$ be the rendered defocus image, we produce $J$ with defocus map $\Sigma$ by convolving $\mathcal{F}$ with $I$: $$\begin{equation}
43
+ J:=I\otimes \mathcal{F}.
44
+ \end{equation}$$ Note that $\mathcal{F}$ is a pixel-specific Gaussian kernel; therefore, unlike traditional convolution, we need to change the kernel as the convolution window moves. Here, we adopt the PSF convolution layer [@8954440]. The PSF convolution layer is implemented in CUDA, thus supporting backpropagation, and can be incorporated into our training pipeline. Given the AIF image $I$ and the defocus map $\Sigma$, the PSF convolution layer calculates the Gaussian kernel with the standard deviation taken from $\Sigma$ on the fly and convolves it with $I$. In practice, the window size for convolution is $7\times7$ and pixels with defocus value $\sigma<1$ will be treated as clear pixels since their $CoC$ radius is less than one pixel. By generating multiple defocus maps and rendering the corresponding defocus images, we can construct the focal stack with varying focus distances.
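+
+ A slow reference version of this rendering step can be sketched as follows (a naive numpy loop for illustration only; the CUDA layer in the paper is far more efficient):
+
+ ```python
+ import numpy as np
+
+ def render_defocus(aif, sigma_map, win=7):
+     """Spatially-varying Gaussian rendering of one defocus image.
+
+     aif       : (H, W, 3) all-in-focus image
+     sigma_map : (H, W) per-pixel CoC radius in pixels
+     """
+     h, w, _ = aif.shape
+     r = win // 2
+     pad = np.pad(aif, ((r, r), (r, r), (0, 0)), mode="edge")
+     u, v = np.meshgrid(np.arange(-r, r + 1), np.arange(-r, r + 1))
+     out = np.zeros_like(aif, dtype=float)
+     for y in range(h):
+         for x in range(w):
+             s = sigma_map[y, x]
+             if s < 1.0:                      # treated as an in-focus pixel
+                 out[y, x] = aif[y, x]
+                 continue
+             k = np.exp(-(u ** 2 + v ** 2) / (2.0 * s ** 2))
+             k /= k.sum()                     # normalised Gaussian PSF
+             out[y, x] = (pad[y:y + win, x:x + win] * k[..., None]).sum(axis=(0, 1))
+     return out
+ ```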
45
+
46
+ **Discussion on the Optical Model.** We choose the physically realistic optical model to reconstruct the focal stack over a neural model mainly for two reasons. First, the optical model is a physically explainable process; therefore, an implicit constraint on the physical meanings will be applied to the inputs. In other words, the optical model encourages the predicted depth and AIF images to have their corresponding physical properties so that the focal stack can be properly reconstructed. A neural model does not provide such constraints. Second, it is hard to optimize the DAIF-Net model and a neural model for reconstruction jointly. In contrast, using an optical model requires no training. Meanwhile, the physics model is naturally more robust and has provable performance.
47
+
48
+ ![The DAIF-Net architecture. The architecture takes a focal stack of arbitrary size and estimates the depth map and AIF image. The parameters of encoders and bottlenecks are shared across all branches. We adopt the global pooling [@9157368] and fuse the branches by selecting the maxima of their features.](figure/DAIF.pdf){#fig:funet width="\\columnwidth"}
49
+
50
+ The optical model depicts the forward process of generating defocus image from the AIF image and the depth. The model calculates the following equation explicitly: $$\begin{equation}
51
+ \label{eq:forward}
52
+ J_F = \mathcal{G}_F(I, D),
53
+ \end{equation}$$ where $J_F$ is the defocus image with focus distance $F$, $\mathcal{G}$ is the optical model, $I$ and $D$ are the AIF image and the depth map respectively.
54
+
55
+ In contrast, predicting depth and AIF image from the focal stack, the DAIF-Net is implicitly learning the reverse expression of [\[eq:forward\]](#eq:forward){reference-type="ref+label" reference="eq:forward"}. To be specific, we are learning: $$\begin{equation}
56
+ \{I, D\} = \mathcal{D}(J_F),
57
+ \end{equation}$$ where $\mathcal{D}$ is the DAIF-Net. Solving this equation using a single defocus image is an ill-posed problem because we are calculating two variables from one input. However, we can take advantage of the fact that the defocus images in a focal stack share the same depth and AIF image. Thus, we can train our model on a focal stack to make this task solvable, and our DAIF-Net has the following expression: $$\begin{equation}
58
+ \label{eq:reverse}
59
+ \{I, D\} = \mathcal{D}(J_F^0, J_F^1, \cdots, J_F^k),
60
+ \end{equation}$$ where the accurate depth map and AIF image are the only solution to this function.
61
+
62
+ Consequently, from the aspect of model architecture, the DAIF-Net is supposed to take a focal stack with an arbitrary number of images and output the underlying AIF image and depth map. We modify the encoder of the U-Net [@ronneberger2015u], and design our DAIF-Net, as shown in [4](#fig:funet){reference-type="ref+Label" reference="fig:funet"}. In order to support a flexible number of input branches, every image will be passed through the same encoder and bottleneck.
63
+
64
+ In order to learn the depth and AIF image from a focal stack simultaneously, we observe that sharpness is the connection between them. Intuitively, a sharper point indicates a closer distance between the depth and the focus distance. Meanwhile, sharper points better preserve the corresponding colors of the AIF images. Therefore, training a model that is sensitive to the sharpest points in the focal stacks is beneficial to predicting both AIF images and depth maps. To measure the sharpness, our designed model exploits the difference between image pixel-wise local feature maps, $i.e.$, the absolute sharpness, and the stack-wise global feature maps, $i.e.$, the sharpest regions. To compare the local feature maps and the global feature maps, our model adopts layer-wise global pooling from DefocusNet [@9157368]. Specifically, the model computes the output of the convolution block for every input image to produce the local feature maps. Then the maximum values are selected from the local feature maps across all branches to generate the global feature maps. The global feature maps are then concatenated to the local feature maps and the combined features are passed into the next convolution block. This way, global and local feature maps are visible to every branch. By comparison, the sharper regions will be highlighted by the multi-input encoder. The decoder then produces the AIF image and the depth jointly from the sharpness clue.
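+
+ The branch-fusion step described above can be sketched as follows (a minimal numpy illustration of the layer-wise global pooling, not the actual network code):
+
+ ```python
+ import numpy as np
+
+ def global_pool_fusion(branch_feats):
+     """Fuse per-image encoder features across the focal stack.
+
+     `branch_feats` is a list of (C, H, W) local feature maps, one per input
+     image. The stack-wise maximum forms the global feature map, which is
+     concatenated back onto every branch before the next convolution block.
+     """
+     global_feat = np.max(np.stack(branch_feats, axis=0), axis=0)   # (C, H, W)
+     return [np.concatenate([f, global_feat], axis=0) for f in branch_feats]
+ ```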
65
+
66
+ Note that the inputs to the model have 4 channels: the RGB color channels and the focus distance channel, where we expand the focus distance to the size of the image, and concatenate it to the color channels. The focus distances can be extracted from the EXIF properties, which come together with the image and can be acquired with minimal cost. The focus distance is an essential input in our model. It increases the absolute accuracy of depth estimation, while not affecting the self-supervised characteristics of the framework.
67
+
68
+ While our DAIF-Net is trained by estimating an accurate depth map and AIF image that can perfectly reconstruct the input focal stack using the optical model, it is hard to achieve this goal solely using the reconstruction loss in practice. Noticing that the $CoC$ can have the same value in front of and behind the focus point, and the defocus is ambiguous in texture-less regions, we delicately design auxiliary losses and regularization to support our self-supervised training. First, we briefly introduce our reconstruction loss.
69
+
70
+ **Reconstruction Loss.** We adopt the reconstruction loss from [@8954440] and have: $$\begin{equation}
71
+ \mathcal{L}_{recon} = \frac{1}{N}\sum^N\alpha\frac{1-SSIM(\Bar{J}, J)}{2} + (1-\alpha)\|\Bar{J}-J\|_1,
72
+ \end{equation}$$ where $J$ is the input focal stack and $\Bar{J}$ is the reconstructed focal stack, *SSIM* is the Structural Similarity measure [@1284395], and $\alpha$ is used to balance the scale between the *SSIM* and the $L_1$ loss which we set to be 0.85.
73
+
74
+ **Coarse AIF Image.** We notice that the defocus map is a natural clue for multi-focus fusion. Given a stack of defocus maps, we acquire the index of the minimum defocus value for each pixel position and fuse these sharpest pixels to form the coarse AIF image. If the defocus maps are accurate, this is the sharpest image we can produce without extra calculation. Meanwhile, since the defocus map is produced from the depth map, a clearer coarse AIF image indicates a more precise map, and thus a more accurate depth map. Therefore, we regularize the blurriness to encourage the images to be clear, so that the quality of depth map can be improved.
75
+
76
+ **Blurriness Regularization.** To evaluate the sharpness of AIF image, we apply a Laplacian filter for the edge map and calculate its variance. By taking the negative log sum of the normalized variance, the blurry estimation loss [@9464709] encourages the image to be as clear as possible: $$\begin{equation}
77
+ \begin{aligned}
78
+ \mathcal{L}_{blur} = -\frac{1}{N}\sum^N_{c=1}\beta\log\Big(\frac{\Sigma_i\Sigma_j(\nabla^2\Hat{I}(i, j))^2}{M-\mu^2}\Big),
79
+ \end{aligned}
80
+ \end{equation}$$ where $I$ is the image, $M$ is the number of pixels, $\mu$ is the mean pixels value, $\nabla^2$ is the Laplacian operator and $\beta$ is a scale factor which we set to be 0.01. We apply the blur estimation loss on the predicted and the coarse AIF images to improve their sharpness.
81
+
82
+ **Smoothness Regularization.** To prevent the estimated depth from drastic disparity in homogeneous regions and increase the consistency between the estimated depth and AIF image, we apply the smoothness prior to regularize the predicted depth map. The edge-aware smoothness regularization is: $$\begin{equation}
83
+ \mathcal{L}_{smooth} = \frac{1}{N}\sum^N |\partial_x \Bar{D}|e^{-\beta\partial_x \Bar{I}} + |\partial_y \Bar{D}|e^{-\beta\partial_y \Bar{I}},
84
+ \end{equation}$$ where $\Bar{I}$ is the predicted AIF image, $\Bar{D}$ is the predicted depth map, and $\beta$ is the scale factor for the edge sensitivity which we set to be 2.5. The overall loss then becomes: $$\begin{equation}
85
+ \begin{aligned}
86
+ \mathcal{L} = \mathcal{L}_{recon} + \mathcal{L}_{blur}(\Bar{I}) + \mathcal{L}_{blur}(\Hat{I}) + \lambda \mathcal{L}_{smooth},
87
+ \end{aligned}
88
+ \end{equation}$$ where $\Hat{I}$ is the coarse AIF image, and $\lambda$ is used to control the importance of the smoothness regularization, which we set to 0.5.
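+
+ For illustration, the two regularizers can be sketched in numpy as follows (a minimal sketch under one plausible reading of the normalised-variance term; the SSIM + $L_1$ reconstruction loss is omitted, and the blur term assumes a grayscale image):
+
+ ```python
+ import numpy as np
+ from scipy.ndimage import laplace
+
+ def blur_loss(img, beta=0.01):
+     """Negative log of the normalised variance of the Laplacian edge map."""
+     edges = laplace(img)                              # Laplacian edge response
+     var = np.mean(edges ** 2) - np.mean(edges) ** 2   # variance of the edge map
+     return -beta * np.log(var + 1e-8)
+
+ def smoothness_loss(depth, aif, beta=2.5):
+     """Edge-aware smoothness: depth gradients are down-weighted at AIF edges."""
+     dx_d = np.abs(np.diff(depth, axis=1))
+     dy_d = np.abs(np.diff(depth, axis=0))
+     dx_i = np.mean(np.abs(np.diff(aif, axis=1)), axis=-1)
+     dy_i = np.mean(np.abs(np.diff(aif, axis=0)), axis=-1)
+     return (np.mean(dx_d * np.exp(-beta * dx_i)) +
+             np.mean(dy_d * np.exp(-beta * dy_i)))
+ ```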
2304.02152/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2304.02152/main_diagram/main_diagram.pdf ADDED
Binary file (45.6 kB). View file
 
2304.02152/paper_text/intro_method.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Colonoscopy is a minimally invasive procedure widely adopted for polyp detection to diagnose colorectal cancer (CRC). In a colonoscopy, diagnostic accuracy relies on the correct analysis of the acquired recordings. However, the traditional assessment approaches by physicians suffer from inter-observer variations and demand extensive manual efforts. In recent years, accessibility to several colonoscopy datasets has paved the way for many machine learning based research works for automated CRC detection. However, the well-trained models proposed in the existing works still report limited diagnostic success. This limited success of automated methods is attributed to low-quality frames in the video samples, which contain artifacts, namely, ghost colors, low-illumination, interlacing due to camera motion, and fecal depositions due to inadequate patient preparation.
4
+
5
+ To overcome the low-quality frames, some related fields of laparoscopy and endoscopy followed keyframe selection [@ma2020keyframe] or performed super-resolution [@almalioglu2020endol2h], but no work in the colonoscopy domain explored the idea of extracting obscured clinical details from such low-quality uninformative video frames. Therefore, our work investigates whether GANs can convert uninformative frames to informative frames. In this direction, we propose a GAN-based image-to-image translation approach to generate informative frames from the degraded frames of colonoscopy videos. We highlight the cases where GANs fail and where they help, which gives us directions for future work. The main contributions are summarized below:
6
+
7
+ 1. To the best of our knowledge, this is the first framework to address the issue of uninformative colonoscopy frames using adversarial networks.
8
+
9
+ 2. We investigate the impact of translating uninformative frames on polyp detection performance and discuss future directions in this context.
10
+
11
+ ![The proposed framework contains two generators $G_{AB}$ and $G_{BA}$ and two discriminators $D_A$ and $D_B$.](gan_dia.png){#fig:gan height="130pt" width="\\columnwidth"}
12
+
13
+ # Method
14
+
15
+ The overview of the proposed framework is shown in Fig. [1](#fig:gan){reference-type="ref" reference="fig:gan"}. Given the uninformative colonoscopy frames $\{a_i\}_{i=1}^N$ from domain A, the aim is to learn a mapping function $G_{AB}: A \rightarrow B$ to generate frames such that the data distribution of obtained frames is indistinguishable from that of informative colonoscopy frames $\{b_j\}_{j=1}^M$ of domain B. Due to the unavailability of paired data, our work is inspired by the unpaired translation approach of CycleGAN [@zhu2017unpaired]. Hence, another mapping function $G_{BA}: B \rightarrow A$ is also introduced. Our implementation involves ResNet-based generators and PatchGAN discriminators $D_A$ and $D_B$. The CycleGAN objective integrates adversarial loss and cycle-consistency loss. The adversarial loss can be expressed as:
16
+
17
+ $$\begin{equation}
18
+ \label{adv}
19
+ \begin{split}
20
+ L_{adv}(G_{AB}, D_B) &= \mathbb{E}_{b\sim p_{data}(b)}[(D_B (b)-1)^2] \\ & + \mathbb{E}_{a\sim p_{data}(a)}[(D_B(G_{AB}(a)))^2]
21
+ \end{split}
22
+ \end{equation}$$ $G_{AB}$ aims to translate uninformative frames such that they appear similar to the informative frames, while $D_B$ tries to distinguish the translated frames from the high-quality, informative frames of domain B. In other words, $D_B$ is trained to minimize $L_{adv}(G_{AB}, D_B)$ and $G_{AB}$ is trained to minimize $\mathbb{E}_{a\sim p_{data}(a)}[(D_B(G_{AB}(a))-1)^2]$.
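+ 
+ A minimal sketch of these least-squares adversarial objectives in PyTorch, reusing the `PatchDiscriminator` sketch above (variable names are ours, not the authors'):
+ 
+ ```python
+ def d_adv_loss(D_B, real_b, fake_b):
+     # L_adv(G_AB, D_B): real informative frames are pushed toward 1 and
+     # translated frames toward 0; fake_b is detached so only D_B is updated.
+     return ((D_B(real_b) - 1) ** 2).mean() + (D_B(fake_b.detach()) ** 2).mean()
+ 
+ def g_adv_loss(D_B, fake_b):
+     # G_AB minimizes E[(D_B(G_AB(a)) - 1)^2], i.e. it is rewarded when D_B
+     # scores its translated frames as real.
+     return ((D_B(fake_b) - 1) ** 2).mean()
+ ```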
23
+
24
+ To ensure cycle-consistency and to reduce randomness in mapping, a cycle-consistency loss is used, which is given by: $$\begin{equation}
25
+ \begin{split}
26
+ L_{cyc}(G_{AB}, G_{BA}) & = \mathbb{E}_{a\sim p_{data}(a)}[\lVert G_{BA}(G_{AB}(a))-a\rVert_1] \\ & + \mathbb{E}_{b\sim p_{data}(b)}[\lVert G_{AB}(G_{BA}(b))-b\rVert_1]
27
+ \end{split}
28
+ \end{equation}$$ An identity mapping loss is also added to help preserve color in the translated images. With this model, we aim to recover the clinically relevant details obscured by the artifacts; a sketch of the cycle-consistency and identity losses is given after the list below. Furthermore, we carried out the following investigations:
29
+
30
+ 1. Polyp detection is performed using YOLOv5 [@yolov5] to determine the impact of GAN-translated frames.
31
+
32
+ 2. Qualitative analysis is done to identify the artifacts successfully handled by the CycleGAN and analyze the ones that still persist in the translated frames.
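+ 
+ As referenced above, a minimal PyTorch sketch of the cycle-consistency and identity losses (our own variable names, under the same assumptions as the earlier sketches):
+ 
+ ```python
+ import torch.nn.functional as F
+ 
+ def cycle_loss(G_AB, G_BA, real_a, real_b):
+     # ||G_BA(G_AB(a)) - a||_1 + ||G_AB(G_BA(b)) - b||_1
+     return (F.l1_loss(G_BA(G_AB(real_a)), real_a)
+             + F.l1_loss(G_AB(G_BA(real_b)), real_b))
+ 
+ def identity_loss(G_AB, G_BA, real_a, real_b):
+     # Feeding each generator an image that is already from its target domain
+     # should return it unchanged; this helps preserve color in the translations.
+     return F.l1_loss(G_AB(real_b), real_b) + F.l1_loss(G_BA(real_a), real_a)
+ ```
+ 
+ In practice these terms are combined with the adversarial losses using relative weights (CycleGAN commonly uses a large cycle weight, e.g. 10); the specific weights are a training detail not restated here.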
2304.07039/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="Electron" modified="2022-10-30T07:31:56.755Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.3.0 Chrome/104.0.5112.114 Electron/20.1.3 Safari/537.36" version="20.3.0" etag="jOhT1SFzqQI2onMhKsoN" type="device"><diagram id="Lsge3XHwfHVM7kjbf2wH">7V1Lc+K4Fv41WcZlWU8vE5JML25PTVUvZmY15Q5OQo2JuUA6yf31VwLbWLKMhZFsBUh3dQdjBJzv09F56Bxdwcn847dlsnj5nk/T7CoKpx9X8O4qigCIGf9PXPncXmE02l54Xs6mxU27Cz9m/0uLi2Fx9W02TVfSjes8z9azhXzxMX99TR/X0rVkuczf5due8kx+10XyXLxjuLvw4zHJ0sZtf86m65fiW0R0d/1bOnt+Kd8ZkHj7zDwpby6GWL0k0/y99l7w/gpOlnm+3v42/5ikmRBeKZftB3poebb6YMv0dW3ygkLuv5Lsrfhuxedaf5ZfNp3y7148XKXPcz7y/e7Sbfo6vREi5c8+ZslqNXvkF1/W84xfAPzXZf72Ok2nxaOnWZZN8ixfbsaGT1j84ddX62X+b1p7hmx+qmdKMUfbdywogTaP1svPv/iDMMDlw7+Ld9s8uBO0C6tHn+Wjj9n6r/I+/vvfuyH4o92LxIPyNVvZCIHIIOZvy8fiUsmBdbJ8Tkv8Q9jEBVRo82mS5vOUfzh+y/uOT7gg30uNSuW1ZZol69kv+XMkBa2fq+Gqd/gjn/E3jsJiCqK4GKeYgBSG8hDbr1S8qs4fZSCMY2kgglEAmDzWVhSNsThrks/abQtxw6r9M2OCtJ+5GI7/sh2xfFQT6+7Shv76qQANpoIx18Mm16dJyp4etVx/ZOnPp4O4Xmd6jfgtXG/hbZ2jWKM6RqUoRDJFMYqDGPdjKaTKWDgMkPKJWljag0iom0jPnCmL4rZ0uU4/JCEVy1bys7w9NMamHAhyYcWEUoqiOEIESF8fsThgEYgJwxwtAEOyfboGLKUBFZMbARZBiKImzADggISYRYxFCMYUwXaEJWHukRzulhxfMBfiVy6LJMvSLH9eJnNO8UW6nPH3S5fqc3/snrh9f5mt0x+LZKOs37ltIk/fp9lHWk637XRecwbnr/xhvJnQ2WzxrXhutb0NhlxOce2n17yX18hk+Vh8CKzVCZXNsJvaGhLVyYL2kuUasCBGIQAUiqUKkEhii/osbLIl1lNB0goyoQBDx9OFXOhiQBcbDFEJIjMk8pYh9MKQYRgCghDHHFxCGV9WIGWyrQa9ZQgzXaxVcb/mr+k+2R2/iEOmGOlRGERNwUUaGywKj5dM3C0Z/gLucqfdEyFZLbZ++GZGNAja7v0ZUZPpxTiAjMqAhAl9GjKwz6cWQZR8kqdkg0gAaaw8C1MMAAMm7TyrQhCtblUXWRpujjTjQr2IHPs0n3ppmvov6uvs+SrAJAB02tgoMu4LlQNsdBEJJP7iCdcOa65V6O2P/Gk9Tz6u6F3xnIqeuFNRxtnsWZggj1zyGytGaJvZY5LdFE/MZ9PpJra3TLlVUmgcAW8RouHj4tsrfCfGelvnq52Bo1VpdfOlvJS/rh+S+SwT0v6WZr9S8QGKJ0qDydi96VhGQ8X5j6FuGQW6NUINi/VSf7p4gIDq4R+O30ogh+9/Hoxelj6t/cLOAlYYy1BBFLCoAVXsCimD+EOHobM32k3vb9jDZK/hLpvq3N2Yzrjoanfe3N+zWykKsMPuYJu+Y+IQeeIQ7udpJg7RoEFsoGHg3svi7sBGipus/k3Xjy+ty1Z7RLZddclgkwm7v32ogXg3W3L7bev9pclKTN5nEUQuPoMG62oIC2ACoGjBiE8tUPczGwpxz1p7FK4GTrkrXFl0C1uySqa4VkPocOVWghhQAtYBeITg4eDSecjV8jW7LF/tyxehXXPM2Uqmc95V0/GB28vr9PViOrbPOyUUjilrzjvdxLOBYaSLLVwwPBZDCobE0CD0MW4ukMAwCMNdLhA0hNMjFWhBcAZxiS8YuccpmyK9YfJza1XsjdxLhk4/k7+DLGoukDvLEl3UZGC5J2P8QL7J/o3zY4wbkuzniJoO9IgjBlszLhyxwhE5IYhCwiSOqAlBjzhiEARyFnZo3/RoHHYohzB1T8XAtddvf+yYXqjMIZamV4i1kVdHnqvJ/pCxcruIymaYyO2i5hxwlbc02RjhSXJ3i+Io2d1DNgdohFDpdoSPJVObEEbO7BrtERgoe2iwC8Bh9vBahoDKrzfO89J9o9jLLEJdaOEscZMFDnriJo9yDZ3hZhBOOAfbRLWbjRaSrgWZSCASPKitAg3iHaPtQ4vk7ANGYQBYWP00Q0auFmRoUqPhh9myBXQUswUaFyAMsimtTRIj2y7QwM8bag2ELZtrz3VXGjTw3E4cG293pcER0/ee2iB8MbRjhOBISSCDOKDNqIAzI8TA37VaDVzW4O7qbv+uPaOvwW2r991VPeprIPtXGLcoiXrVb1k9KVX9YmP4B6qojBV2qcaGcTmlOlCoDHRwxe8x+uiwGIRPdbsdjG3hXZ1jxDeOVTtPapW2Ygdu+RP1Y1yEGsO6sj3QYbGRHnzqTteMxifmH59YQKMdf0LZXSBxGMDmDqvD6dX5LmGNxO4qx00qZawuwD0X0z4Lt9MF2L/FlsWBkhen4S7Hc/CSK0r260RXemHUSgPHWITLXUEjKM2KRX0auvRtqGGBsXFT+0a+kRgp+44p7M9ghLrHsqhJTSKFDhsbGSzyfRsX9eSeRDTqGdGUdkQk7umZIKVkhR7tmRh9XkrD/R+LgX33H926qJxHY/pAA6tSydMmntFZKdxveCvGEVa1dBHFAcFDGKKHBcfdEKrHem7DkvSNTEBNv/Vmk6ajVp1NztLW6LBwvk02VdVaY6on31Zb1lhtdfncY5UVES6LO06Z7GBzzamRNJRvfFI1lC1CIR2hWmOJFrllkge5eLNS21PvXFesmNzC3ezHQ0TDoL4DnzUHDlvJbpGVzhMd5qyU436hGSvB8KzUNOMt99D5w1TG6aWE80SkEB4d1taMTAh2GHUpJeIFRS0uypb77tYpCr9Ev2isFNES2nNNx0oijyAUqPWiFvlokE85pqHLDZnExzZ0ucP3bEOy4Ru6YBB39Cdw1dsFR93AVNtd63Kx1wpzb43T8ZtmI8V4rTqud0iX29IBx6nVnuglbYNg+PFV6FtMmwIpq0LNvn8kuw5hSC18f4Pw6DFqwICF9gu5t6C2868pbv1+40a7/l4Sdtw5y0cJo2ElbBBDOxkJDyRSgxDS1xNpixYeRy0YBFJOTcIDqwWDoMDJSHgYkRIDJ/ZkRKqohdJYHZTDxLGX5qPAVe9gWIEbeF8nI/BxJGzgcZ2MhPU6hAwr8JN08Q7SIQML/CQ9vv06ZGAJn6THd5AOY
TtJDiTzM3QJ0dgyPycncTQhn5OfqFcmIBxa6PQMPUk0utDPyZscT8oGLqRRAq9/78qBE3gENq2/oRJ41DiB10fARwmR7mfmiFk/6tol7JasfUVAfcr6Udc+oIcSHjaORF37gD5JeCCRunbxRhGpmSU8kIRdO3QeSnhgteDam/NJwsOIlLn21XwSqQ9ZP+baT/NQ4KNm/cqDFM9C4ONI2HXWzycJ+5D1Yyfp4h2kQwYW+El6fPt1yMASPkmP7yAdMnhyhJ2hSzh21o+dk5M4mpDPyU/0JesXn6EnOXrWLz4nb3I8KRu4kI76U14dVHhrv4Xv9nvu7fMCfavahWr7qL6dgxongOAwKM9PGKPdZGzS3W/sHr2aBh6mnc0PI7sBNWPPmSmaaJDj+/hC1hzWXbeD2CD6MHKPSfPGBof1oDwFbUhoX20IFW2oVsRbajKpvg+x3DUybgZzfqTz5HU9e7y+eeewXQm1TzIOw+3PpcRr8t+3XFwWh2Ncb0+qveE3gHDxsWFH+Tz/7Vn8/z1ZNKbG1z6ZfvO4+DKl7XlUbwY1N8BQwOotyXHD0KsartUnC1K42MvM08WgkPiLJwK1t8UVvf12Re/E49k8XdWv/9lyfSKub0c5GSYwU0t+P/YMBkw5fQ6ITWRNxGOtbW8BcV0ErIH4BdqDoY3URacFWOQKWF2YTQV2csH1cHUtt5fDDARla6M6rtAVrrrInorrf37vA2CWPq29hQ+EdvBTOzSjrk5I2sUWWkCyGvgCpRUoSVlOPgx4urijHfA8V6S24EPKodckHhQ/g4jmWOfu4rJvVVWBgHSnAbo6RrbqQvgFTtstcOyxedCGnI45b7d2jCTCR/OpTQyl1SLxabizdkHo0WG7BV5jhaGulUOh5Ncb98am+0axF+MEoUdH8Y6MnCxy0BM5eZRrZw3yQWji019cv6NdP6Lp7OnM7wOhB73p+6S+ulowt0x/6dwDD9sixzIVRDfjqJ9ewHH3WDa1g8HWoP7HeNcP0N5/pPc2uK0llXksvqkPNI2Wb+7v2W10pT/5+z1dCaU2TVYv1Re2oCvU88hQyxYwnW0KRHfmqJ2SxioD6AIM1qDuOLG9fX+F8YntE3Z/+9CCW5pscKvzTQN9NYQNSJl8+DUGsDp6U3JZ27XOcWga7HRyhWZ18k1/NKshdGhyXSkGlOC0ABlW98EMDZlBkMHZBGxftI0nYDmEKWRi4Nrrtz+yrwuYLWzByNPxgNYMwweQZNkgtltRhgiMAIPAiC8BJNC3KN2GnAyiI60BJAekapPF2FEk4FMsYgva4J7Hp16e5meqya+z6FGAw07kO0l0FCn3BcsFOgZxA1/WKXEuTdnQdRD9a+ALe7NOsfHWqcjAkbQmpx7hhIKpliIJOsu3Jcqg8UiPQ1MbK6aNGUGaQBMbOBu4mN7oCjSsrihF8RV0hUFw1J2cDvCLHCdFu+L5Ep8GNGcjE/doKIMp6vZ/LknRCjmf0tnjIvfFkqKRydb2Sf766wTzn9Z2bJXUqepf9CE3XczNyqatyCSz/T1ZL2cf/K7vb9kFzD32VCSBKepOMNhfd6JLbtsB1mS3+n2WijK6/sgut5/61IFtBMZlYDW7LHVFCHZwNdmtvsP1Zjq94NqOq7xYYiRwHRBMqIsCNLY8J59ceUbh7/lyfsGyFcvyJSWWVAOkM20LL3vXbetYCgKg8SCdbV+HumjEpVJzwEpNioet1ARQF1i5lGoej61aqtmGrKtSTQAPqCMYOuyK1ALlgWtRoEnExZOwK+zepe5OTiYp8kuK5oAUTRua46Zo4GHJdifbwPs2j7HQaqvSB95sBUcxDmr1w2XdfdW6RemWZRoYRWWToaq61aztVp/YKDQpLXDYz+gJiz97Keeqn5HFigV/GEn2M5IGce3IpagnP9neNxGxCdT6Jpb6JCFCGu/a/Gq2uiYBaLJbZCjVC64OahU3VuVOqa6lBmHUswmDgewAEIwDdbE2ruYJY2leKLU9hs0T+9AT6cJzCj255bYQv3LRJ1mWZjm3kubCbUuX3GHc+Hryc3/snugyGje24o+dp1fL5ccbsnPj81tpQRYn+YUBjSRhNebEDZnEDxPtnChrO9rOCWx0Y23bnt6zZqhnc9eOyBIDQYj5ZA9ByDioWOEPCEgYhQwzREKEmmYnt0ZIiFnEWIRgTBHUzBrKpY5xjADjQuD/2tjTgHQxxXNn35cgXLyXcMhbwukioBfC+U84TrR9hMPeEq4t/vrwzxW9zTdx1/ufp5DwsrKMKRu7uFkE69xvwuoqVYJ00dVRUiUeQm0/mo6oHLigsAt5WykT/nCZi+6zOwuaa+2X7/k0FXf8Hw==</diagram></mxfile>