SlowGuess committed
Commit ef9d4c0 · verified · 1 Parent(s): b4185f1

Add Batch 1cde9d48-6fbd-438b-8f81-fa2d63ecead1

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/02332191-3640-4bf2-b0ec-5ad2e22699ee_content_list.json +3 -0
  2. askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/02332191-3640-4bf2-b0ec-5ad2e22699ee_model.json +3 -0
  3. askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/02332191-3640-4bf2-b0ec-5ad2e22699ee_origin.pdf +3 -0
  4. askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/full.md +668 -0
  5. askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/images.zip +3 -0
  6. askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/layout.json +3 -0
  7. attentionalconstellationnetsforfewshotlearning/8d357406-6e29-47af-b7d2-458a8aaa9f4c_content_list.json +3 -0
  8. attentionalconstellationnetsforfewshotlearning/8d357406-6e29-47af-b7d2-458a8aaa9f4c_model.json +3 -0
  9. attentionalconstellationnetsforfewshotlearning/8d357406-6e29-47af-b7d2-458a8aaa9f4c_origin.pdf +3 -0
  10. attentionalconstellationnetsforfewshotlearning/full.md +419 -0
  11. attentionalconstellationnetsforfewshotlearning/images.zip +3 -0
  12. attentionalconstellationnetsforfewshotlearning/layout.json +3 -0
  13. auctionlearningasatwoplayergame/c67499e2-b68c-4964-a190-4aa8387dbeeb_content_list.json +3 -0
  14. auctionlearningasatwoplayergame/c67499e2-b68c-4964-a190-4aa8387dbeeb_model.json +3 -0
  15. auctionlearningasatwoplayergame/c67499e2-b68c-4964-a190-4aa8387dbeeb_origin.pdf +3 -0
  16. auctionlearningasatwoplayergame/full.md +446 -0
  17. auctionlearningasatwoplayergame/images.zip +3 -0
  18. auctionlearningasatwoplayergame/layout.json +3 -0
  19. autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/d6d44d12-39ed-4868-9a0f-b1c48452e21c_content_list.json +3 -0
  20. autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/d6d44d12-39ed-4868-9a0f-b1c48452e21c_model.json +3 -0
  21. autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/d6d44d12-39ed-4868-9a0f-b1c48452e21c_origin.pdf +3 -0
  22. autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/full.md +463 -0
  23. autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/images.zip +3 -0
  24. autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/layout.json +3 -0
  25. autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/f7793aa3-ff60-4924-84b7-8a987c0c05b6_content_list.json +3 -0
  26. autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/f7793aa3-ff60-4924-84b7-8a987c0c05b6_model.json +3 -0
  27. autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/f7793aa3-ff60-4924-84b7-8a987c0c05b6_origin.pdf +3 -0
  28. autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/full.md +409 -0
  29. autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/images.zip +3 -0
  30. autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/layout.json +3 -0
  31. autoseglosssearchingmetricsurrogatesforsemanticsegmentation/2802d650-3a45-4a26-97d7-c37c1d6ee49e_content_list.json +3 -0
  32. autoseglosssearchingmetricsurrogatesforsemanticsegmentation/2802d650-3a45-4a26-97d7-c37c1d6ee49e_model.json +3 -0
  33. autoseglosssearchingmetricsurrogatesforsemanticsegmentation/2802d650-3a45-4a26-97d7-c37c1d6ee49e_origin.pdf +3 -0
  34. autoseglosssearchingmetricsurrogatesforsemanticsegmentation/full.md +331 -0
  35. autoseglosssearchingmetricsurrogatesforsemanticsegmentation/images.zip +3 -0
  36. autoseglosssearchingmetricsurrogatesforsemanticsegmentation/layout.json +3 -0
  37. auxiliarylearningbyimplicitdifferentiation/ec102424-6e52-4b06-9cc2-c97f7dd83d6e_content_list.json +3 -0
  38. auxiliarylearningbyimplicitdifferentiation/ec102424-6e52-4b06-9cc2-c97f7dd83d6e_model.json +3 -0
  39. auxiliarylearningbyimplicitdifferentiation/ec102424-6e52-4b06-9cc2-c97f7dd83d6e_origin.pdf +3 -0
  40. auxiliarylearningbyimplicitdifferentiation/full.md +497 -0
  41. auxiliarylearningbyimplicitdifferentiation/images.zip +3 -0
  42. auxiliarylearningbyimplicitdifferentiation/layout.json +3 -0
  43. averagecaseaccelerationforbilineargamesandnormalmatrices/a2131a7c-1207-4a46-87c5-3883e655b7ea_content_list.json +3 -0
  44. averagecaseaccelerationforbilineargamesandnormalmatrices/a2131a7c-1207-4a46-87c5-3883e655b7ea_model.json +3 -0
  45. averagecaseaccelerationforbilineargamesandnormalmatrices/a2131a7c-1207-4a46-87c5-3883e655b7ea_origin.pdf +3 -0
  46. averagecaseaccelerationforbilineargamesandnormalmatrices/full.md +999 -0
  47. averagecaseaccelerationforbilineargamesandnormalmatrices/images.zip +3 -0
  48. averagecaseaccelerationforbilineargamesandnormalmatrices/layout.json +3 -0
  49. bagoftricksforadversarialtraining/0e009d16-83be-41b1-8610-beb69bfecf8e_content_list.json +3 -0
  50. bagoftricksforadversarialtraining/0e009d16-83be-41b1-8610-beb69bfecf8e_model.json +3 -0
askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/02332191-3640-4bf2-b0ec-5ad2e22699ee_content_list.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7474035f114460dfd4b5becd9cabff4c6f6f1649fea4ee8fc765fec5ebe4cf7
+ size 126266
askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/02332191-3640-4bf2-b0ec-5ad2e22699ee_model.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6afd9d34b73c9f50e3abd491feb8dfb6907ff0a8b9378ffd4406f390a361e88
+ size 145618
askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/02332191-3640-4bf2-b0ec-5ad2e22699ee_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8bc5817f0348c06c11867ec616ca1ccd860bc0ddf6f2d6b00c319373f4fef81
+ size 3521633
askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/full.md ADDED
@@ -0,0 +1,668 @@

# ASK YOUR HUMANS: USING HUMAN INSTRUCTIONS TO IMPROVE GENERALIZATION IN REINFORCEMENT LEARNING

Valerie Chen, Abhinav Gupta, & Kenneth Marino

Carnegie Mellon University

{vchen2, abhinavg, kdmarino}@cs.cmu.edu

# ABSTRACT

Complex, multi-task problems have proven to be difficult to solve efficiently in a sparse-reward reinforcement learning setting. In order to be sample efficient, multi-task learning requires reuse and sharing of low-level policies. To facilitate the automatic decomposition of hierarchical tasks, we propose the use of step-by-step human demonstrations in the form of natural language instructions and action trajectories. We introduce a dataset of such demonstrations in a crafting-based grid world. Our model consists of a high-level language generator and low-level policy, conditioned on language. We find that human demonstrations help solve the most complex tasks. We also find that incorporating natural language allows the model to generalize to unseen tasks in a zero-shot setting and to learn quickly from a few demonstrations. Generalization is not only reflected in the actions of the agent, but also in the generated natural language instructions in unseen tasks. Our approach also gives our trained agent interpretable behaviors because it is able to generate a sequence of high-level descriptions of its actions.

# 1 INTRODUCTION

One of the most remarkable aspects of human intelligence is the ability to quickly adapt to new tasks and environments. From a young age, children are able to acquire new skills and solve new tasks through imitation and instruction (Council et al., 2000; Meltzoff, 1988; Hunt, 1965). The key is our ability to use language to learn abstract concepts and then reapply them in new settings. Inspired by this, one of the long-term goals in AI is to build agents that can learn to accomplish new tasks and goals in an open-world setting using just a few examples or a few instructions from humans. For example, if we had a health-care assistant robot, we might want to teach it how to bring us our favorite drink or make us a meal in just the way we like it, perhaps by showing it how to do this a few times and explaining the steps involved. However, the ability to adapt to new environments and tasks remains a distant dream.

Previous works have considered using language as a high-level representation for RL (Andreas et al., 2017; Jiang et al., 2019). However, these approaches typically use language generated from templates that are hard-coded into the simulators the agents are tested in, allowing the agents to receive virtually unlimited training data to learn language abstractions. But both ideally and practically, instructions are a limited resource. If we want to build agents that can quickly adapt in open-world settings, they need to be able to learn from limited, real instruction data (Luketina et al., 2019). And unlike the clean ontologies generated in these previous approaches, human language is noisy and diverse; there are many ways to say the same thing. Approaches that aim to learn new tasks from humans must be able to use human-generated instructions.

In this work, we take a step towards agents that can learn from limited human instruction and demonstration by collecting a new dataset with natural language annotated tasks and corresponding gameplay. The environment and dataset are designed to directly test multi-task and sub-task learning, as they consist of nearly 50 diverse crafting tasks.<sup>1</sup> Crafts are designed to share similar features and sub-steps so we can test whether the method is able to learn these shared features and reuse existing knowledge to solve new, but related, tasks more efficiently. Our dataset is collected in a crafting-based environment and contains over 6,000 game traces on 14 unique crafting tasks, which serve as the training set. The other 35 crafting tasks act as zero-shot tasks. The goal is for an agent to learn one policy that can solve both the tasks it was trained on and a variety of unseen tasks which contain similar sub-tasks to the training tasks.

![](images/77751fd6b8aca8b0068067792822fa7ee8fc934a71bee7565bb614b6bb9337d0.jpg)
Figure 1: From the state observation at time step $t$, the agent generates a natural language instruction "go to key and press grab," which guides the agent to grab the key. After the instruction is fulfilled and the agent grabs the key, the agent generates a new instruction at $t + 1$.

To do this, we train a neural network system to generate natural language instructions as a high-level representation of the sub-task, and then a policy to achieve the goal condition given these instructions. Figure 1 shows how our agent takes in the given state of the environment and a goal (Iron Ore), generates a language representation of the next instruction, and then uses the policy to select an action conditioned on the language representation, in this case to grab the key. We incorporate imitation learning (IL) on the language and human demonstrations together with reinforcement learning (RL) rewards to train our agent to solve complicated multi-step tasks.

Our approach, which learns from human demonstrations and language, outperforms or matches baseline methods in the standard RL setting. We demonstrate that language can be used to generalize better to new tasks without reward signals, outperforming baselines on average over 35 zero-shot crafting tasks. Our method uses language as a high-level representation to help decompose a larger, complex task into sub-tasks and to identify the correct sub-tasks to use in a zero-shot setting. We also show that the agent can learn few-shot tasks with only a few additional demos and instructions. Finally, training with human-generated instructions gives us an interpretable explanation of the agent's behavior in cases of success and failure. Generalization is further demonstrated in the agent's ability to explain how the task is decomposed in both train and evaluation settings, in a way that reflects the actual recipes that describe the crafting task. With our dataset collection procedure and language-conditioned method, we demonstrate that natural human language can be practically applied to solving difficult RL problems and begin to address the generalization problem in RL. We hope that this will inspire future work that incorporates human annotation, specifically language annotation, to solve more difficult and diverse tasks.

# 2 RELATED WORK

Previous works on language descriptions of tasks and sub-tasks have generally relied on what Andreas et al. (2017) call "sketches." A sketch specifies the necessary sub-tasks for a final task and is manually constructed for every task. The agent then relies on reward signals from the sketches in order to learn these predefined sub-tasks. However, in our setup, we want to infer such "sketches" from a limited number of instructions given by human demonstrations. This setting is not only more difficult but also more realistic for practical applications of RL, where we might not have a predefined ontology and simulator, just a limited number of human-generated instructions. In addition, at test time, their true zero-shot task requires the sketch, whereas our method is able to generate the "sketches" in the form of high-level language with no additional training and supervision.

Similarly, other works have used synthetically generated sub-goals and descriptions to train their methods and suffer from similar problems of impracticality. Shu et al. (2018) introduce a Stochastic Temporal Grammar to enable interpretable multi-task RL in the Minecraft environment. Similarly, the BabyAI platform (Chevalier-Boisvert et al., 2019) presents a synthetic language which models commands inside a grid-based environment. They utilize curriculum training to approach learning complex skills and demonstrate through experimentation in their environment that existing approaches of pure IL or pure RL are extremely sample inefficient. Cideron et al. (2019) extend Hindsight Experience Replay (HER) to language goals in the BabyAI platform to solve a single instruction generated from a hand-crafted language. The BabyAI environment is extended by Cao et al. (2020) to include descriptive texts of the environment to improve the generalization of RL agents. Jiang et al. (2019) also use procedurally generated language, via the MuJoCo physics engine and the CLEVR engine, to learn a hierarchical representation for multi-task RL. Oh et al. (2017) also tackle zero-shot generalization, but like the others consider only procedurally generated instructions, learning to use analogies to find correspondences between similar sub-tasks.

The main work that also investigates using a limited number of human-generated instructions in RL environments is Hu et al. (2019). This paper also uses natural language instructions in hierarchical decision making to play a real-time strategy game involving moving troop units across long time scales. That work uses only behavioral cloning with natural language instructions, whereas we use a mixture of RL and imitation learning. They also do not investigate the benefits of language in zero-shot or few-shot settings and do not demonstrate cross-task generalization as we do.

Hierarchical approaches as a way of learning abstractions are well studied in the Hierarchical Reinforcement Learning (HRL) literature (Dayan & Hinton, 1993; Parr & Russell, 1998; Stolle & Precup, 2002). This is typically done by predefining the low-level policies by hand, by using some proxy reward to learn a diverse set of useful low-level policies (Heess et al., 2016; Florensa et al., 2017; Eysenbach et al., 2018; Hausman et al., 2018; Marino et al., 2019), or more generally by learning options (Sutton et al., 1999). Our approach differs in that, unlike in options and other frameworks, we generate language as a high-level state which conditions the agent's policy rather than handing control over to low-level policies directly.

Other works have shown the effectiveness of using a combination of reinforcement learning and imitation learning. Le et al. (2018) present a hybrid hierarchical reinforcement learning and imitation learning algorithm for the game Montezuma's Revenge, leveraging IL for the high-level controller and RL for the low-level controller, demonstrating the potential for combining IL and RL to achieve the benefits of both. By learning meta-actions, the agent is able to solve the complex game. However, their meta-actions were also hand-specified.

Others have utilized natural language for other tasks, including Williams et al. (2018), Co-Reyes et al. (2018), and Andreas et al. (2018), but not focused on the multi-task learning setting. Matthews et al. (2019) demonstrate the use of word embeddings to inform robotic motor control, evidence of particular promise in exploiting the relationship between language and control. Narasimhan et al. (2018) use language descriptions of the environment to aid domain transfer. The sub-field of language and vision navigation specifically has investigated how to train agents to navigate to a particular location in an environment given templated or natural language (Chaplot et al., 2018; Anderson et al., 2018; Tellex et al., 2011; Mei et al., 2016; Chen & Mooney, 2011; Yu et al., 2018), or to navigate to a particular location to answer a question (Das et al., 2018). Similarly to this work, Nguyen et al. (2019) use human-generated language to find objects in a simulated environment, Zhong et al. (2020) and Branavan et al. (2012) read a document (i.e., a player's manual) to play a variety of games, and Lynch & Sermanet (2020) train agents to follow both image- and language-based goals. All of these works require the agent to read some text at both train and test time and follow those instructions to achieve some goal. In contrast, at test time, our agent only receives a high-level goal, which is what item to craft. Our agent must take the high-level goal as input and generate its own instructions to solve the task. In other words, our task is both instruction following and instruction generation. Related to instruction generation, some works have explored intrinsic motivation for goal generation more generally (Florensa et al., 2018; Forestier et al., 2017). In our work, however, we learn the goals via the human language instructions.

# 3 HUMAN ANNOTATION COLLECTION

The first step of our approach requires human demonstrations and instructions. To meet that requirement, we built an interface to collect human-annotated data to guide the learning model.

![](images/13e257927cf37a619a52ea10f6c52ae40a52810a4fedd4b4b2f58c3b68480d2a.jpg)
Figure 2: (Left) Example view of the game interface that the worker would see on AMT. On the left the worker is given the goal and recipes; the board is in the middle; the worker provides annotations on the right. (Right) Example sequence of instructions provided by the Turker for the given task of Stone Pickaxe.

Crafting Environment: As shown in Figure 2, our environment is a Minecraft-inspired 5-by-5 gridworld. The crafting agent navigates the grid by moving up, down, left, and right. The agent can grab certain objects, like tools, if it is next to them and use the tools to mine resources. The agent must also use a key or switch to open doors blocking its path. Finally, the agent can go to a crafting table to build final items. The agent can choose from 8 actions to execute: up, down, left, right, toggle, grab, mine, and craft. The environment is fully observable. Our crafting environment extends the crafting environment of Andreas et al. (2017) to include obstacles and crafts that are specified by material, introducing compositionally complex tasks (e.g., instead of 'Make Axe', we have 'Make Iron Axe'). In total, we consider about 50 crafting tasks, 14 of which we collect annotations for and 35 of which are used at test time. At the start of each game, all object/resource locations are fully randomized in the environment.
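
For concreteness, the interface of such an environment might be sketched as below; the class and method names are our own illustration, not the released environment's API.

```python
from enum import IntEnum

import numpy as np


class Action(IntEnum):
    """The eight primitive actions described above."""
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3
    TOGGLE = 4
    GRAB = 5
    MINE = 6
    CRAFT = 7


class CraftingEnv:
    """Stub of a 5x5, fully observable crafting gridworld (hypothetical API)."""

    GRID_SIZE = 5
    MAX_STEPS = 100  # episode cap used in the paper

    def reset(self, goal: str) -> dict:
        # All object/resource locations are fully randomized at game start.
        self.goal = goal
        self.inventory: dict = {}
        self.steps_left = self.MAX_STEPS
        return self._observe()

    def step(self, action: Action):
        self.steps_left -= 1
        # ... dynamics (move/toggle/grab/mine/craft) elided in this sketch ...
        crafted = self.inventory.get(self.goal, 0) >= 1
        reward = 1.0 if crafted else 0.0       # sparse reward: full craft only
        done = crafted or self.steps_left == 0
        return self._observe(), reward, done

    def _observe(self) -> dict:
        grid = np.zeros((self.GRID_SIZE, self.GRID_SIZE), dtype=np.int64)
        return {"grid": grid, "inventory": dict(self.inventory), "goal": self.goal}
```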

Crafting Task: The goal of the agent in our world is to complete crafts. By design, a crafting-based world allows for complexity and hierarchy in how the agent interacts with items in the gridworld. To craft an item, the agent must generally first pick up a tool, go to a resource, mine the resource, and then go to a table to craft the item. To make Iron Ore, for example, the agent must use the pickaxe at the Iron Ore Vein to mine Iron Ore and complete the task. The Iron Ore recipe is an example of a 1-step task because it creates one item. A 5-step task, like Diamond Pickaxe, involves the mining and/or crafting of 5 items. We capped the tasks at a maximum length of 5 recipe steps to limit the amount of time a worker would have to spend on the task. Note that each recipe step requires multiple time steps to complete. Crafts are designed to share similar features and sub-steps to test whether the agent is able to learn these shared features and reuse existing knowledge to solve new, but related, tasks more efficiently (these relations between tasks are detailed in Table 3 and in Figure 10). While the task may seem simple for human annotators to solve, such compositional tasks still pose difficulties for sparse-reward RL. We further increase the difficulty of this task by restricting the agent to a limited number of steps (100) to complete the task, leaving little room to make unrecoverable mistakes such as spending time collecting or using unnecessary resources.
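
The n-step count of a craft can be read off its recipe by recursion: each item contributes one mine/craft step plus the steps of its ingredients. A minimal sketch with a hypothetical recipe encoding; only the Iron Ore entry is taken directly from the paper's example, and the Iron Ingot chain is illustrative:

```python
# Hypothetical recipe table: item -> list of items that must be produced first.
# Tools such as the pickaxe are picked up, not crafted, so they do not count
# as recipe steps.
RECIPES = {
    "Iron Ore": [],                    # 1-step: mine at the Iron Ore Vein
    "Iron Ingot": ["Iron Ore"],        # illustrative 2-step chain
}


def recipe_steps(item: str) -> int:
    """Number of mine/craft steps for `item` (the paper's n-step count)."""
    return 1 + sum(recipe_steps(ingredient) for ingredient in RECIPES[item])


assert recipe_steps("Iron Ore") == 1
assert recipe_steps("Iron Ingot") == 2
```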

Data Collection Process: Figure 2 shows our interface. Given the goal craft, relevant recipes, and the initial board configuration, the worker provides step-by-step instructions accompanied by the execution of each instruction on the actual game board. The workflow is to type one instruction, execute it, then type the next instruction, and so on until the goal is completed. The data collection interface and a corresponding example set of natural language instructions provided by a Turker are illustrated on the rightmost side of Figure 2. This is but one way that a Turker might choose to break down the 5-step crafting task. Section A.1 of the appendix has more details on the collection process. We will release the environment and dataset.

Dataset Analysis: Across the 14 crafts, we collected 6,322 games on AMT. In total, this dataset contains 195,405 state-action pairs and 35,901 total instructions. In the supplementary material we present relevant summary statistics about the data, including the number of instructions provided for each $n$-step task. The number of instructions, and consequently actions, required increases with the number of steps, as shown in Table 4.
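
Each annotated game pairs every instruction with the action sub-sequence that executed it. A single trace might be stored along these lines; the field names and actions are hypothetical, with the instruction text taken from Figure 1:

```python
# One annotated game trace in a hypothetical schema: the task, plus the
# worker's instructions paired with the actions that executed each of them.
example_trace = {
    "task": "Stone Pickaxe",
    "annotations": [
        {"instruction": "go to key and press grab",
         "actions": ["right", "down", "grab"]},   # illustrative action list
        # ... one entry per instruction, until the craft is completed ...
    ],
}
```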

# 4 METHODS

Our proposed approach to solving these multi-step crafting tasks is to learn from human-generated natural language instructions and demonstrations. The model is first pre-trained using imitation learning (IL) and then fine-tuned using a sparse reward in reinforcement learning (RL). The goal of the agent is to learn one policy that is able to solve a variety of tasks (around 50) in the environment, including tasks it has not seen, while being trained on only a subset of the total tasks.

![](images/ff36cbee2082abb80dff772efb8069c25b17e02a8ab29440a2b912b3e2256a90.jpg)
Figure 3: (Left) High-level language generator. (Right) Low-level policy conditioned on language.

Architecture: As outlined in Figure 3, we factor the agent into a hierarchical set-up with a language generator at the high level and a policy conditioned on the language at the low level. At each time step, the state encoder produces a vector representation that is then used as input to both the language generator and the language-conditioned policy. Relevant information about the state, including the grid, inventory, and goal, is encoded. Items which are relevant for crafting are embedded using a 300-dimensional GloVe embedding, summing the embeddings for multi-word items (e.g., Iron Ore Vein). Non-crafting items, such as door, wall, or key, are represented using a one-hot vector. Further details are provided in Section B.
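
A sketch of this item-encoding scheme; the GloVe lookup table and the list of non-crafting items here are stand-ins:

```python
import numpy as np

GLOVE_DIM = 300
NON_CRAFT_ITEMS = ["door", "wall", "key"]  # one-hot encoded, per the paper


def embed_craft_item(name: str, glove: dict) -> np.ndarray:
    """Sum 300-d GloVe vectors over the words of a (possibly multi-word) item,
    e.g. 'Iron Ore Vein' -> glove['iron'] + glove['ore'] + glove['vein']."""
    return np.sum([glove[w] for w in name.lower().split()], axis=0)


def embed_non_craft_item(name: str) -> np.ndarray:
    """Non-crafting items (door, wall, key, ...) get a one-hot vector."""
    vec = np.zeros(len(NON_CRAFT_ITEMS))
    vec[NON_CRAFT_ITEMS.index(name)] = 1.0
    return vec
```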

Imitation Learning Pre-training: We warm-start the model using the human demonstrations. Language is generated at the high level with an encoder-decoder framework. The encoding from the state encoder is decoded by an LSTM, which generates a natural language instruction. The target language instruction is the AMT worker's provided instruction. In our dataset, the vocabulary size was 212 after filtering for words that appeared at least 5 times. At test time, we do not have access to the ground-truth instructions, so instead the LSTM decoder feeds back the previously generated word as the next input and terminates when the stop token is generated. From the language generator module, we extract the last hidden state of the generated instruction. The hidden state is concatenated with the encoded state and passed through a series of fully connected layers. The final layer outputs the action. In the supervised training phase, the full model is trained by backpropagating a language loss and an action loss (both cross-entropy).
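
As a concrete illustration, a minimal PyTorch sketch of this two-level model follows; the layer sizes, token ids, and decoding cap are our assumptions, not the paper's reported configuration.

```python
import torch
import torch.nn as nn


class InstructionPolicy(nn.Module):
    """Sketch of the two-level model: an LSTM decodes an instruction from the
    state encoding, and its final hidden state conditions the action head."""

    def __init__(self, state_dim=256, vocab_size=212, hidden=128, n_actions=8):
        super().__init__()
        self.word_emb = nn.Embedding(vocab_size, hidden)
        self.init_h = nn.Linear(state_dim, hidden)
        self.decoder = nn.LSTM(hidden, hidden, batch_first=True)
        self.word_head = nn.Linear(hidden, vocab_size)
        self.action_head = nn.Sequential(
            nn.Linear(state_dim + hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_actions),
        )

    def forward(self, state_enc, instr_tokens):
        """IL training pass with teacher forcing on the target instruction."""
        h = self.init_h(state_enc).unsqueeze(0)          # (1, B, hidden)
        c = torch.zeros_like(h)
        out, (h_n, _) = self.decoder(self.word_emb(instr_tokens), (h, c))
        word_logits = self.word_head(out)                # language loss target
        lang_feat = h_n[-1]                              # last hidden state
        action_logits = self.action_head(torch.cat([state_enc, lang_feat], -1))
        return word_logits, action_logits

    @torch.no_grad()
    def generate(self, state_enc, bos=1, eos=2, max_len=20):
        """Test-time greedy decoding: feed back the previous word until <eos>."""
        h = self.init_h(state_enc).unsqueeze(0)
        c = torch.zeros_like(h)
        word = torch.full((state_enc.size(0), 1), bos, dtype=torch.long)
        words = []
        for _ in range(max_len):
            out, (h, c) = self.decoder(self.word_emb(word), (h, c))
            word = self.word_head(out[:, -1]).argmax(-1, keepdim=True)
            words.append(word)
            if (word == eos).all():
                break
        return torch.cat(words, dim=1), h[-1]


# Supervised phase: backpropagate both cross-entropy losses jointly, e.g.
#   ce = nn.CrossEntropyLoss()
#   loss = ce(word_logits.flatten(0, 1), target_words.flatten()) \
#        + ce(action_logits, target_actions)
```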

Reinforcement Learning Fine-tuning: We use the proximal policy optimization (PPO) algorithm (Schulman et al., 2017) in RL, with the reward defined below, to learn an optimal policy mapping from state encoding to output action. The maximum number of steps in an episode is set to 100. We utilize a training set-up which samples from all tasks (1-step through 5-step tasks). In preliminary experiments, we observe that sampling from 3-step tasks alone, for example, poses too complex an exploration problem for the model to receive any reward. We define a sparse reward, where the agent only receives a reward when it has completed the full craft. In RL fine-tuning, we freeze the language generator component because no more language supervision is provided in the simulated environment. We also find empirically that backpropagating the RL loss through the language generator distorts the output language, as there is no constraint for it to remain similar to human language. All training hyperparameters and details are provided in the supplementary material.
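
In code, the fine-tuning phase amounts to freezing the generator's parameters and applying the standard PPO clipped objective to the rest. A sketch reusing the hypothetical `InstructionPolicy` above, and assuming a rollout buffer has produced `state_enc`, `actions`, `logp_old`, and advantages `adv`; the learning rate and clip ratio are assumptions:

```python
import torch

# Freeze the language generator: no more language supervision is available,
# and RL gradients through it distort the generated instructions.
for module in (model.word_emb, model.init_h, model.decoder, model.word_head):
    for p in module.parameters():
        p.requires_grad = False

optimizer = torch.optim.Adam(
    [p for p in model.parameters() if p.requires_grad], lr=2.5e-4)  # lr assumed

# One PPO update on a batch from the rollout buffer.
with torch.no_grad():
    _, lang_feat = model.generate(state_enc)       # frozen high-level language
action_logits = model.action_head(torch.cat([state_enc, lang_feat], -1))
logp_new = torch.distributions.Categorical(logits=action_logits).log_prob(actions)

ratio = (logp_new - logp_old).exp()                # clipped surrogate, eps=0.2
loss = -torch.min(ratio * adv, ratio.clamp(0.8, 1.2) * adv).mean()

optimizer.zero_grad()
loss.backward()
optimizer.step()
```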

# 5 EXPERIMENTS

We compare our method against five baselines (1-5), which are reduced forms of our method, to evaluate the necessity of each component. We also consider two baselines (6-7), which swap out the language generator for alternative high-level tasks, to evaluate the usefulness of language as the chosen high-level representation. These baselines receive the additional training that our method received, as well as the implicit compositionality, but without language. In both baselines (6-7), we perform the same training steps as with our method. Implementation details are presented in Section B.

1. IL: The IL baseline uses the same low-level architecture as our method, without a high-level hidden state. The model learns to map the state encoding to an output action.

2. IL w/ Generative Language: IL w/ Generative Language is the supervised baseline of our method, which does not include the RL reward. This baseline allows us to observe and compare the benefit of having a reward to train in simulation when the model has access to both actions and language instructions.

3. IL w/ Discriminative Language: We compare our method to a closely adapted version of the method proposed in Hu et al. (2019), which similarly uses language at the high level. Rather than generate language, their high-level language is selected from a set of instructions from the collected user annotations. We discuss this adaptation in the Appendix. They consider instruction sets of sizes $N = \{50, 250, 500\}$ and find the best performance on the largest instruction set $N = 500$, which is the size we use in our implementation.

4. RL: Another baseline we consider is the reinforcement learning (RL) setting, where the agent is provided no demonstrations but has access to the sparse RL reward. The architecture we use here is the same as the IL architecture. This baseline demonstrates the capacity to learn the crafting tasks without any human demonstrations and allows us to see whether human demonstrations are useful.

5. IL + RL: We also consider a baseline that does not incorporate language, IL+RL. In IL+RL, we pretrain the same IL architecture using the human demonstrations as a warm-start to RL. It is important to note that this baseline does not include the natural language instructions as part of training. We extract all of the state-action pairs and train a supervised model on the data as in the IL model, and then we utilize the sparse RL reward to fine-tune.

6. State Reconstruction (SR): We train an autoencoder to perform state reconstruction. The autoencoder reconstructs the state encoding, and the vector at the bottleneck of the autoencoder is used as the hidden layer for the policy. SR as a baseline allows us to consider latent representations in the state encoding as a signal for the policy.

7. State Prediction (SP): We train a recurrent network, with the same architecture as our language generator, to perform state prediction. The model stores the past 3 states up to time $T$ to predict the state at $T + 1$; that is, at time $T$, the states at $T - 2$, $T - 1$, and $T$ are used to predict state $T + 1$. From the LSTM, the hidden state is extracted in the same manner as in our IL + RL w/ Lang model. SP as a baseline allows us to compare against another recurrent high-level method with additional computational power (a sketch of this module follows the list).
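
For concreteness, a sketch of the SP module referenced in baseline 7; sizes are assumed, the predicted next state provides the training signal, and the LSTM hidden state is handed to the policy in place of the language feature:

```python
import torch.nn as nn


class StatePredictor(nn.Module):
    """SP baseline sketch: an LSTM over the encodings of states T-2, T-1, T
    predicts the encoding of state T+1."""

    def __init__(self, state_dim=256, hidden=128):
        super().__init__()
        self.lstm = nn.LSTM(state_dim, hidden, batch_first=True)
        self.pred = nn.Linear(hidden, state_dim)

    def forward(self, last_three_states):           # (B, 3, state_dim)
        out, (h_n, _) = self.lstm(last_three_states)
        next_state = self.pred(out[:, -1])          # regression target: s_{T+1}
        policy_feat = h_n[-1]                       # replaces the language feature
        return next_state, policy_feat
```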

# 5.1 RESULTS

Standard setting: We evaluate the various methods on the crafts for which we have collected human demonstrations, to benchmark comparative performance in our environment. An initial analysis is to first consider how much the IL model is able to learn from human demonstrations alone, so we consider IL, IL with Generative Language, and IL with Discriminative Language (results are in Section C). None of these approaches is able to solve the most difficult 5-step tasks, or even the simpler tasks consistently, with an average success rate of about $18 - 19\%$ for 1-step tasks. We believe the 3- and 5-step tasks are difficult enough that annotations alone could not capture the diversity of board configurations across the variety of crafts, given that the board is randomly initialized each time. However, based on an analysis of the language produced (see Table 11 vs. Table 12), the generated language is more interpretable and makes more sense in a zero-shot setting. Since the language component is fixed after this point, all remaining experiments use generative language.

As shown in Figure 4, our method performs well against baselines. We find that human demonstrations are necessary to guide learning, because the behavior learned by RL alone is essentially to walk arbitrarily around the grid and interact with items. For simple 1- and 2-step tasks, this is a feasible strategy within the steps allotted for an episode. However, there is little room for error in the most difficult 5-step tasks, as even human demonstrations take on average 40 steps to solve them. We also find that, for the standard setting, incorporating a high-level network allows the model to achieve good results when comparing our method to SP and SR.

![](images/7ae12346aa097c3edc96eee12858834f58b4a89e5f82652ba5daff6a5883d9f7.jpg)
Figure 4: Comparing baselines with our method on accuracy. Human demonstrations are necessary to complete tasks with 3 or more steps. Averaged over 3 runs.

![](images/59675af4dbbfba4a56c60950ab9cb93c0bbb5d1b11266a190eb32c381a75da32.jpg)

![](images/c852613d7a7d75faccdd5e873e8c496769c48f8a1298b4a9746bb685ceeff3a5.jpg)

![](images/6144cce5572cb644c5cb6a14c340995cb8b253e5d83404b43fbdb5f3aa22a95c.jpg)

In Figure 5 we show the results of our method when we ablate the number of demonstrations used. This lets us see how many demonstrations we would feasibly need for the model to learn to solve the crafting tasks. As we decrease the amount of data provided, we find that there is greater variance in the policy's ability to complete the task, but performance only degrades significantly when we use just $25\%$ of the data on the hardest tasks.

![](images/b0abf0cad390604c9fb62ac3bba5fef1fb257cd974c252aaf86fbc73e10f22fe.jpg)
Figure 5: Ablation of our method with varying amounts of human annotations (25%, 50%, 75% and 100%). For each fraction, we sample that number of demonstrations from the dataset for each type of task. Averaged over 3 runs.

![](images/c2abab82513a6560808143b4a0df29258f73e79a5c2cb335ae98dbb63b4ad29d.jpg)

![](images/27569ea7ffa3de62b0e2122969419e2130d64b2d4e2cb2f3e8aa43a2c2818cbd.jpg)

![](images/0fbbd2c7741c8a536b944ee16cff0afb2ad798149b8fbdd11777f24a52003e00.jpg)

Zero Shot: Our method is able to use natural language instructions to improve performance on difficult tasks in the standard setting. But how well does our method do on completely new tasks not seen during training? We investigate performance on zero-shot tasks, where the agent receives no human demonstrations, no instructions, and no rewards. The agent has to complete tasks that it has never seen before and cannot train on at all. These unseen tasks do share sub-task structure with tasks seen during training, so the desired behavior is for the model to reuse sub-policies seen in other contexts. For example, in training the agent might have seen demonstrations or received rewards for tasks like "Cobblestone Stairs" and "Iron Ingot." At test time, we can evaluate the agent on an item like "Cobblestone Ingot," which has never been seen by the agent. The agent should be able to infer the sub-task breakdown given prior knowledge of similar tasks.

We present results on 35 unseen tasks in Table 1. We find that overall our method outperforms all other baselines. While SR and SP were able to match our method's performance in the standard setting, they are not able to generalize. SR and SP are viable solutions for learning complex tasks in the standard RL setting, but the representations these models learn do not aid in generalizing to unseen tasks. Here, we believe, using language is key because it creates a representation that better abstracts to new tasks. In the supplementary material, we show that on unseen tasks the model is indeed able to generate language that properly corresponds to these new combinations of materials and items, particularly decomposing the complex item into sub-tasks that were previously seen in the training phase.

Table 1: Accuracy evaluated on 100 games for 35 unseen crafts (one column per unseen craft; the final column of each block is the average). Our method outperforms baselines. We do not list IL or IL w/ Language results, which are $0\%$ for all tasks.

<table><tr><td>Steps</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>2</td><td>Avg</td></tr><tr><td>RL</td><td>93</td><td>91</td><td>95</td><td>90</td><td>92</td><td>92</td><td>91</td><td>81</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>13</td><td>0</td><td>72</td><td>28</td><td>46</td></tr><tr><td>IL+RL</td><td>92</td><td>98</td><td>85</td><td>94</td><td>91</td><td>83</td><td>95</td><td>97</td><td>21</td><td>96</td><td>37</td><td>18</td><td>97</td><td>82</td><td>97</td><td>93</td><td>94</td><td>91</td><td>81</td></tr><tr><td>SP</td><td>0</td><td>20</td><td>0</td><td>33</td><td>37</td><td>2</td><td>69</td><td>2</td><td>90</td><td>0</td><td>98</td><td>0</td><td>89</td><td>1</td><td>78</td><td>1</td><td>74</td><td>2</td><td>33</td></tr><tr><td>SR</td><td>96</td><td>64</td><td>0</td><td>0</td><td>67</td><td>70</td><td>60</td><td>99</td><td>0</td><td>79</td><td>88</td><td>74</td><td>69</td><td>37</td><td>16</td><td>98</td><td>70</td><td>0</td><td>55</td></tr><tr><td>Ours</td><td>99</td><td>99</td><td>100</td><td>100</td><td>93</td><td>99</td><td>100</td><td>100</td><td>97</td><td>98</td><td>99</td><td>99</td><td>99</td><td>100</td><td>97</td><td>100</td><td>99</td><td>97</td><td>98</td></tr><tr><td>Steps</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>3</td><td>5</td><td>5</td><td>5</td><td>5</td><td>5</td><td>Overall</td></tr><tr><td>RL</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>23</td></tr><tr><td>IL+RL</td><td>90</td><td>87</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>85</td><td>29</td><td>86</td><td>87</td><td>39</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>55</td></tr><tr><td>SP</td><td>89</td><td>0</td><td>12</td><td>0</td><td>4</td><td>10</td><td>0</td><td>47</td><td>0</td><td>1</td><td>26</td><td>0</td><td>16</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>22</td></tr><tr><td>SR</td><td>1</td><td>0</td><td>2</td><td>0</td><td>12</td><td>0</td><td>0</td><td>0</td><td>0</td><td>38</td><td>6</td><td>0</td><td>5</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>30</td></tr><tr><td>Ours</td><td>97</td><td>98</td><td>2</td><td>3</td><td>18</td><td>0</td><td>40</td><td>0</td><td>96</td><td>39</td><td>95</td><td>98</td><td>49</td><td>36</td><td>0</td><td>0</td><td>0</td><td>14</td><td>69</td></tr></table>

**Demonstration Only and Few-Shot:** In the demonstration-only setting, we assume that we have access to only human demonstrations for some subset of tasks. From the entire pool of 14 tasks we collected demonstrations for, we withhold 3 tasks (around $20\%$ of the total) for testing. These 3 tasks consist of a one-, two-, and three-step task. We run results on 3 permutations of withholding 3 tasks. For each of the 3 withheld tasks, we include the demonstrations in the supervised training phase but do not provide reward in RL fine-tuning. We vary the amount of demonstrations provided: $5\%$, $10\%$, and $100\%$. The most generous case is to assume that the model has access to all demonstrations that were collected in the dataset. Per task, the total number of demonstrations was about 300-500. Additionally, we considered a stricter few-shot case where we reduce the number of demonstrations to 20-40, which is about $5 - 10\%$ of the original number. We do not include 5-step tasks because we only collected demonstrations for two 5-step tasks. From the results in Table 2, we can see that our method outperforms baselines in its ability to utilize the few demonstrations to improve performance.

Table 2: Evaluation of few-shot tasks for our method against baseline comparisons. We consider three settings for how many demonstrations are given to the model: $5\%$ (20 demos), $10\%$ (40 demos), $100\%$. Variance results are included in the supplementary material. Results are averaged across 3 seeds.

<table><tr><td></td><td colspan="3">IL</td><td colspan="3">IL w/Lang</td><td colspan="3">IL+RL</td><td colspan="3">SP</td><td colspan="3">SR</td><td colspan="3">Ours</td></tr><tr><td>Steps</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td></tr><tr><td>1-step</td><td>16%</td><td>18%</td><td>18%</td><td>17%</td><td>19%</td><td>19%</td><td>96%</td><td>91%</td><td>98%</td><td>96%</td><td>98%</td><td>97%</td><td>53%</td><td>84%</td><td>94%</td><td>97%</td><td>90%</td><td>95%</td></tr><tr><td>2-step</td><td>4%</td><td>3%</td><td>0%</td><td>5%</td><td>5%</td><td>9%</td><td>66%</td><td>64%</td><td>66%</td><td>53%</td><td>64%</td><td>71%</td><td>10%</td><td>40%</td><td>63%</td><td>87%</td><td>73%</td><td>82%</td></tr><tr><td>3-step</td><td>1%</td><td>2%</td><td>0%</td><td>1%</td><td>3%</td><td>4%</td><td>1%</td><td>23%</td><td>22%</td><td>10%</td><td>27%</td><td>46%</td><td>0%</td><td>31%</td><td>50%</td><td>5%</td><td>47%</td><td>74%</td></tr></table>

Interpretability: One key benefit of incorporating natural language into the model is the ability for humans to interpret how the model is making decisions. We observe that the generated instructions closely match the recipes that we provide to the annotators in the data collection phase, in both train (Table 12) and test (Tables 13, 14) settings. The discriminative language, however, did not break down the task into steps that made sense (Table 11). Figure 6 presents example instructions generated by our model.

# 6 CONCLUSION

In this paper, we present a dataset of human demonstrations and natural language instructions for solving hierarchical tasks in a crafting-based world. We also describe a hierarchical model that enables efficient learning from this data through a combined supervised and reinforcement learning approach. In general, we find that leveraging human demonstrations allows the model to drastically outperform RL baselines. Additionally, our results demonstrate that natural language not only allows the model to explain its decisions, but also improves the model's performance on the most difficult crafting tasks and further allows generalization to unseen tasks. We also demonstrate the model's ability to expand its skill set through a few additional human demonstrations. While we demonstrate our approach's success in a grid-based crafting environment, we believe that our method can be adapted toward generalizable, multi-task learning in a variety of other environments.

![](images/87ff9a2a88bd944e461a1c51a02de2db918d3672f573dd75d63f99f9487762c2.jpg)
Figure 6: Generated language at test time for a 2-step craft. We only display the key frames of the trajectory which led to changes in the language. These key frames match changes in the inventory to the object mentioned in the generated instruction. Qualitatively, the generated instructions are consistent during what we would describe as a sub-task. Quantitatively, the network spends on average 4.8 steps in the environment on the same generated language output.

# REFERENCES

Peter Anderson, Qi Wu, Damien Teney, Jake Bruce, Mark Johnson, Niko Sünderhauf, Ian Reid, Stephen Gould, and Anton van den Hengel. Vision-and-language navigation: Interpreting visually-grounded navigation instructions in real environments. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3674-3683, 2018.
Jacob Andreas, Dan Klein, and Sergey Levine. Modular multitask reinforcement learning with policy sketches. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 166-175. JMLR.org, 2017.
Jacob Andreas, Dan Klein, and Sergey Levine. Learning with latent language. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2166-2179, 2018.
SRK Branavan, David Silver, and Regina Barzilay. Learning to win by reading manuals in a Monte-Carlo framework. Journal of Artificial Intelligence Research, 43:661-704, 2012.
Tianshi Cao, Jingkang Wang, Yining Zhang, and Sivabalan Manivasagam. BabyAI++: Towards grounded-language learning beyond memorization. arXiv preprint arXiv:2004.07200, 2020.
Devendra Singh Chaplot, Kanthashree Mysore Sathyendra, Rama Kumar Pasumarthi, Dheeraj Rajagopal, and Ruslan Salakhutdinov. Gated-attention architectures for task-oriented language grounding. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018.
David L Chen and Raymond J Mooney. Learning to interpret natural language navigation instructions from observations. In Twenty-Fifth AAAI Conference on Artificial Intelligence, 2011.
Maxime Chevalier-Boisvert, Dzmitry Bahdanau, Salem Lahlou, Lucas Willems, Chitwan Saharia, Thien Huu Nguyen, and Yoshua Bengio. BabyAI: First steps towards grounded language learning with a human in the loop. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=rJeXCo0cYX.
Geoffrey Cideron, Mathieu Seurin, Florian Strub, and Olivier Pietquin. Self-educated language agent with hindsight experience replay for instruction following. arXiv preprint arXiv:1910.09451, 2019.
John D Co-Reyes, Abhishek Gupta, Suvansh Sanjeev, Nick Altieri, Jacob Andreas, John DeNero, Pieter Abbeel, and Sergey Levine. Guiding policies with language via meta-learning. In International Conference on Learning Representations, 2018.
National Research Council et al. How people learn: Brain, mind, experience, and school: Expanded edition. National Academies Press, 2000.
Abhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, and Dhruv Batra. Embodied question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018.
Peter Dayan and Geoffrey E Hinton. Feudal reinforcement learning. In Advances in Neural Information Processing Systems, pp. 271-278, 1993.
Benjamin Eysenbach, Abhishek Gupta, Julian Ibarz, and Sergey Levine. Diversity is all you need: Learning skills without a reward function. In International Conference on Learning Representations, 2018.
Carlos Florensa, Yan Duan, and Pieter Abbeel. Stochastic neural networks for hierarchical reinforcement learning. In International Conference on Learning Representations, 2017.
Carlos Florensa, David Held, Xinyang Geng, and Pieter Abbeel. Automatic goal generation for reinforcement learning agents. In International Conference on Machine Learning, pp. 1515-1528, 2018.
Sébastien Forestier, Rémy Portelas, Yoan Mollard, and Pierre-Yves Oudeyer. Intrinsically motivated goal exploration processes with automatic curriculum learning. arXiv preprint arXiv:1708.02190, 2017.
Karol Hausman, Jost Tobias Springenberg, Ziyu Wang, Nicolas Heess, and Martin Riedmiller. Learning an embedding space for transferable robot skills. In International Conference on Learning Representations, 2018.
Nicolas Heess, Greg Wayne, Yuval Tassa, Timothy Lillicrap, Martin Riedmiller, and David Silver. Learning and transfer of modulated locomotor controllers. arXiv preprint arXiv:1610.05182, 2016.
Hengyuan Hu, Denis Yarats, Qucheng Gong, Yuandong Tian, and Mike Lewis. Hierarchical decision making by generating and following natural language instructions. In Advances in Neural Information Processing Systems, pp. 10025-10034, 2019.
J. McVicker Hunt. Intrinsic motivation and its role in psychological development. In Nebraska Symposium on Motivation, volume 13, pp. 189-282. University of Nebraska Press, 1965.
Yiding Jiang, Shixiang Shane Gu, Kevin P Murphy, and Chelsea Finn. Language as an abstraction for hierarchical deep reinforcement learning. In Advances in Neural Information Processing Systems, pp. 9414-9426, 2019.
Hoang Le, Nan Jiang, Alekh Agarwal, Miroslav Dudík, Yisong Yue, and Hal Daumé III. Hierarchical imitation and reinforcement learning. In International Conference on Machine Learning, pp. 2917-2926, 2018.
Jelena Luketina, Nantas Nardelli, Gregory Farquhar, Jakob Foerster, Jacob Andreas, Edward Grefenstette, Shimon Whiteson, and Tim Rocktäschel. A survey of reinforcement learning informed by natural language. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pp. 6309-6317. International Joint Conferences on Artificial Intelligence Organization, 7 2019. doi: 10.24963/ijcai.2019/880. URL https://doi.org/10.24963/ijcai.2019/880.
Corey Lynch and Pierre Sermanet. Grounding language in play. arXiv preprint arXiv:2005.07648, 2020.
Kenneth Marino, Abhinav Gupta, Rob Fergus, and Arthur Szlam. Hierarchical RL using an ensemble of proprioceptive periodic policies. In International Conference on Learning Representations, 2019.
David Matthews, Sam Kriegman, Collin Cappelle, and Josh Bongard. Word2vec to behavior: Morphology facilitates the grounding of language in machines. 2019.
Hongyuan Mei, Mohit Bansal, and Matthew R Walter. Listen, attend, and walk: Neural mapping of navigational instructions to action sequences. In Thirtieth AAAI Conference on Artificial Intelligence, 2016.
Andrew N Meltzoff. Imitation, objects, tools, and the rudiments of language in human ontogeny. Human Evolution, 3(1-2):45-64, 1988.
Karthik Narasimhan, Regina Barzilay, and Tommi Jaakkola. Grounding language for transfer in deep reinforcement learning. Journal of Artificial Intelligence Research, 63:849-874, 2018.
Khanh Nguyen, Debadeepta Dey, Chris Brockett, and Bill Dolan. Vision-based navigation with language-based assistance via imitation learning with indirect intervention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 12527-12537, 2019.
Junhyuk Oh, Satinder Singh, Honglak Lee, and Pushmeet Kohli. Zero-shot task generalization with multi-task deep reinforcement learning. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 2661-2670. JMLR.org, 2017.
Ronald Parr and Stuart J Russell. Reinforcement learning with hierarchies of machines. In Advances in Neural Information Processing Systems, pp. 1043-1049, 1998.
John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
Tianmin Shu, Caiming Xiong, and Richard Socher. Hierarchical and interpretable skill acquisition in multi-task reinforcement learning. In International Conference on Learning Representations, 2018.
Martin Stolle and Doina Precup. Learning options in reinforcement learning. In International Symposium on Abstraction, Reformulation, and Approximation, pp. 212-223. Springer, 2002.
Richard S Sutton, Doina Precup, and Satinder Singh. Between MDPs and semi-MDPs: A framework for temporal abstraction in reinforcement learning. Artificial Intelligence, 112(1-2):181-211, 1999.
Stefanie Tellex, Thomas Kollar, Steven Dickerson, Matthew R Walter, Ashis Gopal Banerjee, Seth Teller, and Nicholas Roy. Understanding natural language commands for robotic navigation and mobile manipulation. In Twenty-Fifth AAAI Conference on Artificial Intelligence, 2011.
Edward C Williams, Nakul Gopalan, Mine Rhee, and Stefanie Tellex. Learning to parse natural language to grounded reward functions with weak supervision. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pp. 1-7. IEEE, 2018.
Haonan Yu, Haichao Zhang, and Wei Xu. Interactive grounded language acquisition and generalization in a 2D world. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=H1UOm4gA-.
Victor Zhong, Tim Rocktäschel, and Edward Grefenstette. RTFM: Generalising to new environment dynamics via reading. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SJgob6NKvH.

# A DATASET

# A.1 COLLECTION PROCESS

In this section, we provide additional details on our data collection process on Amazon Mechanical Turk (AMT). First, we filter workers by the following criteria: a) HIT approval rate greater than 95%; b) location in an English-speaking country; c) no soft block granted. These criteria help to ensure the quality of the collected data, particularly in terms of natural language.

![](images/a0e36752b224b6caa3da5327ad4366461e85bdbfd0e7bdb1d5f39e98030f00a1.jpg)
Figure 7: Workflow to collect human demonstrations for our dataset.

We collected the dataset over the course of a few weeks. For each HIT, we paid the Turker $0.65. On average the task took about 3-4 minutes. For each HIT, we generate a unique entrance code that is provided to the Turker on the AMT website. The Turker is also provided with a unique exit code once the HIT is complete. Given the entrance and exit codes, we are able to pay workers accordingly for their demonstrations.

Workers were provided with an entrance code at the beginning of the task to enter the website and an exit code upon completing the task to be able to submit the HIT. This ensures that we do not have workers doing extra HITs that we are unable to pay for, and that workers who submit HITs have indeed completed our task. We also wrote a parsing script to quickly verify all submitted HITs before payment. Specifically, we used this script to manually review the list of instructions generated for each task to ensure that the instructions provided were indeed pertinent to the task at hand.

We had many returning workers who completed our task. A few even wrote to our requester email address to let us know that they really enjoyed the HIT and thought it was quite interesting to work on. This suggests that demonstrations of this kind are relatively engaging for humans to provide.

# A.2 HIT INSTRUCTIONS

Prior to starting the task, each worker was shown an instructions page that uses cooking stir-fry as an analogy for the type of instructions we expected them to provide. The demo page is shown in Figure 8.

Conveying the desired level of specificity was difficult without explicitly providing example instructions. We deliberately chose not to provide examples so as not to prime the workers toward a particular format. New workers were given two short games to complete to familiarize themselves with the environment. Returning workers were given one longer game to complete, as they already had experience with the task. Workers who completed the task as previously described were fully compensated.

Initially, some workers provided too few instructions, wanting to finish the task as quickly as possible, while other workers provided instructions that were too granular, writing "press left" or "go up 1" rather than abstracting the task into sub-tasks. We therefore checked compliance before the worker submitted the HIT and built in precautions so that workers had to redo a level if they did not comply with the requirements clearly delineated in the demo.
213
+ # Demo
214
+
215
+ Welcome to the HIT! Read instructions carefully. If you do not follow the instructions below, we reserve the right to not pay for the HIT.
216
+
217
+ Below is a snippet of a demo from Wikihow of the type of annotation we will be looking for, but in our game setting.
218
+
219
+ These are their steps to make vegetable stir-fry.
220
+
221
+ Notice how the tutorial gives you a one-sentence description of each high-level step, but it is not too specific about each action.
222
+
223
+ 1. Select vegetables to use.
224
+
225
+ ![](images/875ad7fde3db90898d71b1e3913598ffb044c89edd1e783796d1ec518ee8cd41.jpg)
226
+
227
+ 2. Wash and dry the vegetables...
228
+
229
+ ![](images/012cddfb6b7d460cd18b8ca61f3c3d10ef914afcf6560b5f72050696c5242dac.jpg)
230
+
231
+ 3. Slice the vegetables into thin pieces.
232
+
233
+ ![](images/e8e69f5e0acdcc545caed64c87eaea2633a09b116b9f9d51dfe27a4cce24245d.jpg)
234
+
235
+ Credits to https://m.wikihow.com/Stir-Fry-Vegetables.
236
+
237
+ Similarly, in our task, you will be given a goal for the agent to accomplish.
238
+
239
+ ![](images/044f6b4322283ef8000e4fe9bb5157bf4b0ff7757a150d8236fc54ccf76f1e94.jpg)
240
+
241
+ And we want you to write the instruction steps to achieve this goal.
242
+
243
+ 1. Keep it high level and break the task down, just like Wikihow.
244
+ 2. Do not write just the specific action (e.g. up, down, left, craft, mine, etc) or something like "press up" or "press left 2 times" which are not meaningful steps as your instruction.
245
+ 3. Since we want high level steps, this means you will execute more than 1 action per instruction.
246
+
247
+ ![](images/a29dfa313ea3ee3ca4b369a0a9890ebb94e6cde704ccf4c39bc36e11049c7d00.jpg)
248
+
249
+ Then, demonstrate how to do this step by executing it using the UP, LEFT, DOWN, RIGHT buttons (or arrow keys).
250
+
251
+ Use the other buttons (or the keys denoted in parentheses), explained below, to complete the task.
252
+
253
+ ![](images/6137288e00e61d9cdbbe02af39d0ec7f3c1a0cda5709ceac34f85b37767532be.jpg)
254
+ Figure 8: Demo instructions for AMT workers.
255
+
256
+ After you have completed the first instruction, press the "DONE" button (or hit enter).
257
+
258
+ ![](images/1924ae6563336b0e0d13605c2a8dea6ef42d2039ac61c6d4ceb07647550bf499.jpg)
259
+
260
+ Then return to write the next instruction, then execute it, and continue step-by-step until the goal is completed.
261
+
262
+ ![](images/08314b79893dd44c95a1860a41d37e9922d007a9f21220181532f23c91b63998.jpg)
263
+
264
+ By the end of this game, you should have typed MULTIPLE instructions, each followed by a sequence of button/key presses.
265
+
266
+ You should be writing meaningful high level instructions which are not "up", "press up", or "go left 2 times".
267
+
268
+ If you do not follow these instructions, you will not be paid.
269
+
270
+ # A.3 ADDITIONAL ENVIRONMENT AND CRAFTING TASK DETAILS
271
+
272
+ Figure 9 gives an example of the type of task (Make Iron Ore) that would be presented to the worker, which includes the goal, recipes, and the current board. Table 3 shows how the tasks are related in terms of sub-tasks; tasks may be related through a shared material (e.g. Iron) or a shared craft (e.g. Stairs).
273
+
274
+ GOAL:
275
+
276
+ Make Iron Ore (Iron Ore=1)
277
+
278
+ RECIPES:
279
+
280
+ ![](images/e8306904a92fd5b27370bde02d2f228a355814ba9be167d8935e4b9fb5ebb2ce.jpg)
281
+ Ore Vein
282
+
283
+ ![](images/610d86987b9fc7eac81200c8ef3d330b7eb0812b569adc6dee773e043b45c10e.jpg)
284
+ Pickaxe
285
+
286
+ ![](images/e7ec9be7496475b4896a5c8b3798f23705785a7e960f051259b5254d4434d676.jpg)
287
+
288
+ ![](images/2f32dc912be2e65b691d4307aa769ddaac89b498a6b1965c174b2029076b6121.jpg)
289
+ Iron Ore
290
+
291
+ ![](images/ace0580c51a466955a62ac3a00da778d896240851040f5876b1df17d0234a6ef.jpg)
292
+ Figure 9: Example board and goal configuration where the goal is to make an iron ore. The worker uses the recipes provided to give appropriate instructions and execute accordingly.
293
+
294
+ Table 3: List of recipes for which we have collected annotations, labeled by the number of steps needed to complete each one and by other recipes which may share sub-tasks or underlying structure.
295
+
296
+ <table><tr><td>ID</td><td>Recipe Name</td><td>Steps</td><td>Related Crafts by ID</td></tr><tr><td>1</td><td>Gold Ore</td><td>1</td><td>2</td></tr><tr><td>2</td><td>Iron Ore</td><td>1</td><td>1,8</td></tr><tr><td>3</td><td>Diamond Boots</td><td>2</td><td>12,14</td></tr><tr><td>4</td><td>Brick Stairs</td><td>2</td><td>5,7</td></tr><tr><td>5</td><td>Cobblestone Stairs</td><td>2</td><td>4,7,13</td></tr><tr><td>6</td><td>Wooden Door</td><td>3</td><td>7</td></tr><tr><td>7</td><td>Wood Stairs</td><td>3</td><td>4,5,6</td></tr><tr><td>8</td><td>Iron Ingot</td><td>3</td><td>2</td></tr><tr><td>9</td><td>Leather Leggings</td><td>3</td><td>10,11,12</td></tr><tr><td>10</td><td>Leather Chestplate</td><td>3</td><td>9,11,12</td></tr><tr><td>11</td><td>Leather Helmet</td><td>3</td><td>9,10,12</td></tr><tr><td>12</td><td>Leather Boots</td><td>3</td><td>3,9,10,11</td></tr><tr><td>13</td><td>Stone Pickaxe</td><td>5</td><td>5,14</td></tr><tr><td>14</td><td>Diamond Pickaxe</td><td>5</td><td>3,13</td></tr></table>
297
+
298
+ ![](images/b6faff0c2c639eb99b1237678a0eb3bc36ded58807b384e62d7612238cde3ea2.jpg)
299
+ Figure 10: A more in-depth example of 3 out of the 14 training tasks to show how the subtasks are related (red boxes = final craft, blue boxes = raw material).
300
+
301
+ ![](images/eb75e244be4270a0a6c02bf36c2077fe9a0aaa92ea2a1945eb390580721117a1.jpg)
302
+
303
+ ![](images/055e84bce56fb1abdd9b796f465264529f8cb09733a4690295653f25d1a6134c.jpg)
304
+
305
+ Table 4: Summary statistics for tasks of varying difficulty.
306
+
307
+ <table><tr><td>Steps</td><td>Average # of Instructions</td><td>Average # of Actions</td></tr><tr><td>1-step</td><td>3.7</td><td>15.4</td></tr><tr><td>2-step</td><td>4.9</td><td>21.5</td></tr><tr><td>3-step</td><td>6.1</td><td>27.6</td></tr><tr><td>5-step</td><td>8.8</td><td>40.1</td></tr></table>
308
+
309
+ # A.4 EXAMPLE DATA COLLECTED
310
+
311
+ Table 5 gives examples of instructions randomly sampled from our dataset. Even with a limited number of crafts, we were able to collect language instructions with diverse sentence constructions. There are inconsistencies in capitalization and spelling, which we handled in the preprocessing of the data. Table 6 shows the most frequently used instructions. Figure 11 gives summary statistics of the instruction side of the dataset.
312
+
313
+ ![](images/cb38ba98165aa1b01eaf94ab244c41ba420bbb93ff815261d2dd4872b5182240.jpg)
314
+ Figure 11: (Left) Instruction frequency (Middle) Word frequency (Right) Histogram of instruction lengths.
315
+
316
+ ![](images/7dc872b53a8585d0306ea531d15f9606cc3133ac1cfb8b4583fa084155f7f528.jpg)
317
+
318
+ ![](images/2e0ae962d10c5ddfc0073cc4854be861618e1cb0b9fe5fe7f94c884fc608315e.jpg)
319
+
320
+ Table 5: Examples of randomly sampled instructions.
321
+
322
+ <table><tr><td>Grab the pickaxe.
323
+ Make a stone pickaxe from stick and cobblestone.
324
+ Go to leather boots bench and Craft Leather Boots.
325
+ Move to Tree.
326
+ Craft at the leather helmet bench
327
+ Make a leather chestplate from the leather.
328
+ Unlock the door.
329
+ Move up two squares and grab the key.
330
+ Go to switich and open door with Toggle Switch.
331
+ Chop down the tree to get its wood.
332
+ Go to the stone pickaxe crafting bench.
333
+ Mine diamond ore.
334
+ Go to stock and click Mine to harvest Cobblestone.
335
+ Toggle the switch to open the door.
336
+ Grab the pickaxe.
337
+ Go back through door and mine gold ore vein.
338
+ Walk to brick factory and acquire bricks.
339
+ Go to wood bench and Craft Wood Plank.
340
+ Craft Diamond Pickaxe.
341
+ Move to ingot.
342
+ Next step, use axe on tree to get wood.
343
+ Go to iron ore vein and mine for iron ore.
344
+ Unlock the door with the key and enter the room.
345
+ Pick up axe and put into inventory.
346
+ Craft at the stick bench.
347
+ Open the door to your right.
348
+ Mine the cobblestone stash.
349
+ Get eht pickaxe.
350
+ Go to tools and click Grab to take each one.</td><td>Craft diamond axe with its bench, stick, diamond.
351
+ Pick up key using &quot;GRAB&quot;.
352
+ Grab the key; this may be useful.
353
+ Collect Tool and Pass Through Door.
354
+ Go to brick stairs bench and craft.
355
+ Move up to the axe and pick it up.
356
+ Use Mine on Iron Ore Vein to collect Iron.
357
+ Use pickaxe to mine diamond ore vein.
358
+ Move to Stick bench and Craft Stick.
359
+ Craft Wood Plank.
360
+ Harvest wood from tree using &quot;MINE&quot;.
361
+ 2. &quot;Open Door&quot;, then move to Pickax and &quot;grab&quot;.
362
+ Craft brick stairs with its bench and brick.
363
+ Go &quot;Mine&quot; both the iron ore vein and the coal vein.
364
+ Mine the wood.
365
+ Mine Diamond Ore Vein.
366
+ Chop down the tree to get its wood.
367
+ Move to Rabbit.
368
+ Go to tools and click Grab to take each one.
369
+ Grab key.
370
+ Go to stick workbench and press craft.
371
+ Go to TOOL (Pickaxe).
372
+ Go to the stone pickaxe table.
373
+ Move to Wood Plank bench and Craft.
374
+ Mine cobblestone stash, then go to tree.
375
+ Craft Wooden Door.
376
+ Craft diamond boots.
377
+ Use pickaxe to mine iron ore vein.
378
+ Flip the switch.</td></tr></table>
379
+
380
+ # B METHODS DETAILS
381
+
382
+ Given the dataset of human demonstrations, we convert the traces of each game into state-action pairs for training. The subsequent sections provide additional details of the training parameters for both the supervised IL training and the RL fine-tuning. Each experiment ran on a single GeForce GTX 1080 Ti GPU and took between a few hours and a few days.
383
+
384
+ # B.1 DATA PREPROCESSING
385
+
386
+ From the dataset, which had 6,322 game traces, we extracted 195,405 state-action pairs and 35,901 total instructions. This is done by matching each action to the corresponding state within a trace, as well as to the high-level natural language instruction active at that time. Each instruction was edited using a spell checker package to reduce the size of the final vocabulary. In the link above, we provide the cleaned version of the dataset.
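+
+ As a rough illustration of this conversion (the trace schema and field names below are hypothetical placeholders, not our released format), the flattening step might look like:
+
+ ```python
+ # Minimal sketch: flatten game traces into (state, instruction, action)
+ # training tuples. The field names are illustrative placeholders.
+ def traces_to_pairs(traces):
+     pairs = []
+     for trace in traces:
+         for step in trace["steps"]:
+             # Each low-level action is matched with the state it was taken
+             # in and the high-level instruction active at that time step.
+             pairs.append((step["state"], step["instruction"], step["action"]))
+     return pairs
+ ```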
387
+
388
+ # B.2 IL PARAMETERS
389
+
390
+ Both the language generation and language-conditioned policy networks use a cross-entropy loss and the Adam optimizer (learning rate 0.001). In addition, the language loss includes a doubly stochastic regularization term, based on the attention mechanism, and we clip the gradient norm to a maximum of 3. We train for 15-20 epochs with a batch size of 64 during supervised training. By evaluating after each epoch on 100 randomly spawned games, we find that performance plateaus after that number of epochs. As with the RL reward, we only consider a game complete if the final craft is completed within the given number of steps. We use the entire dataset for training, since validation/testing were performed on randomly generated new games.
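+
+ For concreteness, a single supervised update under these settings might look like the following sketch (the model interface, batch fields, and the regularization weight `lambda_reg` are assumptions; the loss, optimizer, learning rate, and max gradient norm follow the text above):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ criterion = nn.CrossEntropyLoss()
+
+ def il_train_step(model, optimizer, batch, lambda_reg=1.0):
+     """One supervised IL update; `model` is assumed to return word logits,
+     action logits, and attention weights over the state encoding."""
+     optimizer.zero_grad()
+     word_logits, action_logits, attn = model(batch["states"])
+     lang_loss = criterion(word_logits, batch["word_targets"])
+     # Doubly stochastic regularization: encourage the attention over each
+     # input location to sum to ~1 across decoding steps.
+     lang_loss = lang_loss + lambda_reg * ((1.0 - attn.sum(dim=1)) ** 2).mean()
+     policy_loss = criterion(action_logits, batch["action_targets"])
+     (lang_loss + policy_loss).backward()
+     nn.utils.clip_grad_norm_(model.parameters(), max_norm=3.0)
+     optimizer.step()
+
+ # optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
+ ```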
391
+
392
+ Table 6: Instruction sorted by usage frequency.
393
+
394
+ <table><tr><td>Grab pickaxe.
395
+ Grab the pickaxe.
396
+ Open the door.
397
+ Open door.
398
+ Grab axe.
399
+ Craft wood plank.
400
+ Toggle the switch.
401
+ Grab the key.
402
+ Mine tree.
403
+ Grab key.
404
+ Craft stick.
405
+ Toggle switch to open door.
406
+ Grab the axe.
407
+ Toggle switch.
408
+ Go to pickaxe.
409
+ Go to wood bench and craft wood plank.
410
+ Grab key to open door.
411
+ Get the pickaxe.
412
+ Go to tools and click grab to take each one.
413
+ Craft leather.
414
+ Go to key grab it go to door and open door.
415
+ Go to switch and open door with toggle switch.
416
+ Go to tree.
417
+ Grab the sword.
418
+ Go to stick bench and craft stick.
419
+ Mine the tree.
420
+ Grab sword.
421
+ Pick up pickaxe using grab.
422
+ Mine rabbit.</td><td>Mine cobblestones stash.
423
+ Pick up the pickaxe.
424
+ Go to switch.
425
+ Go to pickaxe and click grab to take it.
426
+ Go to wood plank and press craft.
427
+ Go to axe.
428
+ Get the key.
429
+ Mine diamond ore vein.
430
+ Mine cobblestones.
431
+ Go to stick and press craft.
432
+ Go to pickaxe and select &quot;grab&quot;.
433
+ Make planks.
434
+ Use key to open door.
435
+ Craft diamond pickaxe.
436
+ Go to pickaxe and press grab.
437
+ Craft stone pickaxe.
438
+ Unlock the door.
439
+ Make sticks.
440
+ Go to stone bench and craft stone pickaxe.
441
+ Mine the diamond ore vein.
442
+ Go to key.
443
+ Go to door.
444
+ Mine cobblestone stash.
445
+ Grab axe to mine tree.
446
+ Pick up axe using grab.
447
+ Get the axe.
448
+ Go to stocks click mine to harvest wood/stone.
449
+ Craft wood plank with its bench and wood.
450
+ Use the switch to open the door.</td></tr></table>
451
+
452
+ Table 7: We compare to some related datasets/environments (Chevalier-Boisvert et al., 2019b; Jiang et al., 2019; Hu et al., 2019; Anderson et al., 2018). We don't report dataset size for environments that generate synthetic language. Note that $\sim$ means limited evaluation (they demonstrate unseen evaluation in one setting only). Most notably, our work focuses on developing a method that performs well on unseen tasks. We want to clarify that unseen means tasks/environments for which the agent has never received a supervised reward. This is not the same as generating a new configuration of a task for which the agent received reward during training.
453
+
454
+ <table><tr><td>Dataset</td><td>Language</td><td>Dataset Size</td><td>Task</td><td>Unseen Tasks</td></tr><tr><td>BabyAI</td><td>Synthetic</td><td>-</td><td>Navigation/object placement</td><td>No</td></tr><tr><td>HAL/CLEVR</td><td>Synthetic</td><td>-</td><td>Object sorting/arrangement</td><td>~</td></tr><tr><td>R2R</td><td>Natural</td><td>10,800 Views</td><td>Vision+language navigation</td><td>Yes</td></tr><tr><td>MiniRTS</td><td>Natural</td><td>5,392 Games</td><td>Real-time strategy game</td><td>No</td></tr><tr><td>Ours</td><td>Natural</td><td>6,322 Games</td><td>Compositional crafting tasks</td><td>Yes</td></tr></table>
455
+
456
+ # B.3 RL PARAMETERS
457
+
458
+ To fine-tune with RL, we first created a gym environment for the Maze game, which at reset time spawns a new Mazebase game in the backend. In the environment parameters, we define the maximum episode length to be 100 steps. The action space is a Discrete space of size 8 (up, down, left, right, toggle switch, grab, craft, mine) and the observation space is a flat vector that concatenates all state observations. For the PPO algorithm, we use a learning rate of 2.5e-4, a clip parameter of 0.1, a value loss coefficient of 0.5, 8 processes, 128 steps, a mini-batch size of 4, linear learning rate decay, an entropy coefficient of 0.01, and 100,000,000 environment steps.
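+
+ A minimal sketch of such an environment wrapper is shown below (the class name is hypothetical and the Mazebase backend calls are stubbed out; the spaces, episode length, and PPO settings follow the text above):
+
+ ```python
+ import gym
+ import numpy as np
+ from gym import spaces
+
+ class CraftingMazeEnv(gym.Env):
+     """Sketch of the gym wrapper; backend game logic is stubbed."""
+
+     def __init__(self, obs_dim):
+         super().__init__()
+         self.max_episode_steps = 100
+         # 8 actions: up, down, left, right, toggle switch, grab, craft, mine.
+         self.action_space = spaces.Discrete(8)
+         # Flat vector concatenating all state observations.
+         self.observation_space = spaces.Box(
+             low=-np.inf, high=np.inf, shape=(obs_dim,), dtype=np.float32)
+         self._steps = 0
+
+     def reset(self):
+         self._steps = 0  # would spawn a new Mazebase game here
+         return np.zeros(self.observation_space.shape, dtype=np.float32)
+
+     def step(self, action):
+         self._steps += 1  # would forward `action` to the backend here
+         obs = np.zeros(self.observation_space.shape, dtype=np.float32)
+         done = self._steps >= self.max_episode_steps
+         return obs, 0.0, done, {}
+
+ # PPO settings from the text (key names follow common PPO codebases):
+ PPO_KWARGS = dict(lr=2.5e-4, clip_param=0.1, value_loss_coef=0.5,
+                   num_processes=8, num_steps=128, entropy_coef=0.01)
+ ```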
459
+
460
+ # B.4 ARCHITECTURE DETAILS
461
+
462
+ # B.4.1 STATE ENCODING
463
+
464
+ As shown in Figure 12, the relevant information about the state that is encoded includes the $5 \times 5$ grid, the inventory, and the goal. We have two representations of the $5 \times 5$ grid: one with items relevant for crafting and another with a one-hot representation of non-crafting-related items, such as a door, wall, or key. All crafting-related items on the board, in the inventory, and in the goal are embedded using 300-dimensional GloVe embeddings, summing the embeddings for multi-word items (e.g. Iron Ore Vein). The intuition for this distinction is that, for generalization, crafting items should be associated in terms of compositionality, whereas non-crafting items are standalone.
465
+
466
+ To compute the state encoding, we first pass the two grids, the inventory, and the goal through separate fully connected layers to reduce them to the same dimension, and then concatenate the resulting vectors. The final state encoding tensor has size (27, 128), where 25 entries are for the grid, 1 for the inventory, and 1 for the goal.
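+
+ A sketch of this encoder is given below (whether the two grid streams are summed or concatenated per cell is not specified above, so this sketch sums them; the GloVe lookups producing the 300-d inputs are assumed to happen upstream):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class StateEncoder(nn.Module):
+     """Sketch following the shapes in the text: output is (B, 27, 128)."""
+
+     def __init__(self, n_noncraft, dim=128):
+         super().__init__()
+         self.fc_craft = nn.Linear(300, dim)            # GloVe grid cells
+         self.fc_noncraft = nn.Linear(n_noncraft, dim)  # one-hot grid cells
+         self.fc_inventory = nn.Linear(300, dim)
+         self.fc_goal = nn.Linear(300, dim)
+
+     def forward(self, craft_grid, noncraft_grid, inventory, goal):
+         # craft_grid: (B, 25, 300); noncraft_grid: (B, 25, n_noncraft);
+         # inventory: (B, 300); goal: (B, 300)
+         grid = self.fc_craft(craft_grid) + self.fc_noncraft(noncraft_grid)
+         inv = self.fc_inventory(inventory).unsqueeze(1)  # (B, 1, 128)
+         g = self.fc_goal(goal).unsqueeze(1)              # (B, 1, 128)
+         # 25 grid entries + 1 inventory + 1 goal -> (B, 27, 128)
+         return torch.cat([grid, inv, g], dim=1)
+ ```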
467
+
468
+ ![](images/1b624554d1593f350b24eedb99ccfd22d78fb17aae26fe549df1e895c5fcc448.jpg)
469
+ Figure 12: At each time step we encode state-relevant observations, including the goal, inventory, and grid. This encoding is utilized by both the language generator and the language-conditioned policy. The green boxes denote the observations that were encoded using the GloVe embedding.
470
+
471
+ # B.4.2 IL W/ DISCRIMINATIVE LANGUAGE
472
+
473
+ Since Hu et al. (2019) is closely related to our work, we wanted to compare our method against theirs. Their method uses only behavioral cloning, corresponding to our IL w/ Generative Language baseline but with discriminative language instead. We modify our high-level language generator to discriminate amongst the most frequent $N = 500$ instructions by adapting the code released by Hu et al. (2019) for their LSTM-based language model. We plugged in our own state encoding instead of theirs, which is in any case tailored to their environment. In summary, the high-level language module is largely the same as our model, both being LSTMs, except for the modification of the output layer, which predicts over the set of possible instructions. We similarly extract the hidden state to condition the low-level policy on. The low-level policy is kept the same as in our IL w/ Generative Language model for a fair comparison. The training parameters are the same as for the other baselines.
474
+
475
+ # B.4.3 OUR METHOD
476
+
477
+ The state encoder, which is used in both the high- and low-level models, is covered in the section above. The language generator LSTM takes inputs of size 128 and has a hidden size of 32. The policy network has layers of size 48 and 8 and uses ReLU activations.
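+
+ Wiring these sizes together gives roughly the following sketch (how the language hidden state and the state features are combined at the policy input is an assumption here):
+
+ ```python
+ import torch.nn as nn
+
+ lang_generator = nn.LSTM(input_size=128, hidden_size=32, batch_first=True)
+ policy = nn.Sequential(
+     nn.Linear(32 + 128, 48),  # assumed input: LSTM hidden state + state feature
+     nn.ReLU(),
+     nn.Linear(48, 8),         # 8 discrete actions
+ )
+ ```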
478
+
479
+ # B.4.4 STATE RECONSTRUCTION
480
+
481
+ The state reconstruction architecture was instantiated as an autoencoder with 4 hidden layers, taking the state encoding tensor as input. The low-dimensional representation after 2 hidden layers is used as the hidden state for the policy. The autoencoder is trained with an MSE loss on the state encoding. This model was trained for a total of 25 epochs in the IL phase.
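+
+ A minimal sketch of this baseline is shown below (the hidden widths are illustrative; only the 4-hidden-layer structure, the bottleneck after 2 layers, and the MSE objective follow the text):
+
+ ```python
+ import torch.nn as nn
+
+ class StateAutoencoder(nn.Module):
+     def __init__(self, in_dim, bottleneck=64):
+         super().__init__()
+         self.encoder = nn.Sequential(          # 2 hidden layers
+             nn.Linear(in_dim, 256), nn.ReLU(),
+             nn.Linear(256, bottleneck), nn.ReLU(),
+         )
+         self.decoder = nn.Sequential(          # 2 hidden layers
+             nn.Linear(bottleneck, 256), nn.ReLU(),
+             nn.Linear(256, in_dim),
+         )
+
+     def forward(self, state_encoding):
+         x = state_encoding.flatten(1)  # flatten the (27, 128) encoding
+         z = self.encoder(x)            # used as the policy hidden state
+         recon = self.decoder(z)        # trained with MSE against x
+         return recon, z
+ ```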
482
+
483
+ # B.4.5 STATE PREDICTION
484
+
485
+ The state prediction architecture largely resembles the language generator, except that we removed the GloVe embedding layer. In IL training, the dataset was modified to include the past $T$ states. In RL training, the environment was modified to record the previous $T$ states. If there were $< T$ states in the current trajectory, the current state is repeated as input and subsequently replaced as new states arrive. The recurrent network is trained with an MSE loss on the state encoding. This model was trained for a total of 20 epochs in the IL phase.
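+
+ The past-$T$ bookkeeping described above can be sketched as a small buffer (the class name is hypothetical):
+
+ ```python
+ from collections import deque
+
+ class StateHistory:
+     """Keeps the previous T states; at the start of a trajectory the
+     current state is repeated and then replaced as new states arrive."""
+
+     def __init__(self, T):
+         self.buffer = deque(maxlen=T)
+         self.T = T
+
+     def reset(self, first_state):
+         self.buffer.clear()
+         self.buffer.extend([first_state] * self.T)
+
+     def push(self, state):
+         self.buffer.append(state)  # oldest (repeated) state is dropped
+
+     def stack(self):
+         return list(self.buffer)   # input to the recurrent predictor
+ ```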
486
+
487
+ # B.4.6 UNI-MODAL INPUT
488
+
489
+ A baseline we considered, but did not include in the main text, evaluates the necessity of the state encoding in multi-modal datasets. In other works, language instructions alone are sufficient to solve the task without a state encoding or some other representation of the current state. This ablation helps verify that the generated instructions are sufficiently high level that they do not, by themselves, provide the agent with all the information necessary to complete the task. We considered a baseline where the agent only sees language instructions without the state encoding; that is, the state encoding is used to generate language but is not provided as additional input to the policy network. This baseline performs poorly and is unable to solve even the simplest 1-step task. We believe this is because a representation of the current state is critical to task completion and is not captured by the high-level instructions.
490
+
491
+ # C SUPPLEMENTARY RESULTS
492
+
493
+ # C.1 IL RESULTS IN STANDARD SETTING
494
+
495
+ As shown in Table 8, having access to natural language instructions is only marginally beneficial in this setting. While the two environments capture different tasks, we found empirically that the model proposed by Hu et al. (2019), which is most similar to our IL+Lang baseline, was able to solve their MiniRTS environment using a similarly sized dataset to ours, whereas IL+Lang is not sufficient to complete the most difficult tasks in our environment.
496
+
497
+ Table 8: Accuracy of IL (with and without language) evaluated over 100 games with 3 different seeds.
498
+
499
+ <table><tr><td>Steps</td><td>IL no language</td><td>IL Gen. Language</td><td>IL Disc. Language</td></tr><tr><td>1-step</td><td>18.00%± 3.55%</td><td>19.33% ± 1.89%</td><td>20.00% ± 1.35%</td></tr><tr><td>2-step</td><td>0.00%± 0.00%</td><td>9.33% ± 2.05%</td><td>8.33%± 0.98%</td></tr><tr><td>3-step</td><td>0.00%± 0.00%</td><td>4.33% ± 0.47%</td><td>0.00%± 0.00%</td></tr><tr><td>5-step</td><td>0.00%± 0.00%</td><td>0.00%± 0.00%</td><td>0.00%± 0.00%</td></tr></table>
500
+
501
+ # C.2 DEMONSTRATION ONLY AND FEW-SHOT
502
+
503
+ As shown in Table 9, we find low deviation across our multiple runs. However, in some cases, such as IL+RL $10\%$ and $100\%$, we observe higher variance because in some trials the model was not able to solve any of the 3-step tasks while in others it was. In the low-variance cases, the model either consistently solved the tasks or consistently failed to.
504
+
505
+ Table 9: Variance results from Table 3 in the main paper, which presents accuracy.
506
+
507
+ <table><tr><td></td><td colspan="3">IL</td><td colspan="3">IL w/Lang</td><td colspan="3">IL+RL</td><td colspan="3">SP</td><td colspan="3">SR</td><td colspan="3">Ours</td></tr><tr><td>Steps</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td><td>5%</td><td>10%</td><td>100%</td></tr><tr><td>1-step</td><td>2%</td><td>1%</td><td>3%</td><td>5%</td><td>3%</td><td>2%</td><td>2%</td><td>5%</td><td>8%</td><td>3%</td><td>1%</td><td>2%</td><td>4%</td><td>3%</td><td>1%</td><td>1%</td><td>2%</td><td>2%</td></tr><tr><td>2-step</td><td>1%</td><td>1%</td><td>0%</td><td>1%</td><td>1%</td><td>1%</td><td>9%</td><td>10%</td><td>17%</td><td>19%</td><td>9%</td><td>13%</td><td>8%</td><td>40%</td><td>15%</td><td>1%</td><td>3%</td><td>7%</td></tr><tr><td>3-step</td><td>1%</td><td>2%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>1%</td><td>33%</td><td>31%</td><td>14%</td><td>24%</td><td>18%</td><td>0%</td><td>7%</td><td>15%</td><td>4%</td><td>27%</td><td>10%</td></tr></table>
508
+
509
+ # C.3 REWARD ONLY
510
+
511
+ Finally, for completeness, we consider the scenario where the agent receives a reward but no demonstrations. The tasks we select for this setting are sampled from the unseen tasks list.
512
+
513
+ We choose three crafts requiring 2-5 steps. We evaluate our method in this scenario against the other baselines that train using a reward signal. In Table 10, we evaluate on tasks for which we do not have demonstrations and fine-tune a trained model with the reward signal for these tasks. This setting is not very interesting from a generalization perspective, since rewards are a far more expensive resource than demonstrations and instructions. We do not include 1-step tasks, since those are easily solved by RL alone (see the 1-step results in Figure 4). IL and IL w/ Language are not included because, for them, this setting reduces to the zero-shot setting.
514
+
515
+ Table 10: Comparison of 2-5 step tasks where only reward is provided to the agent. We believe IL+RL is not able to adapt to these new tasks, given reward only, since it has overfit to the original training tasks. We find that our method outperforms baselines in this setting.
516
+
517
+ <table><tr><td>Steps</td><td>RL</td><td>IL+RL</td><td>Ours</td></tr><tr><td>2-step</td><td>92.00%±0.81%</td><td>0%</td><td>95.33%±0.94%</td></tr><tr><td>3-step</td><td>71.67%±0.47%</td><td>0%</td><td>88.00%±1.41%</td></tr><tr><td>5-step</td><td>0.00%±0.00%</td><td>0%</td><td>65.00%±5.67%</td></tr></table>
518
+
519
+ # C.4 INTERPRETABILITY
520
+
521
+ Table 11: Step-by-step discriminated high-level instructions for seen crafts.
522
+
523
+ <table><tr><td>Goal: Iron Ore
524
+ grab the pickaxe
525
+ mine iron ore
526
+ mine the iron ore vein
527
+ unknown</td><td>Goal: Gold Ore
528
+ unknown
529
+ go to the key
530
+ unknown
531
+ go to gold ore vein and mine</td></tr><tr><td>Goal: Brick Stairs
532
+ grab key and open door
533
+ mine bricks
534
+ unknown</td><td>Goal: Cobblestone Stairs
535
+ take the pickaxe
536
+ go to cobblestone stash and mine
537
+ use pickaxe to mine cobblestone stash
538
+ go to cobblestone stash and mine
539
+ got to stock and click mine to harvest cobblestones
540
+ unknown
541
+ craft cobblestone stairs</td></tr><tr><td>Goal: Diamond Boots
542
+ unknown
543
+ go to pickaxe
544
+ unknown</td><td>Goal: Iron Ore
545
+ toggle switch to open door
546
+ take the pickaxe
547
+ toggle switch to open door
548
+ unknown</td></tr></table>
549
+
550
+ We present more examples of generated language for both seen and unseen tasks (Table 12 and Table 13). The tables show the complete set of instructions for tasks which were successfully completed. We observe that if a task was not completed, the language generator would get stuck on a particular instruction. The language generated for tasks where the model received supervised data is, as expected, more consistent in using the correct terms. However, for tasks which are new to the model, the generated instructions also indicate the use of the correct items. We can observe some mistakes, such as generating "leather boots" instead of "leather stairs" or generating an unknown token. Particularly in the Gold/Brick Pickaxe examples, the model is able to generate fairly consistent language for a challenging, new 5-step task. Note that in the Gold Pickaxe example, the model was not able to predict the use of the item gold, so it uses the word stocks instead; in the Brick Pickaxe example, the model correctly inferred the use of brick. These inconsistencies can perhaps be explained by the number of training examples in which brick and gold appear, with the former being much more frequent than the latter.
551
+
552
+ Natural language allows us to understand why a model fails to complete a task by simply looking at the generated instructions, which can facilitate future model development and refinement. We identify two failure cases: (1) failure of language generation and (2) failure of policy.
553
+
554
+ Table 12: Step-by-step generated high-level instructions for seen crafts.
555
+
556
+ <table><tr><td>Goal: Gold Ore
557
+ go to key and press grab.
558
+ go to pickaxe and grab.
559
+ go to gold ore vein and mine.</td><td>Goal: Brick Stairs
560
+ go to pickaxe and press grab.
561
+ go to the brick factor and mine brick.
562
+ go to brick stairs and press craft.</td></tr><tr><td>Goal: Diamond Pickaxe
563
+ go to axe and press grab.
564
+ go to key grab it go to door and open door.
565
+ go to tools and click grab to take each one.
566
+ go to tree and press mine.
567
+ go to stocks click mine to harvest.
568
+ go to tree and mine.
569
+ go to wood plank and press craft.
570
+ go to stick bench and craft stick.</td><td>Goal: Wooden Door
571
+ go to the axe and grab it.
572
+ go to the switch and open door.
573
+ go to the axe and grab it.
574
+ go to the tree.
575
+ go to the tree and press mine.
576
+ go to wood plank and press craft.
577
+ go to wood plank bench and craft wooden door.</td></tr><tr><td>Goal: Leather Helmet
578
+ go to sword and click grab to take it.
579
+ go to key and press grab.
580
+ go to sword and click grab to take it.
581
+ go to rabbit and press mine.
582
+ go to leather and press craft.
583
+ go to leather boots bench and craft leather.</td><td>Goal: Diamond Boots
584
+ go to key and press grab.
585
+ go to pickaxe and press grab.
586
+ go to diamond ore vein and mine.
587
+ go to diamond boots and press craft.
588
+ go to diamond bench and craft diamond boots.</td></tr><tr><td>Goal: Iron Ore
589
+ go to key and press grab.
590
+ go to pickaxe and press grab.
591
+ go to iron ore vein and press mine.</td><td>Goal: Cobblestone Stairs
592
+ go to key and press grab.
593
+ go to pickaxe and press grab.
594
+ go to cobblestone stash and press mine.
595
+ go to cobblestone stairs and press craft.</td></tr><tr><td>Goal: Wood Stairs
596
+ go to axe and press grab.
597
+ go to tree and mine.
598
+ go to wood plank and press craft.
599
+ go to wood stairs and press craft.</td><td>Goal: Leather Chestplate
600
+ go to sword and press grab.
601
+ go to rabbit and mine.
602
+ go to leather and craft.
603
+ go to leather chestplate and craft.</td></tr><tr><td>Goal: Leather Leggings
604
+ go to sword and click grab to take it.
605
+ go to rabbit and press mine.
606
+ go to leather and press craft.
607
+ go to leather bench and craft leather</td><td>Goal: Iron Ingot
608
+ go to key and press grab.
609
+ go to pickaxe and press grab.
610
+ go to iron ore vein and mine.
611
+ go to iron ingot and craft.</td></tr></table>
612
+
613
+ In (1), we observe occasional instructions generated for unseen tasks which reference incorrect crafting materials. For example, "Go to iron ingot and press craft" was generated as an instruction for Gold Ingot. In (2), while the generated language is sensible for the task, the policy fails to execute it correctly.
614
+
615
+ Table 13: Step-by-step generated high-level instructions for unseen crafts.
616
+
617
+ <table><tr><td>Goal: Cobblestone Boots
618
+ go to key and press grab.
619
+ go to pickaxe and press grab.
620
+ go to cobblestone stash and mine.
621
+ go to workbench and press craft.</td><td>Goal: Diamond Leggings
622
+ go to pickaxe and press grab.
623
+ go to diamond ore vein and mine.
624
+ go to diamond boots and press craft.</td></tr><tr><td>Goal: Leather Stairs
625
+ go to sword and press grab.
626
+ go to rabbit and mine the rabbit.
627
+ go to leather and press craft.</td><td>Goal: Stone Helmet
628
+ go to pickaxe and press grab.
629
+ go to the cobblestones stash and mine.
630
+ go to the workbench and craft.</td></tr><tr><td>Goal: Diamond Ingot
631
+ go to pickaxe and press grab.
632
+ go to diamond ore vein.
633
+ go to the workbench and craft.</td><td>Goal: Brick Door
634
+ go to pickaxe and press grab.
635
+ go to the brick factory and mine the brick.
636
+ go to the brick stairs and craft.</td></tr><tr><td>Goal: Brick Pickaxe
637
+ go to the pickaxe and grab it
638
+ go to the axe and press grab.
639
+ go to the tree.
640
+ go to the tree and mine.
641
+ go to the brick factory and mine.
642
+ go to the wood plank and craft.
643
+ go to the stick bench and craft stick.
644
+ go to stick and craft.</td><td>Goal: Gold Pickaxe
645
+ go to the pickaxe and press grab.
646
+ go to the axe and grab it.
647
+ go to the tree.
648
+ go to stocks and click mine to harvest &lt;unk&gt;.
649
+ go to the tree and mine the tree.
650
+ go to wood plank and press craft.
651
+ go to stick and press craft.</td></tr><tr><td>Goal: Diamond Stairs
652
+ go to key and press grab.
653
+ go to pickaxe and press grab.
654
+ go to the diamond ore vein and mine.
655
+ go to the bench and craft.</td><td>Goal: Wood Chestplate
656
+ go to key and grab it.
657
+ go to axe and grab it.
658
+ go to the tree.
659
+ go to tree and mine.
660
+ go to wood plank and craft.</td></tr></table>
661
+
662
+ Table 14: Example of instruction and inventory side-by-side for 3 unseen tasks. As in Figure 6 from the main paper, the inventory changes when a subtask, given by the instruction, is completed.
663
+
664
+ <table><tr><td colspan="2">Goal: Leather Door</td></tr><tr><td>Instruction</td><td>Inventory</td></tr><tr><td>go to the sword and grab it</td><td>{&#x27;Sword&#x27;: 1}</td></tr><tr><td>go to the rabbit and mine</td><td>{&#x27;Sword&#x27;: 1, &#x27;Rabbit Hide&#x27;: 1}</td></tr><tr><td>go to the leather and press craft</td><td>{&#x27;Sword&#x27;: 1, &#x27;Rabbit Hide&#x27;: 0, &#x27;Leather&#x27;: 1}</td></tr><tr><td>go to the leather boots bench and craft leather</td><td>{&#x27;Sword&#x27;: 1, &#x27;Rabbit Hide&#x27;: 0, &#x27;Leather&#x27;: 0, &#x27;Leather Door&#x27;: 1}</td></tr></table>
665
+
666
+ <table><tr><td colspan="2">Goal: Stone Boots</td></tr><tr><td>Instruction</td><td>Inventory</td></tr><tr><td>go to key and press grab</td><td>{&#x27;key&#x27;: 1}</td></tr><tr><td>go to pickaxe and press grab</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1}</td></tr><tr><td>go to the cobblestone stash and mine the &lt;unk&gt;</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1, &#x27;Cobblestone&#x27;: 1}</td></tr><tr><td>go to the bench and craft</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1, &#x27;Cobblestone&#x27;: 0, &#x27;Stone Boots&#x27;: 1}</td></tr></table>
667
+
668
+ <table><tr><td colspan="2">Goal: Diamond Stairs</td></tr><tr><td>Instruction</td><td>Inventory</td></tr><tr><td>go to key and press grab</td><td>{&#x27;key&#x27;: 1}</td></tr><tr><td>go to pickaxe and press grab</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1}</td></tr><tr><td>go to the diamond ore vein</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1}</td></tr><tr><td>go to diamond ore vein and mine</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1, &#x27;Diamond&#x27;: 1}</td></tr><tr><td>go to the bench and craft</td><td>{&#x27;key&#x27;: 1, &#x27;Pickaxe&#x27;: 1, &#x27;Diamond&#x27;: 0, &#x27;Diamond Stairs&#x27;: 1}</td></tr></table>
askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e346401a13b901396384fdf729d03e89f02a8109c06806f3fde8783f0a5f7264
3
+ size 1583792
askyourhumansusinghumaninstructionstoimprovegeneralizationinreinforcementlearning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:760d2158e69ef233ffdc93d222134a58b2e14c9d7d2c229acab69b89fcc4c00f
3
+ size 494733
attentionalconstellationnetsforfewshotlearning/8d357406-6e29-47af-b7d2-458a8aaa9f4c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e72f0dafe3d223eca9c522e8875088c6ea35662a823d42db4d4254d6a54d40d
3
+ size 110791
attentionalconstellationnetsforfewshotlearning/8d357406-6e29-47af-b7d2-458a8aaa9f4c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9686f7134be3d15e24baa19d03be1d4cc25476c3167ad366c289d5eb5dc48036
3
+ size 132853
attentionalconstellationnetsforfewshotlearning/8d357406-6e29-47af-b7d2-458a8aaa9f4c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0decbce91088f71525fc77ad7c0d9b5e5c8ae8af5236dfda244ee286674cb01c
3
+ size 3961693
attentionalconstellationnetsforfewshotlearning/full.md ADDED
@@ -0,0 +1,419 @@
1
+ # ATTENTIONAL CONSTELLATION NETS FOR FEW-SHOT LEARNING
2
+
3
+ Weijian Xu$^{*1}$, Yifan Xu$^{*1}$, Huajin Wang$^{*1}$ & Zhuowen Tu$^{1,2}$
4
+
5
+ University of California San Diego<sup>1</sup>, Amazon Web Services<sup>2</sup>
6
+
7
+ {wex041,yix081,huw011,ztu}@ucsd.edu
8
+
9
+ # ABSTRACT
10
+
11
+ The success of deep convolutional neural networks builds on top of the learning of effective convolution operations, capturing a hierarchy of structured features via filtering, activation, and pooling. However, the explicit structured features, e.g. object parts, are not expressive in the existing CNN frameworks. In this paper, we tackle the few-shot learning problem and make an effort to enhance structured features by expanding CNNs with a constellation model, which performs cell feature clustering and encoding with a dense part representation; the relationships among the cell features are further modeled by an attention mechanism. With the additional constellation branch to increase the awareness of object parts, our method is able to attain the advantages of the CNNs while making the overall internal representations more robust in the few-shot learning setting. Our approach attains a significant improvement over the existing methods in few-shot learning on the CIFAR-FS, FC100, and mini-ImageNet benchmarks.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ Tremendous progress has been made in both the development and the applications of deep convolutional neural networks (CNNs) (Krizhevsky et al., 2012; Simonyan & Zisserman, 2015; Szegedy et al., 2015; He et al., 2016; Xie et al., 2017). Visualization of the internal CNN structure trained on e.g. ImageNet (Deng et al., 2009) has revealed an increasing level of semantic relevance of the learned convolution kernels/filters to the semantics of the object classes, displaying bar/edge-like patterns in the early layers, object parts in the middle layers, and face/object-like patterns in the higher layers (Zeiler & Fergus, 2014). In general, we consider the learned convolution kernels to be somewhat implicit about the underlying objects, since they represent projections/mappings of the input but without explicit knowledge about the parts in terms of their numbers, distributions, and spatial configurations.
16
+
17
+ On the other hand, there has been a rich history of explicit object representations, starting from deformable templates (Yuille et al., 1992), pictorial structures (Felzenszwalb & Huttenlocher, 2005), constellation models (Weber et al., 2000; Fergus et al., 2003; Sudderth et al., 2005; Fei-Fei et al., 2006), and grammar-based models (Zhu & Mumford, 2007). These part-based models (Weber et al., 2000; Felzenszwalb & Huttenlocher, 2005; Fergus et al., 2003; Sudderth et al., 2005; Zhu & Mumford, 2007) share three common properties in their algorithm design: (1) unsupervised learning, (2) explicit clustering to obtain the parts, and (3) modeling to characterize the spatial configuration of the parts. Compared to CNN architectures, these methods are expressive, with explicit part-based representations. They have pointed to a promising direction for object recognition, albeit with a lack of strong practical performance on modern datasets. Another line of object recognition systems with the part concept, but trained discriminatively, includes the discriminatively trained part-based model (DPM) (Felzenszwalb et al., 2009) and the spatial pyramid matching method (SPM) (Lazebnik et al., 2006). In the context of deep learning, efforts exist to bring the explicit part representation into deep hierarchical structures (Salakhutdinov et al., 2012).
18
+
19
+ The implicit and explicit feature representations could share mutual benefits, especially in few-shot learning where training data is scarce: CNNs may face difficulty in learning a generalized representation due to a lack of sufficient training data.
20
+
21
+ Clustering and dictionary learning, in contrast, provide a direct means for data abstraction. In general, end-to-end learning of both the implicit and explicit part-based representations is a viable and valuable approach in machine learning. We view convolutional features as an implicit part-based representation, since they are learned through back-propagation via filtering processes. On the other hand, an explicit representation can be attained by introducing feature clustering that captures the data abstraction/distribution under a mixture model.
22
+
23
+ In this paper, we develop an end-to-end framework that combines the implicit and explicit part-based representations for the few-shot classification task by seamlessly integrating constellation models with convolution operations. In addition to keeping a standard CNN architecture, we employ a cell feature clustering module to encode the potential object parts. This procedure is similar to the clustering/codebook learning for appearance in the constellation model (Weber et al., 2000). The cell feature clustering process generates a dense distance map. We further model the relations among the cells using a self-attention mechanism, resembling the spatial configuration design in the constellation model (Weber et al., 2000). Thus, we name our method constellation networks (ConstellationNet). We demonstrate the effectiveness of our approach on standard few-shot benchmarks, including FC100 (Oreshkin et al., 2018), CIFAR-FS (Bertinetto et al., 2018) and mini-ImageNet (Vinyals et al., 2016), by showing a significant improvement over the existing methods. An ablation study also demonstrates that the effectiveness of ConstellationNet is not achieved by simply increasing the model complexity using e.g. more convolution channels or deeper and wider convolution layers (WRN-28-10 (Zagoruyko & Komodakis, 2016)) (see the ablation study in Table 3 and Figure 2 (e)).
24
+
25
+ # 2 RELATED WORK
26
+
27
+ Few-Shot Learning. Recently, few-shot learning has attracted much attention in the deep learning community (Snell et al., 2017; Lee et al., 2019). Current few-shot learning is typically formulated as a meta-learning problem (Finn et al., 2017), in which an effective feature embedding is learned for generalization across novel tasks. We broadly divide the existing few-shot learning approaches into three categories: (1) Gradient-based methods optimize the feature embedding with gradient descent during the meta-test stage (Finn et al., 2017; Bertinetto et al., 2018; Lee et al., 2019). (2) Metric-based methods learn a fixed optimal embedding with a distance-based prediction rule (Vinyals et al., 2016; Snell et al., 2017). (3) Model-based methods obtain a conditional feature embedding via a weight predictor (Mishra et al., 2017; Munkhdalai et al., 2017). Here we adopt ProtoNet (Snell et al., 2017), a popular metric-based framework, in our approach and boost the generalization ability of the feature embeddings with explicit structured representations from the constellation model. Recently, Tokmakov et al. (2019) proposed a compositional regularization based on image attribute annotations, which is different from our unsupervised part-discovery strategy.
28
+
29
+ Part-Based Constellation/Discriminative Models. The constellation model family (Weber et al., 2000; Felzenszwalb & Huttenlocher, 2005; Fergus et al., 2003; Sudderth et al., 2005; Fei-Fei et al., 2006; Zhu & Mumford, 2007) is mostly generative/expressive and shares two commonalities in the representation: (1) clustering/codebook learning for the appearance and (2) modeling of the spatial configurations. The key difference among these approaches lies in how the spatial configuration is modeled: Gaussian distributions (Weber et al., 2000); pictorial structure (Felzenszwalb & Huttenlocher, 2005); joint shape model (Fergus et al., 2003); hierarchical graphical model (Sudderth et al., 2005); grammar-based (Zhu & Mumford, 2007). These constellation models represent a promising direction for object recognition but are not practically competitive with deep learning based approaches. There are also discriminative models: the discriminatively trained part-based model (DPM) (Felzenszwalb et al., 2009) is a typical method in this vein, where object parts (as HOG features (Dalal & Triggs, 2005)) and their configurations (a star model) are learned jointly in a discriminative way. The spatial pyramid matching method (SPM) (Lazebnik et al., 2006) has no explicit parts but instead builds on top of different levels of grids with a codebook learned on top of SIFT features (Lowe, 2004). DPM and SPM are of practical significance for object detection and recognition. In our approach, we implement the constellation model with cell feature clustering and attention-based cell relation modeling to realize the appearance learning and spatial configuration modeling, respectively.
30
+
31
+ Part models are extensively studied in fine-grained image classification and object detection to provide spatial guidance for filtering uninformative object proposals (Simon & Rodner, 2015; Peng et al., 2017; Zhu et al., 2017; Ge et al., 2019; Qi et al., 2019). Related to our work, Neural Activation Constellations (NAC) (Simon & Rodner, 2015) introduces the constellation model to perform unsupervised part model discovery with convolutional networks. Our work is different from NAC in three aspects: (1) The algorithmic mechanisms behind Simon & Rodner (2015) and ours are different.
32
+
33
+ ![](images/bb50089c3fb3cd1f0bfbacd530f2282b2e6453281e12bb124769864c57a1a3f4.jpg)
34
+ Figure 1: Illustration of our ConstellationNet pipeline where the bottom part is the network architecture based on Conv-4 backbone, and the top part shows the constellation model. Our proposed ConstellationNet consists of "Constell." modules that perform explicit cell feature clustering with self-attention for joint relation modeling.
35
+
36
+ Simon & Rodner (2015) implements a traditional Gaussian-based constellation module to model the spatial configuration and part selection on top of a fixed pre-trained CNN. In our ConstellationNet, however, the part representation and spatial configuration are modeled by cell feature clustering and a self-attention based cell relation module, which is general-purpose, modularized and recursive. (2) In Simon & Rodner (2015), the constellation module is optimized with an EM-like algorithm, which is separate from the CNN optimization. Our constellation modules are seamlessly integrated into current CNNs and jointly optimized with them. (3) Our ConstellationNet uses the dense cell features from the CNN feature maps, which considers all positions in the images as potential parts and models their relations. In contrast, Simon & Rodner (2015) extracts sparse part representations (i.e. it uses at most one part proposal per channel and selects even fewer parts later), which may not fully utilize the rich information in the CNN feature maps.
37
+
38
+ # 3 FEW-SHOT LEARNING
39
+
40
+ In a standard classification problem, we aim to learn a model trained on a dataset $\mathcal{D}^{\mathrm{base}}$ that can generalize its classification ability to an unseen test set $\mathcal{D}^{\mathrm{novel}}$ belonging to the same categories. In the few-shot classification problem, $\mathcal{D}^{\mathrm{base}}$ and $\mathcal{D}^{\mathrm{novel}}$ are instead formed from different categories to emphasize the model's generalization ability on novel categories; we denote the training categories as $\mathcal{C}_{\mathrm{base}}$ and the test categories as $\mathcal{C}_{\mathrm{novel}}$, with $\mathcal{C}_{\mathrm{base}} \cap \mathcal{C}_{\mathrm{novel}} = \emptyset$ to ensure fairness.
41
+
42
+ In the training stage (a.k.a. meta-train stage), metric-based few-shot learning approaches (Snell et al., 2017; Vinyals et al., 2016; Oreshkin et al., 2018) usually learn a feature extractor $\phi(\mathbf{x})$ on the dataset $\mathcal{D}^{\mathrm{base}}$ to obtain a generic feature embedding by optimizing the loss $\mathcal{L}(\phi)$:
43
+
44
+ $$
45
+ \mathcal{L}(\phi) = \mathbb{E}_{\{(\mathbf{x}, y)\} \sim \mathcal{D}^{\mathrm{base}}} \, \ell\left(\{(\phi(\mathbf{x}), y)\}\right) \tag{1}
46
+ $$
47
+
48
+ where $\{(\mathbf{x},y)\}$ is a sampled mini-batch of data points and $\ell (\cdot)$ is usually an episodic few-shot loss (Vinyals et al., 2016) or a standard cross-entropy loss (Chen et al., 2020).
49
+
50
+ In the inference stage (a.k.a. meta-test stage), a typical few-shot benchmark evaluates the model on $K$-way, $N$-shot classification tasks $\mathcal{T}$ drawn from $\mathcal{D}^{\mathrm{novel}}$, where each task has a support set and a query set, i.e. $\mathcal{T} = (\mathcal{T}^{\mathrm{supp}},\mathcal{T}^{\mathrm{query}})$. The support set $\mathcal{T}^{\mathrm{supp}}$ contains $K$ classes and each class has $N$ images (e.g. $K = 5$, $N\in \{1,5\}$). Following Snell et al. (2017), the prediction $\hat{y}'$ for a query image $\mathbf{x}'\in \mathcal{T}^{\mathrm{query}}$ is given by the label of the nearest prototype $\mathbf{c}_k$ from $\mathcal{T}^{\mathrm{supp}}$ under a cosine similarity $d(\cdot ,\cdot)$:
51
+
52
+ $$
53
+ \hat{y}' = \arg\max_{k} d\left(\phi(\mathbf{x}'), \mathbf{c}_{k}\right), \quad \mathbf{c}_{k} = \frac{1}{N} \sum_{(\mathbf{x}, y) \in \mathcal{T}^{\mathrm{supp}},\, y = k} \phi(\mathbf{x}). \tag{2}
54
+ $$
55
+
56
+ An extended description of the few-shot learning framework can be found in Appendix A.1. The generalization ability of the feature extractor $\phi(\mathbf{x})$ can be improved through the training scheme, the network design, or the objective function.
57
+
58
+ Examples include episodic learning (Vinyals et al., 2016), task conditioning (Oreshkin et al., 2018), and learnable distances (Sung et al., 2018). In our method, we propose a novel network design that inserts constellation models into CNNs to strengthen the intermediate features.
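+
+ As a concrete illustration of Eq. 2, a minimal PyTorch sketch of nearest-prototype prediction under cosine similarity might look as follows (this is an illustrative sketch, not the authors' released code):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def prototype_predict(query_feat, support_feats, support_labels, K):
+     """query_feat: (Q, C); support_feats: (K*N, C);
+     support_labels: (K*N,) with values in [0, K)."""
+     # Class prototypes: mean of the support embeddings per class.
+     protos = torch.stack(
+         [support_feats[support_labels == k].mean(dim=0) for k in range(K)])
+     # Cosine similarity between each query and each prototype.
+     sims = F.normalize(query_feat, dim=-1) @ F.normalize(protos, dim=-1).t()
+     return sims.argmax(dim=-1)  # predicted class index per query
+ ```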
59
+
60
+ # 4 CONSTELLATION MODEL
61
+
62
+ The concept of a constellation was introduced to the few-shot learning scenario in early work (Fei-Fei et al., 2006), in which the appearance and the shape are learned independently in a mixture model. In our work, we revisit the constellation model in an end-to-end learning framework: First, we define a cell feature as the individual local feature at a position in the feature map (see Figure 1). We then employ cell feature clustering to model the underlying distribution of input cell features, implying a part discovery procedure. We further obtain the distance map of the cell features from clustering and then perform cell relation modeling to build spatial relationships.
63
+
64
+ # 4.1 CELL FEATURE CLUSTERING
65
+
66
+ In convolutional neural networks (CNNs), the convolutional filters are learned to detect the discriminative patterns from low-level to high-level through back-propagation (Zeiler & Fergus, 2014). In fact, the backward signal in the back-propagation is not necessarily needed to obtain a pattern detector. With the feature map in the forward step of the CNN, we are able to cluster the individual features at each location of the feature map (a.k.a. cell features) into multiple centers and employ the cluster centers as filters (Coates & Ng, 2012; Krähenbuhl et al., 2015). Assume we obtain a convolutional feature map $\mathbf{U}$ with batch size $B$ , spatial size $H\times W$ and channels $C$ . We disassemble the feature map $\mathbf{U}\in \mathbb{R}^{B\times H\times W\times C}$ into a cell features set $\mathcal{U} = \{\mathbf{u}_1,\mathbf{u}_2,\dots,\mathbf{u}_n\}$ where $n = BHW$ and $\mathbf{u}_i\in \mathbb{R}^C$ is a cell feature. Naively, we can conduct a $k$ -means algorithm on input cell features $\mathcal{U}$ to solve the clustering objective:
67
+
68
+ $$
69
+ \min \sum_{i} \sum_{k} m_{ik} \|\mathbf{u}_{i} - \mathbf{v}_{k}\|_{2}^{2} \quad \text{s.t.} \quad m_{ik} \in \{0, 1\}, \quad \sum_{k} m_{ik} = 1 \tag{3}
70
+ $$
71
+
72
+ where $\mathcal{V} = \{\mathbf{v}_1, \mathbf{v}_2, \dots, \mathbf{v}_K\}$ is a set of cluster centers and $m_{ik}$ indicates if the input cell feature $\mathbf{u}_i$ is assigned to cluster center $\mathbf{v}_k$ . The clustering-based filters $\mathcal{V}$ can model the underlying cell feature distributions and capture the most frequent features, which can be explicitly interpreted as meaningful part patterns/part types. The hard assignment map $\mathbf{m}_i = (m_{i1}, m_{i2}, \dots, m_{iK})$ of input cell feature $\mathbf{u}_i$ onto the cluster centers can be used as a part-based representation, providing alternative information to the next layer in the CNN.
73
+
74
+ However, two issues remain unsolved in this naive design: First, CNNs are typically optimized in a stochastic gradient descent (SGD) manner. Thus, in each forward step, only a mini-batch of images is processed to provide cell features, which implies that the cluster centers cannot capture the global feature distribution across the whole dataset. Second, the hard assignment map carries limited information due to its discrete representation. Therefore, inspired by Sculley (2010), we design a mini-batch soft $k$-means algorithm to cluster the cell features approximately:
75
+
76
+ - Initialization. Randomly initialize global cluster centers $\mathcal{V} = \{\mathbf{v}_1, \mathbf{v}_2, \dots, \mathbf{v}_K\}$ and a counter $\mathbf{s} = (s_1, s_2, \dots, s_K) = \mathbf{0}$ .
77
+ - Cluster Assignment. In the forward step, given input cell features $\mathcal{U} = \{\mathbf{u}_1,\mathbf{u}_2,\dots,\mathbf{u}_n\}$, we compute the distance vector $\mathbf{d}_i = (d_{i1},d_{i2},\ldots, d_{iK})$ between the input cell feature $\mathbf{u}_i$ and all cluster centers $\mathcal{V}$. We then compute the soft assignment $m_{ik}\in \mathbb{R}$ and generate the current mini-batch centers $\mathbf{v}_k^{\prime}$:
78
+
79
+ $$
80
+ d _ {i k} = \left\| \mathbf {u} _ {i} - \mathbf {v} _ {k} \right\| _ {2} ^ {2}, \quad m _ {i k} = \frac {e ^ {- \beta d _ {i k}}}{\sum_ {j} e ^ {- \beta d _ {i j}}}, \quad \mathbf {v} _ {k} ^ {\prime} = \frac {\sum_ {i} m _ {i k} \mathbf {u} _ {i}}{\sum_ {i} m _ {i k}} \tag {4}
81
+ $$
82
+
83
+ where $\beta > 0$ is an inverse temperature.
84
+
85
+ - Centroid Movement. We formulate a count update $\Delta \mathbf{s} = \sum_{i}\mathbf{m}_{i}$ by summing all assignment maps $\mathbf{m}_i = (m_{i1},m_{i2},\dots m_{iK})$ . The current mini-batch centers $\mathbf{v}_k^{\prime}$ are then updated to the global centers $\mathbf{v}_k$ with a momentum coefficient $\eta$ :
86
+
87
+ $$
88
+ \mathbf {v} _ {k} \leftarrow (1 - \eta) \mathbf {v} _ {k} + \eta \mathbf {v} _ {k} ^ {\prime}, \quad \eta = \frac {\lambda}{s _ {k} + \Delta s _ {k}} \tag {5}
89
+ $$
90
+
91
+ - Counter Update. Counter s is updated and distance vectors $\{\mathbf{d}_i\}$ are reshaped and returned:
92
+
93
+ $$
94
+ \mathbf {s} \leftarrow \mathbf {s} + \Delta \mathbf {s} \tag {6}
95
+ $$
96
+
97
+ By gradually updating the global cluster centers, the above algorithm is able to address the issue of limited data in a mini-batch. In addition, we reshape the distance vectors $\{\mathbf{d}_i\}$ of all input cell features into a distance map $\mathbf{D} \in \mathbb{R}^{B \times H \times W \times K}$. Each distance vector $\mathbf{d}_i$ can be seen as a learned cell code in codebook (dictionary) learning, which encodes a soft assignment of the visual word (i.e. cell feature) onto the codewords (i.e. cluster centers) and implies a part representation. The distance map $\mathbf{D}$ can then be viewed as a cell code map that represents the spatial distribution of identified parts, which is passed to the following layers. Empirically, we observe that when $\mathbf{u}_i$ and $\mathbf{v}_k$ are $L_2$ normalized, the training procedure is more stable, and the Euclidean distance $d_{ik}$ is equivalent to a cosine similarity up to an affine transformation. Details of the cell feature clustering can be found in Appendix A.9.
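+
+ For concreteness, one mini-batch soft $k$-means update (Eqs. 4-6) can be sketched in PyTorch as follows (the hyperparameter defaults are placeholders):
+
+ ```python
+ import torch
+
+ def soft_kmeans_step(U, V, s, beta=10.0, lam=1.0):
+     """U: (n, C) cell features; V: (K, C) global centers; s: (K,) counter."""
+     d = torch.cdist(U, V) ** 2               # (n, K) squared distances
+     m = torch.softmax(-beta * d, dim=1)      # soft assignments (Eq. 4)
+     V_batch = (m.t() @ U) / m.sum(dim=0).unsqueeze(1)  # mini-batch centers
+     ds = m.sum(dim=0)                        # count update
+     eta = lam / (s + ds)                     # per-center momentum (Eq. 5)
+     V_new = (1 - eta).unsqueeze(1) * V + eta.unsqueeze(1) * V_batch
+     return d, V_new, s + ds                  # distance map is passed onward
+ ```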
98
+
99
+ # 4.2 CELL RELATION AND SPATIAL CONFIGURATION MODELING
100
+
101
+ Before the deep learning era, traditional constellation models (Fei-Fei et al., 2006) decomposed visual information into appearance and shape representations. The appearance of different parts in the image is treated independently, while the shape of the parts is assumed to have spatial connections. In our constellation model, we likewise establish spatial relationships among the individual part-based representations at different locations of the distance map. Specifically, we apply the self-attention mechanism (Vaswani et al., 2017) to build the spatial relationships and enhance the representation, instead of using the probabilistic graphical models of prior work (Fei-Fei et al., 2006).
102
+
103
+ In cell relation modeling, we add a positional encoding $\mathbf{P} \in \mathbb{R}^{B \times H \times W \times K}$ for the spatial locations, following Carion et al. (2020), to the distance map $\mathbf{D}$ and obtain the input feature map $\mathbf{F}_{\mathrm{I}}$ for the query and key layers. For the value layer, we directly flatten the distance map $\mathbf{D}$ into another input feature map $\mathbf{F}_{\mathrm{I}}'$:
104
+
105
+ $$
106
+ \mathbf {F} _ {\mathrm {I}} = \text {S p a t i a l F l a t t e n} (\mathbf {D} + \mathbf {P}) \in \mathbb {R} ^ {B \times H W \times K}, \quad \mathbf {F} _ {\mathrm {I}} ^ {\prime} = \text {S p a t i a l F l a t t e n} (\mathbf {D}) \in \mathbb {R} ^ {B \times H W \times K} \tag {7}
107
+ $$
108
+
109
+ The input feature maps $\mathbf{F}_I, \mathbf{F}_I'$ are transformed into query, key and value $\{\mathbf{F}^q, \mathbf{F}^k, \mathbf{F}^v\} \subset \mathbb{R}^{B \times HW \times K}$ by three linear layers $\{\mathbf{W}^q, \mathbf{W}^k, \mathbf{W}^v\} \subset \mathbb{R}^{K \times K}$ and further computes the output feature $\mathbf{F}_A$ :
110
+
111
+ $$
112
+ \left[ \mathbf {F} ^ {q}, \mathbf {F} ^ {k}, \mathbf {F} ^ {v} \right] = \left[ \mathbf {F} _ {\mathrm {I}} \mathbf {W} ^ {q}, \mathbf {F} _ {\mathrm {I}} \mathbf {W} ^ {k}, \mathbf {F} _ {\mathrm {I}} ^ {\prime} \mathbf {W} ^ {v} \right] \tag {8}
113
+ $$
114
+
115
+ $$
116
+ \mathbf {F} _ {\mathrm {A}} = \operatorname {A t t} \left(\mathbf {F} ^ {q}, \mathbf {F} ^ {k}, \mathbf {F} ^ {v}\right) = \operatorname {s o f t m a x} \left(\frac {\mathbf {F} ^ {q} \left(\mathbf {F} ^ {k}\right) ^ {\top}}{\sqrt {K}}\right) \mathbf {F} ^ {v} \tag {9}
117
+ $$
118
+
119
+ The softmax of dot product between query and key matrix $\mathbf{F}^q (\mathbf{F}^k)^\top \in \mathbb{R}^{B\times HW\times HW}$ calculates the similarity scores in the embedding space among features across the spatial dimension. This encodes the spatial relationships of input features and leads to an enhanced output feature representation $\mathbf{F}_{\mathrm{A}}$ . Besides, $\sqrt{K}$ in the denominator is to stabilize the gradient. In practice, we adopt a multi-head attention to model the feature relation in the embedding subspaces:
120
+
121
+ $$
122
+ \mathbf {F} _ {\mathrm {M H A}} = \operatorname {M u l t i H e a d A t t} \left(\mathbf {F} ^ {q}, \mathbf {F} ^ {k}, \mathbf {F} ^ {v}\right) = \left[ \mathbf {F} _ {1}, \dots , \mathbf {F} _ {J} \right] \mathbf {W}, \quad \mathbf {F} _ {j} = \operatorname {A t t} \left(\mathbf {F} _ {j} ^ {q}, \mathbf {F} _ {j} ^ {k}, \mathbf {F} _ {j} ^ {v}\right) \tag {10}
123
+ $$
124
+
125
+ In a $J$ -head attention, the aforementioned similarity scores in the $K' = \frac{K}{J}$ dimensional embedding subspace are calculated using the query, key and value from $j$ -th head, i.e. $\{\mathbf{F}_j^q, \mathbf{F}_j^k, \mathbf{F}_j^v\} \subset \mathbb{R}^{B \times HW \times K'}$ . The output features $\mathbf{F}_j$ of each head are computed following Eq. 9. All the output features $\{\mathbf{F}_1, \dots, \mathbf{F}_J\}$ are concatenated back into $K$ dimension embedding and further processed with a linear layer $\mathbf{W} \in \mathbb{R}^{K \times K}$ to generate multi-head output features $\mathbf{F}_{\mathrm{MHA}}$ . Such multi-head attention settings could provide more diverse feature relation without introducing extra parameters.
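+
+ The module is close to a standard Transformer self-attention layer over the $HW$ cell positions. A minimal PyTorch sketch is given below; it uses the built-in `nn.MultiheadAttention` (whose fused query/key/value projections play the role of $\mathbf{W}^q, \mathbf{W}^k, \mathbf{W}^v$ and $\mathbf{W}$), and the class and argument names are illustrative rather than the authors' implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class CellRelation(nn.Module):
+     """Sketch of Eqs. 7-10: self-attention over the flattened distance map,
+     with the positional encoding added only to the query/key input."""
+
+     def __init__(self, num_clusters: int, num_heads: int = 8):
+         super().__init__()
+         # num_clusters (K) must be divisible by num_heads (J), e.g. K=64, J=8
+         self.attn = nn.MultiheadAttention(num_clusters, num_heads, batch_first=True)
+
+     def forward(self, dist_map: torch.Tensor, pos: torch.Tensor) -> torch.Tensor:
+         # dist_map: (B, H, W, K) cell code map; pos: (1, H, W, K) positional encoding
+         B, H, W, K = dist_map.shape
+         f_i = (dist_map + pos).reshape(B, H * W, K)          # Eq. 7: query/key input
+         f_ip = dist_map.reshape(B, H * W, K)                 # Eq. 7: value input
+         out, _ = self.attn(query=f_i, key=f_i, value=f_ip)   # Eqs. 8-10
+         return out.reshape(B, H, W, K)
+ ```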
+
+ # 4.3 INTEGRATING THE CONSTELLATION MODEL WITH CNNS
+
+ Our constellation model has the capability to capture explicit structured features and encodes spatial relations among the cell features. The output features yield informative visual cues which are able to strengthen the convolutional features. Thus, as shown in Figure 1, we place the constellation model after the convolution operation to extract its unique explicit features and concatenate them with the original convolutional feature map. A following $1 \times 1$ convolutional layer is applied to the concatenated features to restore the number of channels of the convolutional feature map. In Table 3, we provide evidence that merging features from the constellation model into the CNN backbone can significantly improve the representation ability. In contrast, increasing the channels in CNNs alone to double the parameters (second row in Table 3) improves the performance only marginally. Optionally, we find it useful to adopt an auxiliary loss when training the constellation model in deeper networks (e.g. ResNet-12): on top of each constellation model, we apply a standard classification loss to obtain additional regularization.
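+
+ A minimal PyTorch sketch of this integration is shown below. The `constellation` argument stands for the clustering and cell relation modules of Secs. 4.1 and 4.2; all names are illustrative, and the exact placement of the block in the actual model follows Appendix A.4.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ConstellationBlock(nn.Module):
+     """Concatenate constellation features with the convolutional feature map
+     and restore the channel count with a 1x1 convolution (Sec. 4.3)."""
+
+     def __init__(self, channels: int, num_clusters: int, constellation: nn.Module):
+         super().__init__()
+         self.constellation = constellation
+         self.fuse = nn.Conv2d(channels + num_clusters, channels, kernel_size=1)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         # x: (B, C, H, W) convolutional feature map; cells u_i live at each (h, w)
+         cells = x.permute(0, 2, 3, 1)                    # (B, H, W, C)
+         parts = self.constellation(cells)                # (B, H, W, K) cell code map
+         parts = parts.permute(0, 3, 1, 2)                # (B, K, H, W)
+         return self.fuse(torch.cat([x, parts], dim=1))   # back to C channels
+ ```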
+
+ # 4.4 WHY CLUSTERING AND SELF-ATTENTION (CLUSTERING MAP + POSITIONAL ENCODING)?
+
+ As described in Sections 1 and 2, classical constellation models (Fergus et al., 2003; Felzenszwalb & Huttenlocher, 2005) extract parts with their spatial relationships; they are expressive but do not produce competitive results on modern image benchmarks. CNN models (Krizhevsky et al., 2012; He et al., 2016) attain remarkable results on large-scale image benchmarks (Deng et al., 2009) but are limited when training data is scarce. We take inspiration from the traditional constellation models, but with a realization that overcomes their previous modeling limitations.
+
+ The main contribution of our work is a constellation module/block that performs cell-wise clustering, followed by self-attention on the clustering distance map plus positional encoding. This separates our work from previous attempts, e.g. the non-local block (Wang et al., 2018), in which long-range non-linear averaging is performed on the convolutional features (with neither clustering nor positional encoding for the spatial configuration). The main properties of our constellation block are: (1) a cell-based dense representation, as opposed to the sparse part representation in Weber et al. (2000), so that the cells are recursively modeled in the self-attention unit in a modularized and general-purpose way; (2) clustering to generate the cell code (codebook learning), which attains abstraction and is not dependent on the CNN feature dimensions; (3) positional encoding (as in Carion et al. (2020)) for the cells to encode their spatial locations; (4) a tokenized representation of the cells as expressive parts (code/clustering distance map + positional encoding); (5) self-attention to jointly model the cell code and positional encoding, capturing the relationships between the parts together with their spatial configurations.
+
+ # 5 EXPERIMENT
+
+ # 5.1 DATASETS
+
+ We adopt three standard benchmark datasets that are widely used in few-shot learning: the CIFAR-FS dataset (Bertinetto et al., 2018), the FC100 dataset (Oreshkin et al., 2018), and the mini-ImageNet dataset (Vinyals et al., 2016). Details about the dataset settings in few-shot learning are in Appendix A.2.
+
+ # 5.2 NETWORK WITH MULTI-BRANCH
+
+ We build ConstellationNet on two ProtoNet variants, namely Conv-4 and ResNet-12, which are commonly used in few-shot learning. Details of the networks and the optimization are in the Appendix.
+
+ We develop a new technique, Multi-Branch, to optimize the standard classification loss and the prototypical loss simultaneously. We find that the two training schemes, the standard classification scheme and the prototypical scheme, can be companions rather than in conflict. Details of the two schemes can be found in Appendix A.1. Different from the standard network backbones used in prior work, our embedding $\phi (\mathbf{x})$ is separated into two branches after a shared stem (Y-shape). Details of our multi-branch design are elaborated in Appendix A.10. A detailed ablation study is described in Table 3.
+
+ Feature Augmentation. During the meta-testing stage, we discover that concatenating the features from before the average pooling to the final output can improve classification accuracy; a sketch is given below. The advantage of this technique is that no additional training or model parameters are introduced.
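+
+ The sketch below illustrates this trick under the assumption that the penultimate feature map is simply flattened before concatenation; the function name and tensor shapes are illustrative.
+
+ ```python
+ import torch
+
+ def augment_embedding(pre_pool: torch.Tensor, pooled: torch.Tensor) -> torch.Tensor:
+     """Meta-test feature augmentation: concatenate the flattened pre-pooling
+     feature map with the pooled embedding. No new parameters are introduced."""
+     # pre_pool: (B, C, H, W) feature map before average pooling; pooled: (B, C)
+     return torch.cat([pooled, pre_pool.flatten(1)], dim=1)  # (B, C + C*H*W)
+ ```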
+
+ # 5.3 RESULTS ON STANDARD BENCHMARKS
+
+ Tables 1 and 2 summarize the results of the few-shot classification tasks on mini-ImageNet, and on CIFAR-FS and FC100, respectively. Our method shows a notable improvement over several strong baselines in various settings. ConstellationNet significantly improves the performance of shallow networks (Conv-4). In Table 2, our model outperforms SIB (Hu et al., 2020) by $0.6\%$ in 1-shot and $5.6\%$ in 5-shot. In Table 1, our model outperforms MetaOptNet (Lee et al., 2019) by $5.95\%$ in 1-shot and $6.24\%$ in 5-shot. For deep networks with rich features, the constellation module still contributes to the performance, showing its complementary advantage to convolution. Our ResNet-12 model beats the 1-shot results of Lee et al. (2019) by $2.7\%$ on FC100, $3.4\%$ on CIFAR-FS, and $1.72\%$ on mini-ImageNet. The consistent improvement on both shallow and deep networks across all three datasets shows the generality of our method. Our ConstellationNet is orthogonal to the margin-loss-based methods (Liu et al., 2020; Li et al., 2020), and we also do not use extra cross-modal information (Xing et al., 2019; Li et al., 2020). On the contrary, our model enhances the embedding generalization ability by incorporating its own part-based representation. Additionally, to verify the orthogonality of our method, we adapt the negative margin loss following Liu et al. (2020) to our Conv-4 models in
+
+ Table 1: Comparison to prior work on mini-ImageNet. Average 5-way classification accuracies (%) on the mini-ImageNet meta-test split are reported with $95\%$ confidence intervals. Results of prior works are adopted from Lee et al. (2019) and the original papers. $\dagger$ indicates the use of extra cross-modal information.
+
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Backbone</td><td colspan="2">mini-ImageNet 5-way</td></tr><tr><td>1-shot</td><td>5-shot</td></tr><tr><td>Meta-Learning LSTM (Ravi &amp; Larochelle, 2016)</td><td>Conv-4</td><td>43.44 ± 0.77</td><td>60.60 ± 0.71</td></tr><tr><td>Matching Networks (Vinyals et al., 2016)</td><td>Conv-4</td><td>43.56 ± 0.84</td><td>55.31 ± 0.73</td></tr><tr><td>Prototypical Networks (Snell et al., 2017)</td><td>Conv-4</td><td>49.42 ± 0.78</td><td>68.20 ± 0.66</td></tr><tr><td>Transductive Prop Nets (Liu et al., 2018)</td><td>Conv-4</td><td>55.51 ± 0.86</td><td>69.86 ± 0.65</td></tr><tr><td>MetaOptNet (Lee et al., 2019)</td><td>Conv-4</td><td>52.87 ± 0.57</td><td>68.76 ± 0.48</td></tr><tr><td>Negative Margin (Liu et al., 2020)</td><td>Conv-4</td><td>52.84 ± 0.76</td><td>70.41 ± 0.66</td></tr><tr><td>ConstellationNet (ours)</td><td>Conv-4</td><td>58.82 ± 0.23</td><td>75.00 ± 0.18</td></tr><tr><td>SNAIL (Mishra et al., 2018)</td><td>ResNet-12</td><td>55.71 ± 0.99</td><td>68.88 ± 0.92</td></tr><tr><td>TADAM (Oreshkin et al., 2018)</td><td>ResNet-12</td><td>58.50 ± 0.30</td><td>76.70 ± 0.30</td></tr><tr><td>TapNet (Yoon et al., 2019)</td><td>ResNet-12</td><td>61.65 ± 0.15</td><td>76.36 ± 0.10</td></tr><tr><td>Variational FSL (Zhang et al., 2019)</td><td>ResNet-12</td><td>61.23 ± 0.26</td><td>77.69 ± 0.17</td></tr><tr><td>MetaOptNet (Lee et al., 2019)</td><td>ResNet-12</td><td>62.64 ± 0.61</td><td>78.63 ± 0.46</td></tr><tr><td>CAN (Hou et al., 2019)</td><td>ResNet-12</td><td>63.85 ± 0.48</td><td>79.44 ± 0.34</td></tr><tr><td>SLA-AG (Lee et al., 2020)</td><td>ResNet-12</td><td>62.93 ± 0.63</td><td>79.63 ± 0.47</td></tr><tr><td>Meta-Baseline (Chen et al., 2020)</td><td>ResNet-12</td><td>63.17 ± 0.23</td><td>79.26 ± 0.17</td></tr><tr><td>AM3 (Xing et al., 2019)†</td><td>ResNet-12</td><td>65.21 ± 0.30</td><td>75.20 ± 0.27</td></tr><tr><td>ProtoNets + TRAML (Li et al., 2020)</td><td>ResNet-12</td><td>60.31 ± 0.48</td><td>77.94 ± 0.57</td></tr><tr><td>AM3 + TRAML (Li et al., 2020)†</td><td>ResNet-12</td><td>67.10 ± 0.52</td><td>79.54 ± 0.60</td></tr><tr><td>Negative Margin (Liu et al., 2020)</td><td>ResNet-12</td><td>63.85 ± 0.81</td><td>81.57 ± 0.56</td></tr><tr><td>ConstellationNet (ours)</td><td>ResNet-12</td><td>64.89 ± 0.23</td><td>79.95 ± 0.17</td></tr></table>
+
+ Table 2: Comparison to prior work on FC100 and CIFAR-FS. Average 5-way classification accuracies (%) on CIFAR-FS and FC100 meta-test split are reported with $95\%$ confidence intervals. Results of prior works are adopted from Lee et al. (2019) and original papers.
+
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Backbone</td><td colspan="2">CIFAR-FS 5-way</td><td colspan="2">FC100 5-way</td></tr><tr><td>1-shot</td><td>5-shot</td><td>1-shot</td><td>5-shot</td></tr><tr><td>MAML (Finn et al., 2017)</td><td>Conv-4</td><td>58.9 ± 1.9</td><td>71.5 ± 1.0</td><td>-</td><td>-</td></tr><tr><td>Prototypical Networks (Snell et al., 2017)</td><td>Conv-4</td><td>55.5 ± 0.7</td><td>72.0 ± 0.6</td><td>-</td><td>-</td></tr><tr><td>Relation Networks (Sung et al., 2018)</td><td>Conv-4</td><td>55.0 ± 1.0</td><td>69.3 ± 0.8</td><td>-</td><td>-</td></tr><tr><td>R2D2 (Bertinetto et al., 2018)</td><td>Conv-4</td><td>65.3 ± 0.2</td><td>79.4 ± 0.1</td><td>-</td><td>-</td></tr><tr><td>SIB (Hu et al., 2020)</td><td>Conv-4</td><td>68.7 ± 0.6</td><td>77.1 ± 0.4</td><td>-</td><td>-</td></tr><tr><td>ConstellationNet (ours)</td><td>Conv-4</td><td>69.3 ± 0.3</td><td>82.7 ± 0.2</td><td>-</td><td>-</td></tr><tr><td>Prototypical Networks (Snell et al., 2017)</td><td>ResNet-12</td><td>72.2 ± 0.7</td><td>83.5 ± 0.5</td><td>37.5 ± 0.6</td><td>52.5 ± 0.6</td></tr><tr><td>TADAM (Oreshkin et al., 2018)</td><td>ResNet-12</td><td>-</td><td>-</td><td>40.1 ± 0.4</td><td>56.1 ± 0.4</td></tr><tr><td>MetaOptNet-RR (Lee et al., 2019)</td><td>ResNet-12</td><td>72.6 ± 0.7</td><td>84.3 ± 0.5</td><td>40.5 ± 0.6</td><td>55.3 ± 0.6</td></tr><tr><td>MetaOptNet-SVM (Lee et al., 2019)</td><td>ResNet-12</td><td>72.0 ± 0.7</td><td>84.2 ± 0.5</td><td>41.1 ± 0.6</td><td>55.5 ± 0.6</td></tr><tr><td>ConstellationNet (ours)</td><td>ResNet-12</td><td>75.4 ± 0.2</td><td>86.8 ± 0.2</td><td>43.8 ± 0.2</td><td>59.7 ± 0.2</td></tr></table>
+
+ Appendix A.8. We observe that ConstellationNet with the negative margin brings a $0.52\%$ improvement over plain ConstellationNet, and obtains a $6.93\%$ gain over the baseline on mini-ImageNet.
+
+ # 6 MODEL ANALYSIS
+
+ # 6.1 ARCHITECTURE ALTERNATIVES
+
+ In Table 3, we first study the role of each module in ConstellationNet, where the number of parameters is controlled to be approximately equivalent to the baseline's size. Our constellation model brings $6.41\%$ and $2.59\%$ improvements over the baseline on the 1-shot Conv-4 and ResNet-12 results. Combined with our multi-branch training procedure, the model improves by an additional $1.34\%$ and $1.26\%$ on 1-shot Conv-4 and ResNet-12, respectively. Finally, feature augmentation from the penultimate layer to the final output embedding brings additional $0.45\%$ and $0.27\%$ improvements on the two variants.
+
+ We also test the baseline model with extra channels in Table 3. The new model shows only slight improvements over the original baseline, and is outperformed by our ConstellationNet by a large margin. We also obtain WRN-28-10 baseline results to validate our improvement: even when the ResNet baseline is made deeper and wider, our ConstellationNet still outperforms this strong baseline. In Figure 2 (e), we further study whether the performance gap between ConstellationNet and the baseline can be reduced by simply altering the baseline's model complexity, e.g. by using more convolution channels. Although the baseline accuracy trends upward as the number of model parameters gradually increases, the performance gap remains significant. This validates our concept that modeling hierarchical part structures can greatly benefit the features learned from the convolution operation and yields a more robust feature representation. In addition, applying self-attention on the distance map (6th
+
+ Table 3: Effectiveness of modules. Average classification accuracies $(\%)$ on the mini-ImageNet meta-test split. We compare our ConstellationNet with alternative architectures, including the baseline and a modified baseline with extra channels, based on Conv-4 and ResNet-12. We also include a baseline with a WideResNet-28-10 (Zagoruyko & Komodakis, 2016) backbone for comparison.
+
+ <table><tr><td rowspan="2">Baseline</td><td rowspan="2">Cell Feature Clustering</td><td rowspan="2">Cell Relation Modeling</td><td rowspan="2">Multi Branch</td><td rowspan="2">Feature Augment</td><td rowspan="2">Extra Channels</td><td rowspan="2">1x1 Convolution</td><td rowspan="2">#Params (Conv-4/Res-12)</td><td colspan="2">Conv-4</td><td>ResNet-12</td></tr><tr><td>1-shot</td><td>5-shot</td><td>1-shot</td></tr><tr><td>✓</td><td></td><td></td><td></td><td></td><td></td><td></td><td>117K/8.0M</td><td>50.62 ± 0.23</td><td>68.40 ± 0.19</td><td>60.77 ± 0.22</td></tr><tr><td>✓</td><td></td><td></td><td></td><td></td><td>✓</td><td></td><td>222K/16M</td><td>51.76 ± 0.22</td><td>69.54 ± 0.18</td><td>61.45 ± 0.22</td></tr><tr><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td>146K/8.3M</td><td>53.34 ± 0.23</td><td>70.61 ± 0.19</td><td>62.24 ± 0.23</td></tr><tr><td>✓</td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td>184K/9.7M</td><td>55.92 ± 0.23</td><td>73.02 ± 0.18</td><td>62.75 ± 0.23</td></tr><tr><td>✓</td><td></td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>192K/8.4M</td><td>55.46 ± 0.23</td><td>72.52 ± 0.18</td><td>61.54 ± 0.24</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td>200K/8.4M</td><td>57.03 ± 0.23</td><td>74.09 ± 0.18</td><td>63.36 ± 0.23</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>200K/8.4M</td><td>58.37 ± 0.23</td><td>74.52 ± 0.18</td><td>64.62 ± 0.23</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>200K/8.4M</td><td>58.82 ± 0.23</td><td>75.00 ± 0.18</td><td>64.89 ± 0.23</td></tr><tr><td colspan="7">Baseline (WideResNet-28-10 backbone) ✓</td><td>36.5M</td><td colspan="3">1-shot: 61.54 ± 0.25, 5-shot: 79.41 ± 0.23</td></tr></table>
+
+ row: $57.03\%$ on Conv-4, 1-shot) achieves better performance than directly applying it to the original cell features (i.e. the convolutional feature map) (4th row: $55.92\%$ on Conv-4, 1-shot). We also tried replacing the cell feature clustering module with a 1x1 convolution layer whose output dimension equals the number of clusters (5th row: $55.46\%$ on Conv-4, 1-shot); this is worse than our results (6th row) as well. We observe that the 1x1 convolution layer is less expressive than the cell feature clustering module, making it difficult to extract enough context information during cell relation modeling.
+
+ # 6.2 MODULES ANALYSIS
+
+ ![](images/ad78f359d2515ef3db9fbb7fdaa038d66646c0d21fad2d86a37992337d223b84.jpg)
+ Figure 2: Modules analysis. (a, b, c, d) We study the effectiveness of changing the number of clusters, the number of heads in the attention layer, and the layer indices with constellation modules, based on Conv-4. (e) We demonstrate that the performance gain of our ConstellationNet is unmatched by increasing the model complexity of our baselines. All experiments are done on mini-ImageNet.
+
+ ![](images/a47c331855f16a21436777bede62c69e7f744d27bbbee06119c8737972776314.jpg)
+
+ ![](images/2c80f1fa530e936f15cacb5f16a55bceadcff33915d0644f82519cbbba38fbad.jpg)
+
+ ![](images/4418c3770817ccda9a81c8d12b2f864043120d53c86e80381610128818c285ff.jpg)
+
+ ![](images/413ebd7b44b009a6a1ca2b59ec1c0fd2c62351026278ab67c207d68ded79749e.jpg)
+
+ In Figure 2 (a), we vary the number of clusters adopted in all layers and observe the performance change. We find that increasing the number of clusters improves the accuracy in general, and that setting the number of clusters to 64 is optimal in terms of both model size and classification performance. Figure 2 (b) shows that the number of attention heads does not affect performance as much as the number of clusters; 8-head attention obtains a $1.80\%$ performance gain in the 1-shot setting compared to 1-head attention. In Figure 2 (c, d), we also study the effectiveness of the clustering algorithm when applied to different layers. The results show that both early features and high-level features benefit from introducing the clustering algorithm into the original CNN architecture.
+
+ # 6.3 VISUALIZATION
+
+ Figure 3 shows the visualization of the cluster centers in each layer of the Conv-4 model on mini-ImageNet. In the upper part of the figure, each image shows patches corresponding to the cell features nearest to a cluster center (i.e. with the lowest Euclidean distance). We observe that clusters in early layers (e.g. layers 1, 2) represent simple low-level patterns, while clusters in higher layers (e.g. layers 3, 4) indicate more complex structures and parts. In the lower part of the figure, we choose two cluster centers from layer 4 for further interpretation: the left one (green box) could represent legs, since it consists of various types of legs from humans, dogs and other animals. For the right one (red box), most of the nearest cell features are parts with birds' heads or beetles, which share a dotted structure (i.e. black dots on beetles / eyes on birds' heads).
+
+ The left side of Figure 4 shows the visualization of cell features that are assigned to different clusters. For each image, we extract the assignment maps corresponding to three cluster centers generated in the last constellation module of Conv-4 and find the cell features with the highest assignments within each assignment map. The locations of these cell features are projected back into the original image space, marked as points "·" in three different colors to show the three feature clusters. For a given class of images, the same cluster centers are selected for comparison across 6 samples. As shown in Figure 4, we observe that part information of each class is explicitly discovered. For the bird
+
+ ![](images/17a7c705c801a7ce3c15407448e561d7ccf06a5f6c89f701ab3497e452b78705.jpg)
+ Layer 1
+
+ ![](images/9106dec65b4720ea7f50649685dd7478780bacbfe93be59ff1b95121f546c0a8.jpg)
+ Layer 2
+
+ ![](images/1b7dba4308bd7efa2dcd0b755ce1456bb13571d55f1d005c998d8b95ec638fde.jpg)
+ Layer 3
+
+ ![](images/87f8281167fd36733d1dfead1f6b3c14559ec217a7c90d241a857447bb0ac268.jpg)
+ Layer 4
+
+ ![](images/539c7fd3e34349c1c7e534251b5aedc6cf7d45bc21f34e117361ae5f7dc53d26.jpg)
+
+ ![](images/0ef917f863ab14db6d36c549e6da612b1c367cada978487fa4a801f4a118e4dd.jpg)
+ Unicycle wheels (w/ human legs)
+ Dog's legs
+ Other legs
+ Figure 3: Visualization of cluster centers. (Upper) We visualize four cluster centers in each layer by showing patches associated with the cell features that have the nearest distance to the cluster center. (Lower) Identifying parts from two cluster centers in layer 4: the left one (green box) represents various types of legs; the right one (red box) mostly shows beetles and birds' heads, sharing a dotted structure.
+
+ ![](images/37cbb5b41c3dce6138a07a2246c392ed014cc7fae968f133f4d06cc35a3d2d61.jpg)
+ Human legs
+
+ ![](images/cc090c17558d80f23231d1c3712fb46aafe9c7bf68574fc279a190cc291ee435.jpg)
+ Beetles
+
+ ![](images/b5081d614465b4b8b4d5179959178c10a886ad0169c8eadcb199379551fd116b.jpg)
+ Bird's head
+
+ ![](images/d782cf5575a504e109309c9d83ce6694b3c1e73fc94095635cc3311058ebbf52.jpg)
+ Figure 4: Visualization of the cell assignment and attention maps. (Left) Each color represents a cluster, and each point, marked as "·", represents a cell assigned to a cluster center. We demonstrate 6 samples for each class (bird, dog and tank). (Right) We visualize the attention maps of one query feature (at the location of the red point in the left part) with all key features. The middle part shows the attention maps corresponding to the 8 heads in the multi-head attention. The right part shows an overlay of all attention maps.
+
+ ![](images/485a20d87468828fce8fdd98fbda41024b0f1e71ad54562435d06f1d3cd0c449.jpg)
+
+ ![](images/ca29778f2a0229a0e5ecb930245b1e1676810e09eb02227aad80836ce9761037.jpg)
+
+ ![](images/66e920897ae28547b1a3ace73582d203624a70d7f2896bb94d520ae98dc20378.jpg)
+
+ ![](images/27f28e6923ad860ed1df9543595fcdaa39c91782e34c79aeefee33b8dd647998.jpg)
+
+ ![](images/d88bca04dfd767f631f5bfea02a9930b5a5fcdb97b0e85f962a1b5a7d22733ab.jpg)
+
+ category, we can see different parts in each image, including the head (cyan "·"), body (purple "·") and tail (yellow "·"). For the dog category, we see parts including the head (red "·"), legs (green "·") and body (blue "·"). For the tank category, we see parts such as the track (light blue "·") and the turret (pink "·").
+
+ The right side of Figure 4 visualizes the attention maps in the cell relation module. We use the last constellation module in the ResNet-12 model for visualization, since it captures high-level features that better represent parts. We choose one query feature at the center of the object and show its attention maps over all key features. The middle part of the figure shows the attention maps corresponding to the 8 heads in the multi-head attention. We observe that some parts are identified, such as the head (second map in the first row), legs (first two maps in the second row), buttock (first map in the first row) and body (second map in the second row). A merged attention map obtained by overlaying all 8 attention maps is presented in the right part of the figure. It indicates that all the attention heads together can extract the features of the whole object, which is useful for the final classification.
+
+ # 7 CONCLUSION
+
+ In this paper, we present ConstellationNet, which introduces an explicit feature clustering procedure with relation learning via self-attention. We implement a mini-batch soft $k$-means algorithm to capture the cell feature distribution. With integrated implicit (standard CNN modules) and explicit (cell feature clustering + cell relation modeling) representations, our proposed ConstellationNet achieves significant improvements over the competing methods on few-shot classification benchmarks.
+
+ # ACKNOWLEDGMENTS
+
+ This work is funded by NSF IIS-1618477 and NSF IIS-1717431. We thank Qualcomm Inc. for award support. We thank Kwonjoon Lee, Tiange Luo and Hao Su for valuable feedback.
+
+ # REFERENCES
+
+ Luca Bertinetto, Joao F Henriques, Philip HS Torr, and Andrea Vedaldi. Meta-learning with differentiable closed-form solvers. arXiv preprint arXiv:1805.08136, 2018.
+ Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020.
+ Yinbo Chen, Xiaolong Wang, Zhuang Liu, Huijuan Xu, and Trevor Darrell. A new meta-baseline for few-shot learning. arXiv preprint arXiv:2003.04390, 2020.
+ Adam Coates and Andrew Y Ng. Learning feature representations with k-means. In Neural networks: Tricks of the trade, pp. 561-580. Springer, 2012.
+ Navneet Dalal and Bill Triggs. Histograms of oriented gradients for human detection. In CVPR, 2005.
+ Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pp. 248-255. IEEE, 2009.
+ Li Fei-Fei, Rob Fergus, and Pietro Perona. One-shot learning of object categories. IEEE transactions on pattern analysis and machine intelligence, 28(4):594-611, 2006.
+ Pedro F Felzenszwalb and Daniel P Huttenlocher. Pictorial structures for object recognition. International journal of computer vision, 61(1):55-79, 2005.
+ Pedro F Felzenszwalb, Ross B Girshick, David McAllester, and Deva Ramanan. Object detection with discriminatively trained part-based models. IEEE transactions on pattern analysis and machine intelligence, 32(9):1627-1645, 2009.
+ Robert Fergus, Pietro Perona, and Andrew Zisserman. Object class recognition by unsupervised scale-invariant learning. In CVPR, 2003.
+ Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. arXiv preprint arXiv:1703.03400, 2017.
+ Weifeng Ge, Xiangru Lin, and Yizhou Yu. Weakly supervised complementary parts models for fine-grained image classification from the bottom up. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3034-3043, 2019.
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016.
+ Geoffrey E Hinton, Sara Sabour, and Nicholas Frosst. Matrix capsules with EM routing. In ICLR, 2018.
+ Ruibing Hou, Hong Chang, Bingpeng Ma, Shiguang Shan, and Xilin Chen. Cross attention network for few-shot classification. In Advances in Neural Information Processing Systems, pp. 4005-4016, 2019.
+ Shell Xu Hu, Pablo G Moreno, Yang Xiao, Xi Shen, Guillaume Obozinski, Neil D Lawrence, and Andreas Damianou. Empirical bayes transductive meta-learning with synthetic gradients. arXiv preprint arXiv:2004.12696, 2020.
+ Adam Kosiorek, Sara Sabour, Yee Whye Teh, and Geoffrey E Hinton. Stacked capsule autoencoders. In Advances in Neural Information Processing Systems, pp. 15486-15496, 2019.
+ Philipp Krahenbuhl, Carl Doersch, Jeff Donahue, and Trevor Darrell. Data-dependent initializations of convolutional neural networks. arXiv preprint arXiv:1511.06856, 2015.
+ Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
+ Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, 2012.
+ Svetlana Lazebnik, Cordelia Schmid, and Jean Ponce. Beyond bags of features: Spatial pyramid matching for recognizing natural scene categories. In CVPR, 2006.
+ Hankook Lee, Sung Ju Hwang, and Jinwoo Shin. Self-supervised label augmentation via input transformations. In ICML, 2020.
+ Kwonjoon Lee, Subhransu Maji, Avinash Ravichandran, and Stefano Soatto. Meta-learning with differentiable convex optimization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 10657-10665, 2019.
+ Aoxue Li, Weiran Huang, Xu Lan, Jiashi Feng, Zhenguo Li, and Liwei Wang. Boosting few-shot learning with adaptive margin loss. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12576-12584, 2020.
+ Bin Liu, Yue Cao, Yutong Lin, Qi Li, Zheng Zhang, Mingsheng Long, and Han Hu. Negative margin matters: Understanding margin in few-shot classification. arXiv preprint arXiv:2003.12060, 2020.
+ Yanbin Liu, Juho Lee, Minseop Park, Saehoon Kim, Eunho Yang, Sung Ju Hwang, and Yi Yang. Learning to propagate labels: Transductive propagation network for few-shot learning. arXiv preprint arXiv:1805.10002, 2018.
+ David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60(2):91-110, 2004.
+ Nikhil Mishra, Mostafa Rohaninejad, Xi Chen, and Pieter Abbeel. A simple neural attentive meta-learner. arXiv preprint arXiv:1707.03141, 2017.
+ Nikhil Mishra, Mostafa Rohaninejad, Xi Chen, and Pieter Abbeel. A simple neural attentive meta-learner. In ICLR, 2018.
+ Tsendsuren Munkhdalai, Xingdi Yuan, Soroush Mehri, and Adam Trischler. Rapid adaptation with conditionally shifted neurons. arXiv preprint arXiv:1712.09926, 2017.
+ Boris N Oreshkin, Alexandre Lacoste, and Pau Rodriguez. Tadam: Task dependent adaptive metric for improved few-shot learning. arXiv preprint arXiv:1805.10123, 2018.
+ Yuxin Peng, Xiangteng He, and Junjie Zhao. Object-part attention model for fine-grained image classification. IEEE Transactions on Image Processing, 27(3):1487-1500, 2017.
+ Lei Qi, Xiaoqiang Lu, and Xuelong Li. Exploiting spatial relation for fine-grained image classification. Pattern Recognition, 91:47-55, 2019.
+ Sachin Ravi and Hugo Larochelle. Optimization as a model for few-shot learning. 2016.
+ Sara Sabour, Nicholas Frosst, and Geoffrey E Hinton. Dynamic routing between capsules. In Advances in neural information processing systems, pp. 3856-3866, 2017.
+ Ruslan Salakhutdinov, Joshua B Tenenbaum, and Antonio Torralba. Learning with hierarchical-deep models. IEEE transactions on pattern analysis and machine intelligence, 35(8):1958-1971, 2012.
+ David Sculley. Web-scale k-means clustering. In Proceedings of the 19th international conference on World wide web, pp. 1177-1178, 2010.
+ Marcel Simon and Erik Rodner. Neural activation constellations: Unsupervised part model discovery with convolutional networks. In Proceedings of the IEEE international conference on computer vision, pp. 1143-1151, 2015.
+ Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In ICLR, 2015.
+ Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. In Advances in Neural Information Processing Systems, pp. 4077-4087, 2017.
+ Erik B Sudderth, Antonio Torralba, William T Freeman, and Alan S Willsky. Learning hierarchical models of scenes, objects, and parts. In ICCV, volume 2, 2005.
+ Flood Sung, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS Torr, and Timothy M Hospedales. Learning to compare: Relation network for few-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1199-1208, 2018.
+ Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In CVPR, 2015.
+ Pavel Tokmakov, Yu-Xiong Wang, and Martial Hebert. Learning compositional representations for few-shot recognition. In Proceedings of the IEEE International Conference on Computer Vision, pp. 6372-6381, 2019.
+ Yao-Hung Hubert Tsai, Nitish Srivastava, Hanlin Goh, and Ruslan Salakhutdinov. Capsules with inverted dot-product attention routing. arXiv preprint arXiv:2002.04764, 2020.
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.
+ Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan Wierstra, et al. Matching networks for one shot learning. In Advances in neural information processing systems, pp. 3630-3638, 2016.
+ Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. Non-local neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 7794-7803, 2018.
+ Markus Weber, Max Welling, and Pietro Perona. Unsupervised learning of models for recognition. In ECCV, 2000.
+ Saining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In CVPR, 2017.
+ Chen Xing, Negar Rostamzadeh, Boris Oreshkin, and Pedro O O Pinheiro. Adaptive cross-modal few-shot learning. Advances in Neural Information Processing Systems, 32:4847-4857, 2019.
+ Sung Whan Yoon, Jun Seo, and Jaekyun Moon. Tapnet: Neural network augmented with task-adaptive projection for few-shot learning. arXiv preprint arXiv:1905.06549, 2019.
+ Alan L Yuille, Peter W Hallinan, and David S Cohen. Feature extraction from faces using deformable templates. International journal of computer vision, 8(2):99-111, 1992.
+ Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. arXiv preprint arXiv:1605.07146, 2016.
+ Matthew D Zeiler and Rob Fergus. Visualizing and understanding convolutional networks. In ECCV, 2014.
+ Jian Zhang, Chenglong Zhao, Bingbing Ni, Minghao Xu, and Xiaokang Yang. Variational few-shot learning. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1685-1694, 2019.
+ Song-Chun Zhu and David Mumford. A stochastic grammar of images. Now Publishers Inc, 2007.
+ Yousong Zhu, Chaoyang Zhao, Jinqiao Wang, Xu Zhao, Yi Wu, and Hanqing Lu. Coupling global structure with local parts for object detection. In Proceedings of the IEEE international conference on computer vision, pp. 4126-4134, 2017.
+
+ # A APPENDIX
+
+ # A.1 FEW-SHOT LEARNING FRAMEWORK
+
+ In this section, we introduce background concepts of meta-learning and elaborate on the few-shot learning framework used in our ConstellationNet.
+
+ Meta-Learning in Few-Shot Classification. Current few-shot learning is typically formulated as a meta-learning task (Finn et al., 2017), in which a dataset $\mathcal{D}^{\mathrm{base}}$ is used to provide commonsense knowledge and a dataset $\mathcal{D}^{\mathrm{novel}}$ is used for the few-shot classification. $\mathcal{D}^{\mathrm{base}}$ has classes $\mathcal{C}_{\mathrm{base}}$ which are disjoint from the classes $\mathcal{C}_{\mathrm{novel}}$ in $\mathcal{D}^{\mathrm{novel}}$ to ensure fairness. There are two stages, meta-training and meta-testing, in the meta-learning framework: in the meta-training stage, we train a model to learn generic features from $\mathcal{D}^{\mathrm{base}}$; in the meta-testing stage, we adapt the model on the limited training split of $\mathcal{D}^{\mathrm{novel}}$ and evaluate its performance on the test split.
+
+ ProtoNet-Based Framework. In our ConstellationNet, we adopt ProtoNet (Snell et al., 2017) as the base few-shot learning framework. In ProtoNet, the dataset $\mathcal{D}^{\mathrm{novel}}$ is represented by a series of $K$-way $N$-shot tasks $\{\mathcal{T}\}$ where each task consists of a support set and a query set, i.e. $\mathcal{T} = (\mathcal{T}^{\mathrm{supp}}, \mathcal{T}^{\mathrm{query}})$. The support set $\mathcal{T}^{\mathrm{supp}}$ contains $K$ classes and each class has $N$ examples from the training split of $\mathcal{D}^{\mathrm{novel}}$, which are used to adapt the model in the meta-testing stage. The query set $\mathcal{T}^{\mathrm{query}}$ from the test split of $\mathcal{D}^{\mathrm{novel}}$ is then used to evaluate the model.
+
+ ProtoNet attempts to learn a generic feature extractor $\phi(\mathbf{x})$ on image $\mathbf{x}$, and represents a class $k$ by the prototype $\mathbf{c}_k$, which is the average feature of the examples from the support set $\mathcal{T}^{\mathrm{supp}}$ with this class:
+
+ $$
+ \mathbf{c}_k = \frac{1}{N} \sum_{(\mathbf{x}, y) \in \mathcal{T}^{\mathrm{supp}},\, y = k} \phi(\mathbf{x}) \tag{11}
+ $$
+
+ During the meta-testing stage, we use the prototypes to compute the probability $p_k$ of a query example $\mathbf{x}' \in \mathcal{T}^{\mathrm{query}}$ belonging to class $k$ and predict its label $y'$:
+
+ $$
+ p_k = p\left(y = k \mid \mathbf{x}^{\prime}, \mathcal{T}^{\mathrm{supp}}\right) = \frac{\exp\left(d\left(\phi(\mathbf{x}^{\prime}), \mathbf{c}_k\right)\right)}{\sum_{k^{\prime}} \exp\left(d\left(\phi(\mathbf{x}^{\prime}), \mathbf{c}_{k^{\prime}}\right)\right)}, \quad y^{\prime} = \arg\max_k p_k. \tag{12}
+ $$
+
+ where $d(\cdot, \cdot)$ is a cosine similarity function (different from the Euclidean distance in Snell et al. (2017)).
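+
+ As a concrete illustration, the following PyTorch sketch implements Eqs. 11-12 for one task; the function and tensor names are illustrative, and the feature tensors are assumed to be outputs of the embedding $\phi$:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def protonet_predict(support_feats, support_labels, query_feats, num_classes):
+     # Eq. 11: prototype c_k = mean of support embeddings with label k
+     protos = torch.stack([support_feats[support_labels == k].mean(0)
+                           for k in range(num_classes)])               # (K, C)
+     # Eq. 12: softmax over cosine similarities d(phi(x'), c_k)
+     sims = F.cosine_similarity(query_feats[:, None, :], protos[None, :, :], dim=-1)
+     probs = sims.softmax(dim=-1)                                      # (Q, K)
+     return probs.argmax(dim=-1), probs                                # y', p_k
+ ```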
+
+ During the meta-training stage, there are two different training schemes. The prototypical scheme from ProtoNet uses an episodic learning strategy that also formulates the dataset $\mathcal{D}^{\mathrm{base}}$ as a series of tasks $\{\mathcal{T}\}$. The negative log-likelihood loss $\mathcal{L}(\phi)$ is optimized:
+
+ $$
+ \ell\left(\mathcal{T}^{\mathrm{supp}}, \mathcal{T}^{\mathrm{query}}\right) = \mathbb{E}_{(\mathbf{x}^{\prime}, y^{\prime}) \in \mathcal{T}^{\mathrm{query}}}\left[-\log p\left(y = y^{\prime} \mid \mathbf{x}^{\prime}, \mathcal{T}^{\mathrm{supp}}\right)\right], \tag{13}
+ $$
+
+ $$
+ \mathcal{L}(\phi) = \mathbb{E}_{\mathcal{T} = \left(\mathcal{T}^{\mathrm{supp}}, \mathcal{T}^{\mathrm{query}}\right) \sim \mathcal{D}^{\mathrm{base}}}\left[\ell\left(\mathcal{T}^{\mathrm{supp}}, \mathcal{T}^{\mathrm{query}}\right)\right]. \tag{14}
+ $$
+
+ The other is the standard classification scheme (Chen et al., 2020): it simply uses $\mathcal{D}^{\mathrm{base}}$ as a standard classification dataset $\{(\mathbf{x},y)\}$ consisting of $Q$ classes in total. Thus, a cross-entropy loss $\mathcal{L}(\phi)$ is optimized:
+
+ $$
+ \mathcal{L}(\phi) = \mathbb{E}_{(\mathbf{x}, y) \sim \mathcal{D}^{\mathrm{base}}}\left[-\log \frac{\exp\left(\mathbf{w}_y \cdot \phi(\mathbf{x})\right)}{\sum_{q} \exp\left(\mathbf{w}_q \cdot \phi(\mathbf{x})\right)}\right] \tag{15}
+ $$
+
+ where $\mathbf{w}_q$ is the linear weight for class $q$. In our ConstellationNet, we use the standard classification scheme by default. For the experiment with the multi-branch network, we use the prototypical scheme and the standard classification scheme for the separate branches.
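+
+ The two schemes reduce to two familiar loss functions. A brief sketch follows, reusing the `protonet_predict` helper above; the names are again illustrative, with `linear_head` standing for the weights $\mathbf{w}_q$:
+
+ ```python
+ import torch.nn.functional as F
+
+ def prototypical_loss(support_feats, support_labels, query_feats, query_labels, num_classes):
+     """Episodic loss of Eqs. 13-14, evaluated on one sampled task."""
+     _, probs = protonet_predict(support_feats, support_labels, query_feats, num_classes)
+     return F.nll_loss(probs.clamp_min(1e-8).log(), query_labels)
+
+ def classification_loss(feats, labels, linear_head):
+     """Standard classification scheme of Eq. 15 with a linear head."""
+     return F.cross_entropy(linear_head(feats), labels)
+ ```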
+
+ # A.2 DATASETS
+
+ The CIFAR-FS dataset (Bertinetto et al., 2018) is a few-shot classification benchmark containing 100 classes from CIFAR-100 (Krizhevsky et al., 2009). The classes are randomly split into 64, 16 and 20 classes as the meta-training, meta-validation and meta-testing sets, respectively. Each class contains 600 images of size $32 \times 32$. We adopt the split from Lee et al. (2019). The FC100 dataset (Oreshkin et al., 2018) is another benchmark based on CIFAR-100 where classes are grouped into 20 superclasses to avoid overlap between the splits. The mini-ImageNet dataset (Vinyals et al., 2016) is a common benchmark for few-shot classification containing 100 classes from ILSVRC-2012 (Deng et al., 2009). The classes are randomly split into 64, 16 and 20 classes as the meta-training, meta-validation and meta-testing sets, respectively. Each class contains 600 images of size $84 \times 84$. We follow the commonly-used split in Ravi & Larochelle (2016), Lee et al. (2019) and Chen et al. (2020). In all experiments, we conduct data augmentation on the meta-training set of all datasets to match Lee et al. (2019)'s implementation.
+
+ # A.3 NETWORK BACKBONE
+
+ Conv-4. Following Lee et al. (2019), we adopt the same network with 4 convolutional blocks. Each of the 4 blocks sequentially has a $3 \times 3$ convolutional layer, a batch normalization layer, a ReLU activation and a $2 \times 2$ max-pooling layer. The number of filters is 64 for all 4 convolutional layers.
+
+ ResNet-12. Following Chen et al. (2020), we construct each residual block with 3 consecutive convolutional blocks followed by an additional average-pooling layer, where each convolutional block has a $3 \times 3$ convolutional layer, a batch normalization layer, a leaky ReLU activation, and a max-pooling layer. The ResNet-12 network has 4 residual blocks with the filter sizes set to 64, 128, 256 and 512, respectively.
+
+ WRN-28-10. WideResNet expands the residual blocks by increasing the number of convolutional channels and layers (Zagoruyko & Komodakis, 2016). WRN-28-10 uses 28 convolutional layers with a widening factor of 10.
+
+ # A.4 CONSTELLATION MODULE CONFIGURATION
+
+ To achieve the best performance with constellation modules, we do not always fully enable them after all the convolutional layers. For Conv-4, we use constellation modules after all four convolutional layers, but the cell relation modeling module is disabled in the first two constellation modules due to its high memory consumption. For ResNet-12, we enable the constellation modules after convolutional layers 1, 7, 8 and 9, and disable the relation modeling module in the first constellation module. We use deep supervision in ResNet-12 to stabilize the training of the constellation modules.
+
+ # A.5 SELF-ATTENTION SETTINGS
+
+ We follow the common practice in Vaswani et al. (2017) and equip the attention layer with residual connections, dropout and layer normalization. The sine positional encoding follows the settings in Carion et al. (2020).
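+
+ For reference, below is a sketch of a 2D sine positional encoding in the spirit of Carion et al. (2020): half of the channels encode the vertical position and half the horizontal position. The exact frequency scaling here is illustrative rather than the authors' configuration.
+
+ ```python
+ import torch
+
+ def sine_positional_encoding(h: int, w: int, channels: int) -> torch.Tensor:
+     assert channels % 4 == 0
+     c = channels // 2                      # channels per spatial axis
+     inv_freq = 1.0 / (10000 ** (torch.arange(0, c, 2).float() / c))
+     ys = torch.arange(h).float()[:, None] * inv_freq   # (H, c/2)
+     xs = torch.arange(w).float()[:, None] * inv_freq   # (W, c/2)
+     pe = torch.zeros(h, w, channels)
+     pe[..., 0:c:2] = torch.sin(ys)[:, None, :].expand(h, w, -1)
+     pe[..., 1:c:2] = torch.cos(ys)[:, None, :].expand(h, w, -1)
+     pe[..., c::2] = torch.sin(xs)[None, :, :].expand(h, w, -1)
+     pe[..., c + 1::2] = torch.cos(xs)[None, :, :].expand(h, w, -1)
+     return pe  # (H, W, channels), added to the distance map D in Eq. 7
+ ```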
+
+ # A.6 TRAINING DETAILS
+
+ Optimization Settings. We follow the implementation in Lee et al. (2019), and use the SGD optimizer with an initial learning rate of 1, a momentum of 0.9 and a weight decay rate of $5 \times 10^{-4}$. The learning rate is reduced to 0.06, 0.012 and 0.0024 at epochs 20, 40 and 50, respectively. The inverse temperature $\beta$ is set to 100.0 in the cluster assignment step, and $\lambda$ is set to 1.0 in the centroid movement step.
+
+ # A.7 ABLATION STUDY ON THE NUMBER OF CLUSTERS
+
+ Table 4 studies the number of clusters needed for random and similar classes. The results show that the performance is affected less by the number of clusters than by the similarity between the classes, and that fewer clusters are needed for a dataset with classes of high similarity. This aligns with our intuition: only a limited number of patterns exists in such a dataset, so a small number of clusters is enough to represent its part-based information.
+
+ The FC100 training dataset consists of 60 classes that are grouped evenly into 12 superclasses. In the similar classes group, the training dataset includes 6 randomly selected superclasses (i.e., 30 classes), and models are trained with 8, 16, 32, 64 and 128 clusters. The highest accuracy occurs at 16 clusters (1-shot: $39.12\%$ with ResNet-12). In the random classes group, 30 classes are randomly
+
+ Table 4: Ablation study on the number of clusters for random and similar classes. We investigate how the similarity of images in the training dataset affects the optimal number of clusters. The first group of experiments uses a training dataset with 30 similar classes while the second group uses 30 random classes from the FC100 dataset; all experiments are performed on ResNet-12 with the constellation module.
+
+ <table><tr><td rowspan="2"># Clusters</td><td colspan="2">Similar Classes</td><td colspan="2">Random Classes</td></tr><tr><td>1-shot</td><td>5-shot</td><td>1-shot</td><td>5-shot</td></tr><tr><td>8</td><td>38.9 ± 0.2</td><td>52.8 ± 0.2</td><td>40.9 ± 0.2</td><td>54.5 ± 0.2</td></tr><tr><td>16</td><td>39.1 ± 0.2</td><td>51.8 ± 0.2</td><td>40.9 ± 0.2</td><td>54.9 ± 0.2</td></tr><tr><td>32</td><td>38.7 ± 0.2</td><td>52.3 ± 0.2</td><td>40.9 ± 0.2</td><td>54.7 ± 0.2</td></tr><tr><td>64</td><td>38.8 ± 0.2</td><td>52.3 ± 0.2</td><td>41.2 ± 0.2</td><td>54.9 ± 0.2</td></tr><tr><td>128</td><td>38.8 ± 0.2</td><td>52.1 ± 0.2</td><td>40.8 ± 0.2</td><td>54.7 ± 0.2</td></tr></table>
+
+ sampled from the original training dataset and we repeat the same experiments as above. The highest accuracy occurs at 64 clusters (1-shot: $41.22\%$ with ResNet-12), which is much more than the 16 clusters needed for images from similar classes.
+
+ # A.8 ADDITIONAL EXPERIMENTS WITH NEGATIVE MARGIN
+
+ Table 5: Additional experiments with the use of the negative margin. Average classification accuracies (%) on the mini-ImageNet meta-test split. We compare our ConstellationNet and the baseline, with and without the negative margin loss, based on Conv-4.
+
+ <table><tr><td rowspan="2">Baseline</td><td rowspan="2">Cell Feature Clustering</td><td rowspan="2">Cell Relation Modeling</td><td rowspan="2">Negative Margin</td><td colspan="2">Conv-4</td></tr><tr><td>1-shot</td><td>5-shot</td></tr><tr><td>✓</td><td></td><td></td><td></td><td>50.62 ± 0.23</td><td>68.40 ± 0.19</td></tr><tr><td>✓</td><td></td><td></td><td>✓</td><td>51.42 ± 0.23</td><td>68.84 ± 0.19</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>57.03 ± 0.23</td><td>74.09 ± 0.18</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>57.55 ± 0.23</td><td>74.49 ± 0.18</td></tr></table>
+
+ Table 5 studies the use of the negative margin loss (Liu et al., 2020) on our Conv-4 models. In the negative margin loss, we use the inner-product similarity, a temperature coefficient $\beta = 1.0$ and a negative margin $m = -0.5$, which attains the best performance improvement on our models. We do not use a fine-tuning step during meta-testing. Our baseline with the negative margin loss obtains a $0.80\%$ improvement on 1-shot and a $0.44\%$ improvement on 5-shot compared with the plain baseline. Similarly, our ConstellationNet with the negative margin loss achieves a $0.52\%$ improvement on 1-shot and a $0.40\%$ improvement on 5-shot. The consistent improvement from the negative margin loss on both the baseline and our ConstellationNet indicates that our constellation module is orthogonal to the negative margin loss, and both can boost the performance on few-shot classification.
+
+ # A.9 CLARIFICATION ON CLUSTERING PROCEDURE
+
+ In this section, we add more clarification on our cell feature clustering procedure in Sec. 4.1. During the training stage, the global cluster centers $\mathcal{V} = \{\mathbf{v}_k\}$ are updated by the cluster centers $\{\mathbf{v}_k^{\prime}\}$ computed on the current mini-batch. Each update to a cluster center $\mathbf{v}_k$ is weighted by a momentum coefficient $\eta$ determined by the value of an associated counter $s_k$, since we would like to avoid large adjustments from the current mini-batch in order to stabilize the global cluster centers. The mini-batches of examples are randomly drawn from the dataset following Sculley (2010), without any specialized design to optimize the clustering. During the evaluation stage, we fix the global cluster centers $\mathcal{V}$ in the forward step of our model, avoiding potential information leak or transduction from the test mini-batches.
+
+ # A.10 MULTI-BRANCH DETAILS
+
+ Our embedding $\phi(\mathbf{x})$ is separated into two branches after a shared stem (Y-shape), which is defined as $\phi(\mathbf{x}) = \{\phi^{\mathrm{cls}}(\mathbf{x}), \phi^{\mathrm{proto}}(\mathbf{x})\}$ with $\phi^{\mathrm{cls}}(\mathbf{x}) = g^{\mathrm{cls}}(f^{\mathrm{stem}}(\mathbf{x}))$ and $\phi^{\mathrm{proto}}(\mathbf{x}) = g^{\mathrm{proto}}(f^{\mathrm{stem}}(\mathbf{x}))$. The two branches $\phi^{\mathrm{cls}}(\mathbf{x})$ and $\phi^{\mathrm{proto}}(\mathbf{x})$ are trained by the standard classification and prototypical schemes separately, in a multi-task learning fashion. At test time, $\phi^{\mathrm{cls}}(\mathbf{x})$ and $\phi^{\mathrm{proto}}(\mathbf{x})$ are concatenated together to compute the distance between support prototypes and query images.
+
+ For our ConstellationNet, we split the network into two branches after the second convolutional block (Conv-4) or the second residual block (ResNet-12). We keep the shared stem identical to the network backbone and reduce the channels of the two separate branches to match the parameter size of the model without multi-branch.
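+
+ A minimal PyTorch sketch of this Y-shaped design is shown below; `stem`, `g_cls` and `g_proto` stand for arbitrary sub-networks, and all names are illustrative.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MultiBranchEmbedding(nn.Module):
+     """Shared stem followed by a classification branch and a prototypical
+     branch (A.10). Each branch is supervised by its own scheme during
+     meta-training; the two embeddings are concatenated at meta-test time."""
+
+     def __init__(self, stem: nn.Module, g_cls: nn.Module, g_proto: nn.Module):
+         super().__init__()
+         self.stem, self.g_cls, self.g_proto = stem, g_cls, g_proto
+
+     def forward(self, x: torch.Tensor):
+         h = self.stem(x)
+         return self.g_cls(h), self.g_proto(h)   # one output per training scheme
+
+     def embed(self, x: torch.Tensor) -> torch.Tensor:
+         phi_cls, phi_proto = self.forward(x)
+         return torch.cat([phi_cls, phi_proto], dim=1)  # used at meta-test time
+ ```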
+
+ # A.11 CONNECTION WITH CAPSULE NETWORKS
+
+ A notable development in learning explicit structured representations in an end-to-end framework is the capsule network (CapsNet) (Sabour et al., 2017). The line of works on CapsNets (Sabour et al., 2017; Hinton et al., 2018; Kosiorek et al., 2019; Tsai et al., 2020) aims to parse a visual scene in an interpretable and hierarchical way. Sabour et al. (2017) represent parts and objects with vector-based capsules and a dynamic routing mechanism. Kosiorek et al. (2019) use a stacked autoencoder architecture to model the hierarchical relation among parts, objects and scenes. Our ConstellationNet likewise maintains part modeling, by enabling the joint learning of the convolution and constellation modules to simultaneously attain implicit and explicit representations.
attentionalconstellationnetsforfewshotlearning/images.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc255e44791e375ec2afaf73db3f920975f7fc567daf497bd6aa0a9e0a2255c
+ size 645376
attentionalconstellationnetsforfewshotlearning/layout.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bcf0126fe772b8fe01599090a97e7ea523da1a63661032811403e1af44cca77
+ size 586788
auctionlearningasatwoplayergame/c67499e2-b68c-4964-a190-4aa8387dbeeb_content_list.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c113146f834c76558e9c99a9a0f1cd30819a278c1cfeae7faadff6277145ba8d
+ size 95095
auctionlearningasatwoplayergame/c67499e2-b68c-4964-a190-4aa8387dbeeb_model.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb220d7670271b49cd7be4f5ba5f8435199ab5965aaafbd78794cd925446e684
+ size 118710
auctionlearningasatwoplayergame/c67499e2-b68c-4964-a190-4aa8387dbeeb_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6866ec8d18e8f36340e4553a488ecdb1463c85bf87114a55321dc158e2318eea
+ size 1086404
auctionlearningasatwoplayergame/full.md ADDED
@@ -0,0 +1,446 @@
+ # Auction Learning as a Two-Player Game
+
+ Jad Rahme*, Samy Jelassi, S. Matthew Weinberg
+
+ Princeton University
+
+ Princeton, NJ 08540, USA
+
+ {jrahme, sjelassi, smweinberg}@princeton.edu
+
+ # ABSTRACT
+
+ Designing an incentive compatible auction that maximizes expected revenue is a central problem in Auction Design. While theoretical approaches to the problem have hit some limits, a recent research direction initiated by Duetting et al. (2019) consists in building neural network architectures to find optimal auctions. We propose two conceptual deviations from their approach which result in enhanced performance. First, we use recent results in theoretical auction design to introduce a time-independent Lagrangian. This not only circumvents the need for an expensive hyper-parameter search (as in prior work), but also provides a single metric to compare the performance of two auctions (absent from prior work). Second, the optimization procedure in previous work uses an inner maximization loop to compute optimal misreports. We amortize this process through the introduction of an additional neural network. We demonstrate the effectiveness of our approach by learning competitive or strictly improved auctions compared to prior work. Both results together further imply a novel formulation of Auction Design as a two-player game with stationary utility functions.
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Efficiently designing truthful auctions is a core problem in Mathematical Economics. Concrete examples include the sponsored search auctions conducted by companies as Google or auctions run on platforms as eBay. Following seminal work of Vickrey (Vickrey, 1961) and Myerson (Myerson, 1981), auctions are typically studied in the independent private valuations model: each bidder has a valuation function over items, and their payoff depends only on the items they receive. Moreover, the auctioneer knows aggregate information about the population that each bidder comes from, modeled as a distribution over valuation functions, but does not know precisely each bidder's valuation (outside of any information in this Bayesian prior). A major difficulty in designing auctions is that valuations are private and bidders need to be incentivized to report their valuations truthfully. The goal of the auctioneer is to design an incentive compatible auction which maximizes expected revenue.
18
+
19
+ Auction Design has existed as a rigorous mathematical field for several decades and yet, complete characterizations of the optimal auction only exist for a few settings. While Myerson's Nobel prize-winning work provides a clean characterization of the single-item optimum (Myerson, 1981), optimal multi-item auctions provably suffer from numerous formal measures of intractability (including computational intractability, high description complexity, non-monotonicity, and others) (Daskalakis et al., 2014; Chen et al., 2014; 2015; 2018; Hart & Reny, 2015; Thanassoulis, 2004).
20
+
21
+ An orthogonal line of work instead develops deep learning architectures to find the optimal auction. Duetting et al. (2019) initiated this direction by proposing RegretNet, a feed-forward architecture. They frame the auction design problem as a constrained learning problem and lift the constraints into the objective via the augmented Lagrangian method. Training RegretNet involves optimizing this Lagrangian-penalized objective, while simultaneously updating network parameters and the Lagrangian multipliers themselves. This architecture produces impressive results: recovering near-optimal auctions in several known multi-item settings, and discovering new mechanisms when a theoretical optimum is unknown.
22
+
23
+ Yet, this approach presents several limitations. On the conceptual front, our main insight is a connection to an exciting line of recent works (Hartline & Lucier, 2010; Hartline et al., 2011; Bei & Huang, 2011; Daskalakis & Weinberg, 2012; Rubinstein & Weinberg, 2018; Dughmi et al., 2017; Cai et al., 2019) on $\varepsilon$ -truthful-to-truthful reductions. On the technical front, we identify three areas for improvement. First, their architecture is difficult to train in practice because the objective is non-stationary. Specifically, the Lagrangian multipliers are time-dependent and increase following a pre-defined schedule, which requires careful hyperparameter tuning (see §3.1 for experiments illustrating this). Leveraging the aforementioned works in Auction Theory, we propose a stationary Lagrangian objective. Second, all prior work inevitably finds auctions which are not precisely incentive compatible, and does not provide a metric to compare, say, an auction with revenue 1.01 which is 0.002-truthful with one with revenue 1 which is 0.001-truthful. We argue that our stationary Lagrangian objective serves as a good metric (and that the second auction of this short example is "better" under our metric). Finally, their training procedure requires an inner-loop optimization (essentially, the bidders trying to maximize utility in the current auction), which is itself computationally expensive. We use amortized optimization to make this process more efficient.
26
+
27
+ # CONTRIBUTIONS
28
+
29
+ This paper leverages recent work in Auction Theory to formulate the learning of revenue-optimal auctions as a two-player game. We develop a new algorithm ALGnet (Auction Learning Game network) that produces competitive or better results compared to Duetting et al. (2019)'s RegretNet. In addition to the conceptual contributions, our approach yields the following improvements (as RegretNet is already learning near-optimal auctions, our improvement over RegretNet is not due to significantly higher optimal revenues).
30
+
31
+ - Easier hyper-parameter tuning: By constructing a time-independent loss function, we circumvent the need to search for an adequate parameter scheduling. Our formulation also involves fewer hyperparameters, which makes it more robust.
32
+ - A metric to compare auctions: We propose a metric to compare the quality of two auctions which are not incentive compatible.
33
+ - More efficient training: We replace the inner-loop optimization of prior work with a neural network, which makes training more efficient.
34
+ - Online auctions: Since the learning formulation is time-invariant, ALGnet is able to quickly adapt in auctions where the bidders' valuation distributions vary over time. Such a setting appears, for instance, in the online posted pricing problem studied in Bubeck et al. (2017).
35
+
36
+ Furthermore, these technical contributions together imply a novel formulation of auction learning as a two-player game (not zero-sum) between an auctioneer and a misreporter. The auctioneer is trying to design an incentive compatible auction that maximizes revenue while the misreporter is trying to identify breaches in the truthfulness of these auctions. The paper is organized as follows. Section 2 introduces the standard notions of auction design. Section 3 presents our game formulation for auction learning. Section 4 provides a description of ALGnet and its training procedure. Finally, Section 5 presents numerical evidence for the effectiveness of our approach.
37
+
38
+ # RELATED WORK
39
+
40
+ Auction design and machine learning. Machine learning and computational learning theory have been used in several ways to design auctions from samples of bidder valuations. In particular, machine learning has been used to analyze the sample complexity of designing optimal revenue-maximizing auctions. This includes single-parameter settings (Morgenstern & Roughgarden, 2015; Huang et al., 2018; Hartline & Taggart, 2019; Roughgarden & Schrijvers, 2016; Gonczarowski & Nisan, 2017; Guo et al., 2019), multi-item auctions (Dughmi et al., 2014; Gonczarowski & Weinberg, 2018), combinatorial auctions (Balcan et al., 2016; Morgenstern & Roughgarden, 2016; Syrgkanis, 2017) and allocation mechanisms (Narasimhan & Parkes, 2016). Other works have leveraged machine learning to optimize different aspects of mechanisms (Lahaie, 2011; Dütting et al., 2015). Our approach is different as we build a deep learning architecture for auction design.
41
+
42
+ Auction design and deep learning. While Duetting et al. (2019) is the first paper to design auctions through deep learning, several other papers have followed up on this work. Feng et al. (2018) extended it to budget-constrained bidders, and Golowich et al. (2018) to the facility location problem. Tacchetti et al. (2019) built architectures based on the Vickrey-Clarke-Groves mechanism. Rahme et al. (2021) used permutation-equivariant networks to design symmetric auctions. Shen et al. (2019) and Duetting et al. (2019) proposed architectures that exactly satisfy incentive compatibility but are specific to single-bidder settings. While all the previously mentioned papers consider a non-stationary objective function, we formulate a time-invariant objective that is easier to train and that makes comparisons between mechanisms possible.
45
+
46
+ # 2 AUCTION DESIGN AS A TIME-VARYING LEARNING PROBLEM
47
+
48
+ We first review the framework of auction design and the problem of finding truthful mechanisms. We then recall the learning problem proposed by Duetting et al. (2019) to find optimal auctions.
49
+
50
+ # 2.1 AUCTION DESIGN AND LINEAR PROGRAM
51
+
52
+ Auction design. We consider an auction with $n$ bidders and $m$ items. We will denote by $N = \{1, \dots, n\}$ and $M = \{1, \dots, m\}$ the set of bidders and items. Each bidder $i$ values item $j$ at a valuation denoted $v_{ij}$ . We will focus on additive auctions: auctions where the value of a set $S$ of items is the sum of the values of its elements, $\sum_{j \in S} v_{ij}$ . Additive auctions are perhaps the most well-studied setting in multi-item auction design (Hart & Nisan, 2012; Li & Yao, 2013; Daskalakis et al., 2014; Cai et al., 2016; Daskalakis et al., 2017).
53
+
54
+ The auctioneer does not know the exact valuation profile $V = (v_{ij})_{i \in N, j \in M}$ of the bidders in advance but he does know the distribution from which they are drawn: the valuation vector of bidder $i$ , $\vec{v}_i = (v_{i1}, \ldots, v_{im})$ , is drawn from a distribution $D_i$ over $\mathbb{R}^m$ . We will further assume that all bidders are independent and that $D_1 = \dots = D_n$ . As a result, $V$ is drawn from $D := \otimes_{i=1}^{n} D_i = D_1^{\otimes n}$ .
55
+
56
+ Definition 1. An auction is defined by a randomized allocation rule $g = (g_{1},\ldots ,g_{n})$ and a payment rule $p = (p_{1},\dots ,p_{n})$ where $g_{i}\colon \mathbb{R}^{n\times m}\to [0,1]^{m}$ and $p_i\colon \mathbb{R}^{n\times m}\to \mathbb{R}_{\geqslant 0}$ . Additionally for all items $j$ and valuation profiles $V$ , the $g_{i}$ must satisfy $\sum_{i}[g_{i}(V)]_{j}\leqslant 1$ .
57
+
58
+ Given a bid matrix $B = (b_{ij})_{i \in N, j \in M}$ , $[g_i(B)]_j$ is the probability that bidder $i$ receives object $j$ and $p_i(B)$ is the price bidder $i$ has to pay to the auctioneer. The condition $\sum_i [g_i(V)]_j \leqslant 1$ leaves open the possibility that an item remains unallocated.
59
+
60
+ Definition 2. The utility of bidder $i$ is defined by $u_{i}(\vec{v}_{i},B) = \sum_{j = 1}^{m}[g_{i}(B)]_{j}v_{ij} - p_{i}(B)$ .
61
+
62
+ Bidders seek to maximize their utility and may report bids that are different from their true valuations. In the following, we will denote by $B_{-i}$ the $(n - 1) \times m$ bid matrix without bidder $i$ , and by $(\vec{b}_i', B_{-i})$ the $n \times m$ bid matrix that inserts $\vec{b}_i'$ into row $i$ of $B_{-i}$ (for example: $B := (\vec{b}_i, B_{-i})$ ). We aim at auctions that incentivize bidders to bid their true valuations.
63
+
64
+ Definition 3. An auction $(g, p)$ is dominant strategy incentive compatible (DSIC) if each bidder's utility is maximized by reporting truthfully no matter what the other bidders report. For every bidder $i$ , valuation $\vec{v}_i \in D_i$ , bid $\vec{b}_i'\in D_i$ and bids $B_{-i} \in D_{-i}$ , $u_i(\vec{v}_i, (\vec{v}_i, B_{-i})) \geqslant u_i(\vec{v}_i, (\vec{b}_i', B_{-i}))$ .
65
+
66
+ Definition 4. An auction is individually rational (IR) if for all $i \in N$ , $\vec{v}_i \in D_i$ and $B_{-i} \in D_{-i}$ ,
67
+
68
+ $$
69
+ u _ {i} \left(\vec {v} _ {i}, \left(\vec {v} _ {i}, B _ {- i}\right)\right) \geqslant 0. \tag {IR}
70
+ $$
71
+
72
+ In a DSIC auction, the bidders have the incentive to truthfully report their valuations and therefore, the revenue on valuation profile $V$ is $\sum_{i=1}^{n} p_i(V)$ . Optimal auction design aims at finding a DSIC and IR auction that maximizes the expected revenue $\text{rev} \coloneqq \mathbb{E}_{V \sim D}[\sum_{i=1}^{n} p_i(V)]$ . Since there is no known characterization of DSIC mechanisms in the multi-item setting, we resort to the relaxed notion of ex-post regret. It measures the extent to which an auction violates DSIC.
73
+
74
+ Definition 5. The ex-post regret for a bidder $i$ is the maximum increase in his utility when considering all his possible bids and fixing the bids of others. For a valuation profile $V$ , it is given by $r_i(V) = \max_{\vec{b}_i' \in \mathbb{R}^m} u_i(\vec{v}_i, (\vec{b}_i', V_{-i})) - u_i(\vec{v}_i, (\vec{v}_i, V_{-i}))$ . In particular, DSIC is equivalent to
75
+
76
+ $$
77
+ r _ {i} (V) = 0, \forall i \in N, \forall V \in D. \tag {IC}
78
+ $$
79
+
80
+ The bid $\vec{b}_i^\prime$ that achieves $r_i(V)$ is called the optimal misreport of bidder $i$ for valuation profile $V$ . Therefore, finding an optimal auction is equivalent to the following linear program:
81
+
82
+ $$
83
+ \min _ {(g, p) \in \mathcal {M}} - \mathbb {E} _ {V \sim D} \left[ \sum_ {i = 1} ^ {n} p _ {i} (V) \right] \quad \text {s . t .} \quad r _ {i} (V) = 0, \quad \forall i \in N, \forall V \in D, \tag {LP}
84
+ $$
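+
+ To make these definitions concrete, the following is a small self-contained sketch (ours, not from the paper) that estimates the ex-post regret of Definition 5 by brute-force search over a grid of misreports. The `first_price` rule is an illustrative stand-in for an arbitrary allocation/payment pair $(g, p)$ ; since first-price auctions are not truthful, the estimated regret is strictly positive.
+
+ ```python
+ import numpy as np
+
+ def utility(v_i, alloc_i, pay_i):
+     """u_i = <alloc_i, v_i> - p_i (Definition 2, additive valuations)."""
+     return np.dot(alloc_i, v_i) - pay_i
+
+ def expost_regret(mechanism, V, i, grid):
+     """Max utility gain of bidder i over a grid of candidate misreports,
+     holding the other bids fixed (a crude stand-in for the inner max)."""
+     alloc, pay = mechanism(V)
+     truthful = utility(V[i], alloc[i], pay[i])
+     best = truthful
+     for b in grid:                        # brute-force search over misreports
+         B = V.copy()
+         B[i] = b
+         alloc_b, pay_b = mechanism(B)
+         best = max(best, utility(V[i], alloc_b[i], pay_b[i]))
+     return best - truthful
+
+ def first_price(B):
+     """Sell each item to its highest bidder at her bid (not truthful)."""
+     winners = B.argmax(axis=0)
+     alloc, pay = np.zeros_like(B), np.zeros(B.shape[0])
+     for j, i in enumerate(winners):
+         alloc[i, j] = 1.0
+         pay[i] += B[i, j]
+     return alloc, pay
+
+ V = np.array([[0.8, 0.3], [0.5, 0.6]])    # 2 bidders, 2 items
+ grid = [np.array([a, b]) for a in np.linspace(0, 1, 11)
+                          for b in np.linspace(0, 1, 11)]
+ print(expost_regret(first_price, V, i=0, grid=grid))  # > 0: bid shading pays
+ ```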
85
+
86
+ # 2.2 AUCTION DESIGN AS A LEARNING PROBLEM
87
+
88
+ As the space of auctions $\mathcal{M}$ may be large, we will set a parametric model. In what follows, we consider the class of auctions $(g^w,p^w)$ encoded by a neural network with parameter $w\in \mathbb{R}^d$ . The corresponding utility and regret functions will be denoted by $u_{i}^{w}$ and $r_i^w$ .
89
+
90
+ Following Duetting et al. (2019), the formulation (LP) is relaxed: the IC constraint for all $V \in D$ is replaced by the expected constraint $\mathbb{E}_{V \sim D} [r_i^w(V)] = 0$ for all $i \in N$ . The justification for this relaxation can be found in Duetting et al. (2019). By replacing expectations with empirical averages, the learning problem becomes:
91
+
92
+ $$
93
+ \min _ {w \in \mathbb {R} ^ {d}} - \frac {1}{L} \sum_ {\ell = 1} ^ {L} \sum_ {i = 1} ^ {n} p _ {i} ^ {w} (V ^ {(\ell)}) \quad \text {s . t .} \quad \widehat {r} _ {i} ^ {w} := \frac {1}{L} \sum_ {\ell = 1} ^ {L} r _ {i} ^ {w} (V ^ {(\ell)}) = 0, \forall i \in N. \quad \quad (\widehat {\mathrm {L P}})
94
+ $$
95
+
96
+ The learning problem $(\widehat{\mathbf{LP}})$ does not ensure (IR). However, this constraint is usually built into the parametrization (architecture) of the model: by design, the only auction mechanisms considered satisfy (IR). Implementation details can be found in Duetting et al. (2019); Rahme et al. (2021), or in Section 4.
97
+
98
+ # 3 AUCTION LEARNING AS A TWO-PLAYER GAME
99
+
100
+ We first present the optimization and training procedures proposed by Duetting et al. (2019) for $(\widehat{\mathrm{LP}})$ . We then demonstrate with numerical evidence that this approach presents two limitations: hyperparameter sensitivity and lack of interpretability. Using the concept of $\varepsilon$ -truthful-to-truthful reductions, we construct a new loss function that circumvents these two issues. Lastly, we resort to amortized optimization and reframe the auction learning problem as a two-player game.
101
+
102
+ # 3.1 THE AUGMENTED LAGRANGIAN METHOD AND ITS SHORTCOMINGS
103
+
104
+ Optimization and training. We briefly review the training procedure proposed by Duetting et al. (2019) to learn optimal auctions. The authors apply the augmented Lagrangian method to solve the constrained problem $(\widehat{\mathrm{LP}})$ and consider the loss:
105
+
106
+ $$
107
+ \mathcal{L}(w; \lambda; \rho) = -\frac{1}{L} \sum_{\ell = 1}^{L} \sum_{i \in N} p_i^w(V^{(\ell)}) + \sum_{i \in N} \lambda_i\, \widehat{r}_i^w + \frac{\rho}{2} \left(\sum_{i \in N} \widehat{r}_i^w\right)^2,
108
+ $$
109
+
110
+ where $\lambda \in \mathbb{R}^n$ is a vector of Lagrange multipliers, $\rho > 0$ is a parameter controlling the weight of the quadratic penalty, and $\widehat{r}_i^w$ is the empirical regret defined in $(\widehat{\mathrm{LP}})$ . More details about the training procedure can be found in Appendix A.
111
+
112
+ Scheduling consistency problem. The parameters $\lambda$ and $\rho$ are time-varying. Indeed, their values change according to a pre-defined schedule of the following form: 1) Initialize $\lambda$ and $\rho$ with $\lambda^0$ and $\rho^0$ respectively, 2) Update $\rho$ every $T_{\rho}$ iterations: $\rho^{t + 1} \gets \rho^t + c$ , where $c$ is a pre-defined constant, 3) Update $\lambda$ every $T_{\lambda}$ iterations according to $\lambda_i^t \gets \lambda_i^t + \rho^t \hat{r}_i^{w^t}$ .
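+
+ For concreteness, the schedule above can be written in a few lines (our paraphrase in Python; the default values are placeholders, not the paper's):
+
+ ```python
+ # The time-varying multiplier schedule: five hyperparameters must be chosen
+ # (lambda0 and rho0 fix the initial state; c, T_lambda, T_rho drive updates).
+ def scheduled_multipliers(t, lam, rho, r_hat, c=1.0, T_lambda=100, T_rho=10_000):
+     if t > 0 and t % T_rho == 0:       # 2) grow the quadratic-penalty weight
+         rho = rho + c
+     if t > 0 and t % T_lambda == 0:    # 3) gradient-ascent step on multipliers
+         lam = [l + rho * r for l, r in zip(lam, r_hat)]
+     return lam, rho
+ ```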
113
+
114
+ Therefore, this schedule requires setting five hyperparameters $(\lambda^0,\rho^0,c,T_\lambda ,T_\rho)$ . Some of the experiments in Duetting et al. (2019) involve learning an optimal mechanism for an $n$ -bidder, $m$ -item auction $(n\times m)$ where the valuations are i.i.d. $\mathcal{U}[0,1]$ . Different scheduling parameters were used for different values of $n$ and $m$ . We report the values of the hyperparameters used for the $1\times 2$ , $3\times 10$ and $5\times 10$ settings in Table 1(a). A natural question is whether the choice of parameters heavily affects performance. We investigate this question numerically by trying different schedulings (columns) for different settings (rows) and report the results in Table 1(b).
115
+
116
+ Table 1: (a): Scheduling parameter values set in Duetting et al. (2019) to reach optimal auctions in $n \times m$ settings with $n$ bidders, $m$ objects and i.i.d. valuations sampled from $\mathcal{U}[0,1]$ . (b): Revenue $rev := \mathbb{E}_{V \sim D}[\sum_{i=1}^{n} p_i(V)]$ and average regret per bidder $reg := \frac{1}{n} \mathbb{E}_{V \sim D}[\sum_{i=1}^{n} r_i(V)]$ for $n \times m$ settings when using the different parameter sets reported in (a).
117
+
118
+ <table><tr><td></td><td>1 × 2</td><td>3 × 10</td><td>5 × 10</td></tr><tr><td>λ<sup>0</sup></td><td>5</td><td>5</td><td>1</td></tr><tr><td>ρ<sup>0</sup></td><td>1</td><td>1</td><td>0.25</td></tr><tr><td>c</td><td>50</td><td>1</td><td>0.25</td></tr><tr><td>T<sub>λ</sub></td><td>10<sup>2</sup></td><td>10<sup>2</sup></td><td>10<sup>2</sup></td></tr><tr><td>T<sub>ρ</sub></td><td>10<sup>4</sup></td><td>10<sup>4</sup></td><td>10<sup>5</sup></td></tr></table>
+
+ (a)
+
+ (b): [table garbled during extraction; the surviving entries are rev/rgt pairs (0.552, 0.0001), (0.573, 0.0012), (0.332, 0.0179), (5.880, 0.0047) and rev 6.749; the full row/column assignment is not recoverable.]
121
+
122
+ The auction returned by the network varies dramatically with the choice of scheduling parameters. When applying the parameters of $1 \times 2$ to $5 \times 10$ , we obtain a revenue that is lower by $30\%$ . The performance of the learning algorithm strongly depends on the specific values of the hyperparameters, and finding an adequate scheduling requires an extensive and time-consuming hyperparameter search.
123
+
124
+ Lack of interpretability. How should one compare two mechanisms with different expected revenue and regret? Is a mechanism $M_1$ with revenue $P_1 = 1.01$ and an average total regret $R_1 = 0.02$ better than a mechanism $M_2$ with $P_2 = 1.0$ and $R_2 = 0.01$ ? The approach in Duetting et al. (2019) cannot answer this question. To see this, notice that when $\lambda_1 = \dots = \lambda_n = \lambda$ we can rewrite $\mathcal{L}(w; \lambda; \rho) = -P + \lambda R + \frac{\rho}{2} R^2$ . Which mechanism is better depends on the values of $\lambda$ and $\rho$ : for example, if $\rho = 1$ and $\lambda = 0.1$ we find that $M_1$ is better, but if $\rho = 1$ and $\lambda = 10$ then $M_2$ is better. Since the values of $\lambda$ and $\rho$ change with time, the Lagrangian approach in Duetting et al. (2019) cannot provide a metric to compare two mechanisms.
125
+
126
+ # 3.2 A TIME-INDEPENDENT AND INTERPRETABLE LOSS FUNCTION FOR AUCTION LEARNING
127
+
128
+ Our first contribution is a new loss function for auction learning that addresses the two limitations of Duetting et al. (2019) identified in Section 3.1. We first motivate this loss in the one-bidder case and then extend it to auctions with many bidders.
129
+
130
+ # 3.2.1 MECHANISMS WITH ONE BIDDER
131
+
132
+ Proposition 1. [Balcan et al. (2005), attributed to Nisan] Let $\mathcal{M}$ be an additive auction with 1 bidder and $m$ items. Let $P$ and $R$ denote the expected revenue and regret, $P = \mathbb{E}_{V\sim D}[p(V)]$ and $R = \mathbb{E}_{V\sim D}[r(V)]$ . There exists a mechanism $\mathcal{M}^*$ with expected revenue $P^{*} \geqslant (\sqrt{P} -\sqrt{R})^{2}$ and zero regret $R^{*} = 0$ .
133
+
134
+ A proof of this proposition can be found in Appendix C. Comparing two mechanisms is straightforward when both have zero regret: the better one achieves the higher revenue. Prop. 1 allows a natural and simple extension of this criterion to non-zero-regret mechanisms with one bidder: we will say that $M_1$ is better than $M_2$ if and only if $M_1^*$ is better than $M_2^*$ :
135
+
136
+ $$
137
+ M _ {1} \geqslant M _ {2} \iff P ^ {*} (M _ {1}) \geqslant P ^ {*} (M _ {2}) \iff \sqrt {P _ {1}} - \sqrt {R _ {1}} \geqslant \sqrt {P _ {2}} - \sqrt {R _ {2}}
138
+ $$
139
+
140
+ Using our metric, we find that a one-bidder mechanism with revenue 1.00 and regret 0.01 is "better" than one with revenue 1.01 and regret 0.02.
141
+
142
+ # 3.2.2 MECHANISMS WITH MULTIPLE BIDDERS
143
+
144
+ Let $M_1$ and $M_2$ be two mechanisms with $n$ bidders and $m$ objects. For $i \in \{1, 2\}$ , let $P_i$ and $R_i$ denote their total expected revenue and regret, $P_i = \mathbb{E}_{V \sim D} \left[ \sum_{j=1}^{n} p_j(V) \right]$ and $R_i = \mathbb{E}_{V \sim D} \left[ \sum_{j=1}^{n} r_j(V) \right]$ . We can extend the metric derived in Section 3.2.1 to the multi-bidder case as follows:
145
+
146
+ $$
147
+ M_1 \text{ is ``better'' than } M_2 \iff M_1 \geqslant M_2 \iff \sqrt{P_1} - \sqrt{R_1} \geqslant \sqrt{P_2} - \sqrt{R_2}
148
+ $$
149
+
150
+ When $n = 1$ we recover the criterion from Section 3.2.1 that is backed by Prop. 1. When $n > 1$ , it is considered a major open problem whether the extension of Prop. 1 still holds. Note that a multi-bidder variant of Prop. 1 does hold under a different solution concept termed "Bayesian Incentive Compatible" (Rubinstein & Weinberg, 2018; Cai et al., 2019), supporting the conjecture that Prop. 1 indeed extends. Independently of whether or not Prop. 1 holds, this reasoning implies a candidate loss function for the multi-bidder setting which we can evaluate empirically.
153
+
154
+ This way of comparing mechanisms motivates the use of the loss function $\mathcal{L}(P,R) = -(\sqrt{P} -\sqrt{R})$ instead of the Lagrangian from Section 3.1, and indeed this loss function works well in practice. We empirically find that the loss function $\mathcal{L}_m(P,R) = -(\sqrt{P} -\sqrt{R}) + R$ further accelerates training, as it (slightly) biases towards mechanisms with low regret. Both of these loss functions are time-independent and hyperparameter-free.
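+
+ As a sanity check (ours), the metric and the loss are one-liners, and they reproduce the comparison of the example from Section 3.1:
+
+ ```python
+ import math
+
+ def score(P, R):          # sqrt(P) - sqrt(R): revenue of the truthful proxy
+     return math.sqrt(P) - math.sqrt(R)
+
+ def L_m(P, R):            # training loss with a slight extra bias to low regret
+     return -(math.sqrt(P) - math.sqrt(R)) + R
+
+ print(score(1.00, 0.01))  # ~0.900
+ print(score(1.01, 0.02))  # ~0.864 -> the lower-regret mechanism is "better"
+ ```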
155
+
156
+ # 3.3 AMORTIZED MISREPORT OPTIMIZATION
157
+
158
+ To compute the regret $r_i^w (V)$ one has to solve the optimization problem $\max_{\vec{v}_i' \in \mathbb{R}^m} u_i^w (\vec{v}_i, (\vec{v}_i', V_{-i})) - u_i^w (\vec{v}_i, (\vec{v}_i, V_{-i}))$ . In Duetting et al. (2019), this problem is solved with an inner optimization loop for each valuation profile: the regret of each profile is computed separately and independently, from scratch. If two valuation profiles are very close to each other, one should expect the resulting optimization problems to have close solutions. We leverage this to improve training efficiency.
159
+
160
+ We propose to amortize this inner-loop optimization. Instead of solving all these optimization problems independently, we learn a single neural network $M^{\varphi}$ that tries to predict the solutions of all of them. $M^{\varphi}$ takes a valuation profile as input and maps it to the optimal misreports:
161
+
162
+ $$
163
+ M^{\varphi}\colon \left\{ \begin{array}{ll} \mathbb{R}^{n\times m} & \to \mathbb{R}^{n\times m} \\ V = [\vec{v}_i]_{i\in N} & \mapsto \big[\operatorname{argmax}_{\vec{v}'\in D_i} u_i^w(\vec{v}_i, (\vec{v}', V_{-i}))\big]_{i\in N} \end{array} \right.
164
+ $$
165
+
166
+ The loss $\mathcal{L}_r$ that $M^{\varphi}$ minimizes follows naturally from this definition: $\mathcal{L}_r(\varphi ,w) = -\mathbb{E}_{V\sim D}\left[\sum_{i = 1}^{n}u_i^w (\vec{v_i},([M^\varphi (V)]_i,V_{-i}))\right]$ .
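+
+ Below is a minimal PyTorch sketch (ours) of an amortized misreporter and the loss $\mathcal{L}_r$ . For simplicity it feeds the flattened profile to a single MLP rather than the per-bidder $B_{(i)}$ inputs of Section 4, the sizes are arbitrary, and `auction_utility(V, B, i)` is an assumed callable returning a batch of utilities $u_i^w(\vec{v}_i, B)$ under the current auction.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ n, m, hidden = 2, 3, 64                  # illustrative sizes
+
+ misreporter = nn.Sequential(             # M^phi: R^{n x m} -> R^{n x m}
+     nn.Linear(n * m, hidden), nn.Tanh(),
+     nn.Linear(hidden, hidden), nn.Tanh(),
+     nn.Linear(hidden, n * m), nn.Sigmoid(),  # Proj for valuations in [0, 1]
+ )
+
+ def misreport_loss(V, auction_utility):
+     """L_r(phi, w) = -E_V[ sum_i u_i^w(v_i, (M(V)_i, V_{-i})) ]."""
+     bs = V.shape[0]
+     M = misreporter(V.reshape(bs, -1)).reshape(bs, n, m)
+     total = 0.0
+     for i in range(n):
+         V_mis = V.clone()
+         V_mis[:, i, :] = M[:, i, :]      # bidder i deviates, others stay truthful
+         total = total + auction_utility(V, V_mis, i)
+     return -total.mean()
+ ```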
167
+
168
+ # 3.4 AUCTION LEARNING AS A TWO-PLAYER GAME
169
+
170
+ In this section, we combine the ideas from Sections 3.2 and 3.3 to obtain a new formulation for the auction learning problem as a two-player game between an Auctioneer with parameter $w$ and a Misreporter with parameter $\varphi$ . The optimal parameters for the auction learning problem $(w^{*},\varphi^{*})$ are a Nash Equilibrium for this game.
171
+
172
+ The Auctioneer is trying to design a truthful (IC) and rational (IR) auction that maximizes revenue. The Misreporter is trying to maximize the bidders' utility, for the current auction selected by the Auctioneer, $w$ . This is achieved by minimizing the loss function $\mathcal{L}_r(\varphi, w)$ with respect to $\varphi$ (as discussed in Sec 3.3). The Auctioneer in turn maximizes expected revenue, for the current misreports as chosen by the Misreporter. This is achieved by minimizing $\mathcal{L}_m(w, \varphi) = -\left( \sqrt{P^w} - \sqrt{R^{w,\varphi}} \right) + R^{w,\varphi}$ with respect to $w$ (as discussed in Sec 3.2). Here, $R^{w,\varphi}$ is an estimate of the total regret that the Auctioneer computes for the current Misreporter $\varphi$ , $R^{w,\varphi} = \frac{1}{L} \sum_{\ell=1}^{L} \sum_{i \in N} \left( u_i^w(\vec{v}_i, ([M^\varphi(V^{(\ell)})]_i, V_{-i}^{(\ell)})) - u_i^w(\vec{v}_i, (\vec{v}_i, V_{-i}^{(\ell)})) \right)$ . This game formulation can be summarized as follows:
173
+
174
+ ![](images/553994d0a53342f2e59d0ca6327a1d24cf5a64a0ee328eccbb144c398aeb2085.jpg)
175
+
176
+ Remark 1. The game formulation (G) is reminiscent of Generative Adversarial Networks (Goodfellow et al., 2014). Contrary to GANs, however, it is not a zero-sum game.
177
+
178
+ # 4 ARCHITECTURE AND TRAINING PROCEDURE
179
+
180
+ We describe ALGnet, a feed-forward architecture that solves the game formulation (G), and then provide a training procedure. ALGnet consists of two modules: the Auctioneer's module and the Misreporter's module. These components take as input a bid matrix $B = (b_{i,j}) \in \mathbb{R}^{n \times m}$ and are trained jointly. Their outputs are used to compute the regret and revenue of the auction.
181
+
182
+ Notation. We use $\mathrm{MLP}(d_{\mathrm{in}},n_l,h,d_{\mathrm{out}})$ to refer to a fully-connected neural network with input dimension $d_{\mathrm{in}}$ , output dimension $d_{\mathrm{out}}$ and $n_l$ hidden layers of width $h$ and tanh activation function. sig denotes the sigmoid activation function. Given a matrix $B = [\vec{b}_1,\dots ,\vec{b}_n]^\top \in \mathbb{R}^{n\times m}$ , we define for a fixed $i\in N$ , the matrix $B_{(i)}\coloneqq [\vec{b}_i,\vec{b}_1,\dots ,\vec{b}_{i - 1},\vec{b}_{i + 1},\dots ,\vec{b}_n]$ .
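+
+ The $B_{(i)}$ reordering is straightforward to implement; a two-line illustration (ours):
+
+ ```python
+ import torch
+
+ def bidder_first(B, i):
+     """B_(i): move bidder i's row first, keep the remaining rows in order."""
+     idx = [i] + [k for k in range(B.shape[0]) if k != i]
+     return B[idx]
+ ```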
183
+
184
+ # 4.1 THE AUCTIONEER'S MODULE
185
+
186
+ It is composed of an allocation network that encodes a randomized allocation $g^w \colon \mathbb{R}^{nm} \to [0,1]^{nm}$ and a payment network that encodes a payment rule $p^w \colon \mathbb{R}^{nm} \to \mathbb{R}^n$ .
187
+
188
+ Allocation network. It computes the allocation probability $[g^{w}(B)]_{ij}$ of item $j$ to bidder $i$ as $[g^{w}(B)]_{ij} = [f_{1}(B)]_{j} \cdot [f_{2}(B)]_{ij}$ , where $f_{1} \colon \mathbb{R}^{n \times m} \to [0,1]^{m}$ and $f_{2} \colon \mathbb{R}^{n \times m} \to [0,1]^{m \times n}$ are functions computed by two feed-forward neural networks.
189
+
190
+ - $[f_1(B)]_j$ is the probability that object $j \in M$ is allocated and is given by $[f_1(B)]_j = [\mathrm{sig}(\mathrm{MLP}(nm, n_a, h_a, m)(B))]_j$ .
191
+ - $[f_2(B)]_{ij}$ is the probability that item $j \in M$ is allocated to bidder $i \in N$ conditioned on object $j$ being allocated. A first MLP computes $l_i := \mathrm{MLP}(nm, n_a, h_a, m)(B_{(i)})$ for all $i \in N$ . The network then stacks these vectors $l_i$ into a matrix $L \in \mathbb{R}^{n \times m}$ . A softmax activation function is finally applied to each column of $L$ to ensure feasibility, i.e. for all $j \in M$ , $\sum_{i \in N} L_{ij} = 1$ .
192
+
193
+ Payment network. It computes the payment $[p^w (B)]_i$ for bidder $i$ as $[p^w (B)]_i = \tilde{p}_i\sum_{j = 1}^m B_{ij}[g^w (B)]_{ij}$ , where $\tilde{p}\colon \mathbb{R}^{n\times m}\to [0,1]^n$ . Here $\tilde{p}_i$ is the fraction of bidder $i$ 's reported welfare that she has to pay to the mechanism, computed as $\tilde{p}_i = \mathrm{sig}(\mathrm{MLP}(nm,n_p,h_p,1))(B_{(i)})$ . Finally, notice that by construction $[p^w (B)]_i\leqslant \sum_{j = 1}^m B_{ij}[g^w (B)]_{ij}$ , which ensures that (IR) is respected.
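+
+ A rough PyTorch sketch (ours) of the two networks just described. For brevity it applies shared MLPs to the flattened bid matrix instead of the per-bidder $B_{(i)}$ inputs, so the wiring and sizes are illustrative only; the (IR)-by-construction payment scaling is the part to note.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class Auctioneer(nn.Module):
+     def __init__(self, n, m, hidden=64):
+         super().__init__()
+         self.n, self.m = n, m
+         self.f1 = nn.Sequential(nn.Linear(n * m, hidden), nn.Tanh(),
+                                 nn.Linear(hidden, m), nn.Sigmoid())
+         self.f2 = nn.Sequential(nn.Linear(n * m, hidden), nn.Tanh(),
+                                 nn.Linear(hidden, n * m))
+         self.pt = nn.Sequential(nn.Linear(n * m, hidden), nn.Tanh(),
+                                 nn.Linear(hidden, n), nn.Sigmoid())
+
+     def forward(self, B):                    # B: (batch, n, m) bid matrices
+         x = B.reshape(B.shape[0], -1)
+         item_prob = self.f1(x)               # (batch, m): P[item j allocated]
+         who = self.f2(x).reshape(-1, self.n, self.m)
+         who = torch.softmax(who, dim=1)      # sum over bidders = 1 per item
+         g = item_prob.unsqueeze(1) * who     # allocation probabilities g^w(B)
+         p = self.pt(x) * (B * g).sum(dim=2)  # payment <= reported welfare
+         return g, p
+ ```
+
+ Since $\tilde{p}_i \in [0,1]$ , the payment never exceeds bidder $i$ 's reported welfare, which is how (IR) is enforced by construction.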
194
+
195
+ # 4.2 THE MISREPORTER'S MODULE
196
+
197
+ The module consists in an $\mathrm{MLP}(nm, n_M, h_M, m)$ followed by a projection layer $\mathrm{Proj}$ that ensures that the output of the network is in the domain $D$ of the valuation. For example when the valuations are restricted to $[0,1]$ , we can take $\mathrm{Proj} = \mathrm{sig}$ , if they are non-negative numbers, we can take $\mathrm{Proj} = \mathrm{SoftPlus}$ . The optimal misreport for bidder $i$ is then given by $\mathrm{Proj} \circ \mathrm{MLP}(nm, n_M, h_M, m)(B_{(i)}) \in \mathbb{R}^m$ . Stacking these vectors gives us the misreport matrix $M^\varphi(B)$ .
198
+
199
+ # 4.3 TRAINING PROCEDURE AND OPTIMIZATION
200
+
201
+ We optimize the game (G) over the space of neural network parameters $(w, \varphi)$ . The algorithm is easy to implement (Alg. 1).
202
+
203
+ At each step $t$ , we sample a batch of $B$ valuation profiles. The algorithm performs $\tau$ updates of the Misreporter's network (line 9) and one update of the Auctioneer's network (line 10). Moreover, we reinitialize the Misreporter's network every $T_{init}$ steps in the early phase of training ( $t \leqslant T_{limit}$ ). This step is not necessary, but we found empirically that it speeds up training.
204
+
205
+ # Algorithm 1 ALGnet training
206
+
207
+ 1: Input: number of agents, number of objects.
208
+ 2: Parameters: $\gamma > 0$ ; $B, T, T_{init}, T_{limit}, \tau \in \mathbb{N}$ .
209
+ 3: Initialize the Misreporter's and Auctioneer's networks.
210
+ 4: for $t = 1, \dots, T$ do
211
+
212
+ 5: if $t \equiv 0 \mod T_{init}$ and $t < T_{limit}$ then:
213
+ 6: Reinitialize the Misreporter's network
214
+ 7: Sample valuation batch $S$ of size $B$ .
215
+ 8: for $s = 1,\dots ,\tau$ do
216
+ 9: $\varphi^{s + 1}\gets \varphi^s -\gamma \nabla_\varphi \mathcal{L}_r(\varphi^s,w^t)(S).$
217
+ 10: $w^{t + 1}\gets w^t -\gamma \nabla_w\mathcal{L}_m(w^t,\varphi)(S).$
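+
+ Putting the pieces together, the alternating updates of Algorithm 1 look roughly as follows (our sketch, reusing the `Auctioneer` and `misreporter` snippets above; the batch size, step counts, learning rates, and the small clamp that keeps the square root well-defined are our choices, and the periodic reinitialization of lines 5-6 is omitted):
+
+ ```python
+ import torch
+
+ n, m = 2, 3                                 # as in the earlier sketches
+ auct = Auctioneer(n, m)
+ opt_w   = torch.optim.AdamW(auct.parameters(),        lr=1e-3)
+ opt_phi = torch.optim.AdamW(misreporter.parameters(), lr=1e-3)
+
+ def util(V_true, B, i):                     # u_i^w(v_i, B), Definition 2
+     g, p = auct(B)
+     return (g[:, i] * V_true[:, i]).sum(-1) - p[:, i]
+
+ for t in range(1000):
+     V = torch.rand(128, n, m)               # fresh valuation batch
+     for _ in range(5):                      # tau Misreporter steps (line 9)
+         opt_phi.zero_grad()
+         misreport_loss(V, util).backward()
+         opt_phi.step()
+     opt_w.zero_grad()                       # one Auctioneer step (line 10)
+     M = misreporter(V.reshape(128, -1)).reshape(128, n, m).detach()
+     P = auct(V)[1].sum(-1).mean()           # expected revenue P^w
+     R = torch.zeros(())                     # regret estimate R^{w,phi}
+     for i in range(n):
+         Vm = V.clone()
+         Vm[:, i] = M[:, i]
+         R = R + (util(V, Vm, i) - util(V, V, i)).mean()
+     R = R.clamp_min(1e-8)                   # keep sqrt well-defined
+     loss = -(P.sqrt() - R.sqrt()) + R       # L_m from Section 3.2
+     loss.backward()
+     opt_w.step()
+ ```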
218
+
219
+ # 5 EXPERIMENTAL RESULTS
220
+
221
+ We show that ALGnet can recover near-optimal auctions in settings where the optimal solution is known, and that it can find new auctions in settings where no analytical solution is known. Since RegretNet is already capable of discovering near-optimal auctions, one cannot expect ALGnet to achieve significantly higher revenue. The results obtained are competitive with or better than those of Duetting et al. (2019), while requiring far fewer hyperparameters (Section 3). We also evaluate ALGnet on online auctions and compare it to RegretNet.
222
+
223
+ For each experiment, we compute the total revenue $rev := \mathbb{E}_{V \sim D}[\sum_{i \in N} p_i^w(V)]$ and average regret $rgt := \frac{1}{n} \mathbb{E}_{V \sim D}[\sum_{i \in N} r_i^w(V)]$ on a test set of 10,000 valuation profiles. We run each experiment 5 times with different random seeds and report the average and standard deviation of these runs. In our comparisons we make sure that ALGnet and RegretNet have similar sizes for fairness (Appendix D).
224
+
225
+ # 5.1 AUCTIONS WITH KNOWN AND UNKNOWN OPTIMA
226
+
227
+ Known settings. We show that ALGnet is capable of recovering near-optimal auctions in different well-studied settings that admit an analytical solution. These are one-bidder, two-item auctions where the valuations $v_{1}$ and $v_{2}$ of the two items are independent. We consider the following settings. (A): $v_{1}$ and $v_{2}$ are i.i.d. from $\mathcal{U}[0,1]$ , (B): $v_{1} \sim \mathcal{U}[4,16]$ and $v_{2} \sim \mathcal{U}[4,7]$ , (C): $v_{1}$ has density $f_{1}(x) = 5 / (1 + x)^{6}$ and $v_{2}$ has density $f_{2}(y) = 6 / (1 + y)^{7}$ .
228
+
229
+ (A) is the celebrated Manelli-Vincent auction (Manelli & Vincent, 2006); (B) is a non-i.i.d. auction and (C) is a non-i.i.d. heavy-tail auction, both studied in Daskalakis et al. (2017). We compare our results to the theoretical optimal auction (Table 2). (Duetting et al. (2019) does not evaluate RegretNet on settings (B) & (C).) During training, $rgt$ decreases to 0 while $rev$ and $P^*$ converge to the optimal revenue. For (A), we also plot $rev$ , $rgt$ and $P^*$ as functions of the number of epochs and compare to RegretNet (Fig. 1).
230
+
231
+ We observe that, contrary to ALGnet, RegretNet overestimates the revenue in the early stages of training at the expense of a higher regret. As a consequence, ALGnet learns the optimal auction faster than RegretNet while being schedule-free and requiring fewer hyperparameters.
232
+
233
+ Table 2: Revenue & regret of ALGnet for settings (A)-(C).
234
+
235
+ <table><tr><td></td><td colspan="2">Optimal</td><td colspan="2">ALGnet (Ours)</td></tr><tr><td></td><td>rev</td><td>rgt</td><td>rev</td><td>rgt (×10<sup>-3</sup>)</td></tr><tr><td>(A)</td><td>0.550</td><td>0</td><td>0.555 (±0.0019)</td><td>0.55 (±0.14)</td></tr><tr><td>(B)</td><td>9.781</td><td>0</td><td>9.737 (±0.0443)</td><td>0.75 (±0.17)</td></tr><tr><td>(C)</td><td>0.1706</td><td>0</td><td>0.1712 (±0.0012)</td><td>0.14 (±0.07)</td></tr></table>
236
+
237
+ ![](images/0424ae0b7922bbdeb10334962f5f1bcac8a56eefc9db08bd83626d025791fd7f.jpg)
238
+ (a)
239
+
240
+ ![](images/129d2c69a51344d1808bb312b3447070b115e404898596102826f84496a45cc3.jpg)
241
+ (b)
242
+
243
+ ![](images/f536d49b377f80b9324768d52fa35ddf6ebe7a1ee11c73d192c9ac4abef3baad.jpg)
244
+ (c)
245
+
246
+ ![](images/fc672cd99e89019f268635a4bbb22c922e27d8c42142ef314215983754336c25.jpg)
247
+ (d)
248
+
249
+ ![](images/3e67cb26ff84bf451b40dcb41f9299f8af8632a8d015060d68c72615575fee3c.jpg)
250
+ (e)
251
+ Figure 1: (a-b-c) compare the evolution of the revenue, regret and $P^{*}$ as a function of the number of epochs for RegretNet and ALGnet for setting (A). (d-e-f) plot the revenue, regret and $P^{*}$ as a function of time for ALGnet and (offline & online) RegretNet for an online auction (Section 5.2).
252
+
253
+ ![](images/027651006e4d3e882eb333d292e83f8d1d3a0a8d468f70ee42b55ffe3cebe146.jpg)
254
+ (f)
255
+
256
+ Unknown and large-scale auctions. We now consider settings where the optimal auction is unknown. We look at $n$ -bidder, $m$ -item additive settings where the valuations are sampled i.i.d. from $\mathcal{U}[0,1]$ , which we denote by $n \times m$ . In addition to "reasonable"-scale auctions ( $1 \times 10$ and $2 \times 2$ ), we investigate large-scale auctions ( $3 \times 10$ and $5 \times 10$ ) that are much more complex; only deep learning methods are able to solve them efficiently. Table 3 shows that ALGnet is able to discover auctions that yield comparable or better results than RegretNet.
257
+
258
+ # 5.2 ONLINE AUCTIONS
259
+
260
+ ALGnet is an online algorithm with a time-independent loss function. We would therefore expect it to perform well in settings where the underlying distribution of the valuations changes over time. We consider a one-bidder, two-item additive auction with valuations $v_{1}$ and $v_{2}$ sampled i.i.d. from $\mathcal{U}[0,1 + t]$ where $t$ is increased from 0 to 1 at a steady rate. The optimal auction at time $t$ has revenue
261
+
262
+ Table 3: Comparison of RegretNet and ALGnet. The values reported for RegretNet are taken from Duetting et al. (2019); exact rgt values and standard deviations are not available there.
263
+
264
+ <table><tr><td rowspan="2">Setting</td><td colspan="2">RegretNet</td><td colspan="2">ALGnet (Ours)</td></tr><tr><td>rev</td><td>rgt</td><td>rev</td><td>rgt</td></tr><tr><td>1 × 2</td><td>0.554</td><td>&lt; 1.0 · 10<sup>-3</sup></td><td>0.555 (±0.0019)</td><td>0.55 · 10<sup>-3</sup> (±0.14 · 10<sup>-3</sup>)</td></tr><tr><td>1 × 10</td><td>3.461</td><td>&lt; 3.0 · 10<sup>-3</sup></td><td>3.487 (±0.0135)</td><td>1.65 · 10<sup>-3</sup> (±0.57 · 10<sup>-3</sup>)</td></tr><tr><td>2 × 2</td><td>0.878</td><td>&lt; 1.0 · 10<sup>-3</sup></td><td>0.879 (±0.0024)</td><td>0.58 · 10<sup>-3</sup> (±0.23 · 10<sup>-3</sup>)</td></tr><tr><td>3 × 10</td><td>5.541</td><td>&lt; 2.0 · 10<sup>-3</sup></td><td>5.562 (±0.0308)</td><td>1.93 · 10<sup>-3</sup> (±0.33 · 10<sup>-3</sup>)</td></tr><tr><td>5 × 10</td><td>6.778</td><td>&lt; 5.0 · 10<sup>-3</sup></td><td>6.781 (±0.0504)</td><td>3.85 · 10<sup>-3</sup> (±0.43 · 10<sup>-3</sup>)</td></tr></table>
265
+
266
+ $0.55 \times (1 + t)$ . We use ALGnet and two versions of RegretNet, the original offline version (Appendix A) and our own online version (Appendix B), and plot $rev(t)$ , $rgt(t)$ and $P^{*}(t)$ (Fig. 1). The offline version learns from a fixed dataset of valuations sampled at $t = 0$ (i.e. with $V \sim \mathcal{U}[0,1]^{nm}$ ) while the online versions (like ALGnet) learn from a stream of data at each time $t$ . Overall, ALGnet performs better than the other methods: it learns a near-optimal auction faster in the initial phase (especially compared to online RegretNet) and keeps adapting to the distributional shift (contrary to vanilla RegretNet).
267
+
268
+ # 6 CONCLUSION
269
+
270
+ We identified two inefficiencies in previous approaches to deep auction design and proposed solutions, building upon recent trends and results from machine learning (amortization) and theoretical auction design (a stationary Lagrangian). The result is a novel formulation of auction learning as a two-player game between an Auctioneer and a Misreporter, and a new architecture, ALGnet, which requires significantly fewer hyperparameters than previous Lagrangian approaches. We demonstrated the effectiveness of ALGnet on a variety of examples by comparing it to the theoretically optimal auction when it is known, and to RegretNet when the optimal solution is not known.
271
+
272
+ Acknowledgements. Jad Rahme would like to thank Ryan P. Adams for helpful discussions and feedback on the manuscript. Samy Jelassi thanks Arthur Mensch for fruitful discussions on the subject and feedback on the manuscript. The work of Jad Rahme was funded by a Princeton SEAS Innovation Grant. The work of Samy Jelassi is supported by the NSF CAREER CIF 1845360. The work of S. Matthew Weinberg was supported by NSF CCF-1717899.
273
+
274
+ # REFERENCES
275
+
276
+ Maria-Florina Balcan, Avrim Blum, Jason D. Hartline, and Yishay Mansour. Mechanism design via machine learning. In 46th Annual IEEE Symposium on Foundations of Computer Science (FOCS 2005), 23-25 October 2005, Pittsburgh, PA, USA, Proceedings, pp. 605-614. IEEE Computer Society, 2005. doi: 10.1109/SFCS.2005.50. URL https://doi.org/10.1109/SFCS.2005.50.
277
+ Maria-Florina F Balcan, Tuomas Sandholm, and Ellen Vitercik. Sample complexity of automated mechanism design. In Advances in Neural Information Processing Systems, pp. 2083-2091, 2016.
278
+ Xiaohui Bei and Zhiyi Huang. Bayesian Incentive Compatibility via Fractional Assignments. In the Twenty-Second Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), 2011.
279
+ Sebastien Bubeck, Nikhil R Devanur, Zhiyi Huang, and Rad Niazadeh. Online auctions and multiscale online learning. In Proceedings of the 2017 ACM Conference on Economics and Computation, pp. 497-514, 2017.
280
+ Yang Cai, Nikhil Devanur, and S. Matthew Weinberg. A duality based unified approach to bayesian mechanism design. In Proceedings of the 48th ACM Conference on Theory of Computation(STOC), 2016.
281
+ Yang Cai, Argyris Oikonomou, Grigoris Velegkas, and Mingfei Zhao. An efficient $\varepsilon$ -bic to BIC transformation and its application to black-box reduction in revenue maximization. CoRR, abs/1911.10172, 2019. URL http://arxiv.org/abs/1911.10172.
282
+
283
+ Xi Chen, Ilias Diakonikolas, Dimitris Paparas, Xiaorui Sun, and Mihalis Yannakakis. The complexity of optimal multidimensional pricing. In Proceedings of the Twenty-Fifth Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2014, Portland, Oregon, USA, January 5-7, 2014, pp. 1319-1328, 2014. doi: 10.1137/1.9781611973402.97. URL http://dx.doi.org/10.1137/1.9781611973402.97.
284
+ Xi Chen, Ilias Diakonikolas, Anthi Orfanou, Dimitris Paparas, Xiaorui Sun, and Mihalis Yannakakis. On the complexity of optimal lottery pricing and randomized mechanisms. In 2015 IEEE 56th Annual Symposium on Foundations of Computer Science, pp. 1464-1479. IEEE, 2015.
285
+ Xi Chen, George Matikas, Dimitris Paparas, and Mihalis Yannakakis. On the complexity of simple and optimal deterministic mechanisms for an additive buyer. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 2036-2049. SIAM, 2018.
286
+ Constantinos Daskalakis and Seth Matthew Weinberg. Symmetries and optimal multi-dimensional mechanism design. In Proceedings of the 13th ACM Conference on Electronic Commerce, pp. 370-387, 2012.
287
+ Constantinos Daskalakis, Alan Deckelbaum, and Christos Tzamos. The complexity of optimal mechanism design. In Proceedings of the twenty-fifth annual ACM-SIAM symposium on Discrete algorithms, pp. 1302-1318. SIAM, 2014.
288
+ Constantinos Daskalakis, Alan Deckelbaum, and Christos Tzamos. Strong duality for a multiple-good monopolist. *Econometrica*, 85(3):735-767, 2017.
289
+ Paul Duetting, Zhe Feng, Harikrishna Narasimhan, David Parkes, and Sai Srivatsa Ravindranath. Optimal auctions through deep learning. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pp. 1706-1715, Long Beach, California, USA, 09-15 Jun 2019. PMLR. URL http://proceedings.mlr.press/v97/duetting19a.html.
290
+ Shaddin Dughmi, Li Han, and Noam Nisan. Sampling and representation complexity of revenue maximization. In International Conference on Web and Internet Economics, pp. 277-291. Springer, 2014.
291
+ Shaddin Dughmi, Jason D. Hartline, Robert Kleinberg, and Rad Niazadeh. Bernoulli factories and black-box reductions in mechanism design. In Proceedings of the 49th Annual ACM SIGACT Symposium on Theory of Computing, STOC 2017, Montreal, QC, Canada, June 19-23, 2017, pp. 158-169, 2017. doi: 10.1145/3055399.3055492. URL http://doi.acm.org/10.1145/3055399.3055492.
292
+ Paul Dütting, Felix Fischer, Pichayut Jirapinyo, John K Lai, Benjamin Lubin, and David C Parkes. Payment rules through discriminant-based classifiers. ACM Transactions on Economics and Computation (TEAC), 3(1):1-41, 2015.
293
+ Zhe Feng, Harikrishna Narasimhan, and David C Parkes. Deep learning for revenue-optimal auctions with budgets. In Proceedings of the 17th International Conference on Autonomous Agents and Multiagent Systems, pp. 354-362. International Foundation for Autonomous Agents and Multiagent Systems, 2018.
294
+ Noah Golowich, Harikrishna Narasimhan, and David C. Parkes. Deep learning for multi-facility location mechanism design. In Proceedings of the 17th International Joint Conference on Artificial Intelligence (IJCAI 2018), pp. 261-267, 2018. URL https://econcs.seas.harvard.edu/files/econcs/files/golowich_ijcai18.pdf.
295
+ Yannai A. Gonczarowski and Noam Nisan. Efficient empirical revenue maximization in single-parameter auction environments. In Proceedings of the 49th Annual ACM SIGACT Symposium on Theory of Computing, STOC 2017, Montreal, QC, Canada, June 19-23, 2017, pp. 856-868, 2017. doi: 10.1145/3055399.3055427. URL http://doi.acm.org/10.1145/3055399.3055427.
296
+ Yannai A. Gonczarowski and S. Matthew Weinberg. The sample complexity of up-to- $\varepsilon$ multidimensional revenue maximization. In 59th IEEE Annual Symposium on Foundations of Computer Science, FOCS, 2018.
297
+
298
+ Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, pp. 2672-2680, 2014.
299
+ Chenghao Guo, Zhiyi Huang, and Xinzhi Zhang. Settling the sample complexity of single-parameter revenue maximization. In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing, STOC 2019, Phoenix, AZ, USA, June 23-26, 2019., pp. 662-673, 2019. doi: 10.1145/3313276.3316325. URL https://doi.org/10.1145/3313276.3316325.
300
+ Sergiu Hart and Noam Nisan. Approximate Revenue Maximization with Multiple Items. In the 13th ACM Conference on Electronic Commerce (EC), 2012.
301
+ Sergiu Hart and Philip J. Reny. Maximizing Revenue with Multiple Goods: Nonmonotonicity and Other Observations. Theoretical Economics, 10(3):893-922, 2015.
302
+ Jason D. Hartline and Brendan Lucier. Bayesian Algorithmic Mechanism Design. In the 42nd ACM Symposium on Theory of Computing (STOC), 2010.
303
+ Jason D. Hartline and Samuel Taggart. Sample complexity for non-truthful mechanisms. In Proceedings of the 2019 ACM Conference on Economics and Computation, EC 2019, Phoenix, AZ, USA, June 24-28, 2019., pp. 399-416, 2019. doi: 10.1145/3328526.3329632. URL https://doi.org/10.1145/3328526.3329632.
304
+ Jason D. Hartline, Robert Kleinberg, and Azarakhsh Malekian. Bayesian Incentive Compatibility via Matchings. In the Twenty-Second Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), 2011.
305
+ Zhiyi Huang, Yishay Mansour, and Tim Roughgarden. Making the most of your samples. SIAM Journal on Computing, 47(3):651-674, 2018.
306
+ Sébastien Lahaie. A kernel-based iterative combinatorial auction. In Twenty-Fifth AAAI Conference on Artificial Intelligence, 2011.
307
+ Xinye Li and Andrew Chi-Chih Yao. On revenue maximization for selling multiple independently distributed items. Proceedings of the National Academy of Sciences, 110(28):11232-11237, 2013.
308
+ Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.
309
+ Alejandro Manelli and Daniel Vincent. Bundling as an optimal selling mechanism for a multiple-good monopolist. Journal of Economic Theory, 127(1):1-35, 2006.
310
+ Jamie Morgenstern and Tim Roughgarden. Learning simple auctions. In Conference on Learning Theory, pp. 1298-1318, 2016.
311
+ Jamie H Morgenstern and Tim Roughgarden. On the pseudo-dimension of nearly optimal auctions. In Advances in Neural Information Processing Systems, pp. 136-144, 2015.
312
+ Roger B Myerson. Optimal auction design. Mathematics of operations research, 6(1):58-73, 1981.
313
+ Harikrishna Narasimhan and David C Parkes. A general statistical framework for designing strategy-proof assignment mechanisms. In UAI'16 Proceedings of the Thirty-Second Conference on Uncertainty in Artificial Intelligence, 2016.
314
+ Jad Rahme, Samy Jelassi, Joan Bruna, and S. Matthew Weinberg. A permutation-equivariant neural network architecture for auction design. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, 2021.
315
+ Tim Roughgarden and Okke Schrijvers. Ironing in the dark. In Proceedings of the 2016 ACM Conference on Economics and Computation, EC '16, Maastricht, The Netherlands, July 24-28, 2016, pp. 1-18, 2016. doi: 10.1145/2940716.2940723. URL http://doi.acm.org/10.1145/2940716.2940723.
316
+
317
+ Aviad Rubinstein and S Matthew Weinberg. Simple mechanisms for a subadditive buyer and applications to revenue monotonicity. ACM Transactions on Economics and Computation (TEAC), 6(3-4):1-25, 2018.
318
+ Weiran Shen, Pingzhong Tang, and Song Zuo. Automated mechanism design via neural networks. In Proceedings of the 18th International Conference on Autonomous Agents and Multiagent Systems, pp. 215-223. International Foundation for Autonomous Agents and Multiagent Systems, 2019.
319
+ Vasilis Syrgkanis. A sample complexity measure with applications to learning optimal auctions. In Advances in Neural Information Processing Systems, pp. 5352-5359, 2017.
320
+ Andrea Tacchetti, DJ Strouse, Marta Garnelo, Thore Graepel, and Yoram Bachrach. A neural architecture for designing truthful and efficient auctions. arXiv preprint arXiv:1907.05181, 2019.
321
+ John Thanassoulis. Haggling over substitutes. Journal of Economic Theory, 117:217-245, 2004.
322
+ William Vickrey. Counterspeculation, auctions, and competitive sealed tenders. The Journal of finance, 16(1):8-37, 1961.
323
+
324
+ # A TRAINING ALGORITHM FOR REGRETNET
325
+
326
+ We present the training algorithm for RegretNet; more details can be found in Duetting et al. (2019).
327
+
328
+ Algorithm 2 RegretNet training algorithm (offline).
+ 1: Input: Minibatches $S_{1},\ldots ,S_{T}$ of size $B$
+ 2: Parameters: $\gamma >0,\eta >0,c > 0,R\in \mathbb{N},T\in \mathbb{N},T_{\rho}\in \mathbb{N},T_{\lambda}\in \mathbb{N}$
+ 3: Initialize parameters: $\rho^0\in \mathbb{R},w^0\in \mathbb{R}^d,\lambda^0\in \mathbb{R}^n$
+ 4: Initialize misreports: $v_{i}^{\prime (\ell)}\in \mathcal{D}_{i},\forall \ell \in [B],i\in N$
+ 5: for $t = 0,\dots ,T$ do
+ 6: Receive minibatch $S_{t} = \{V^{(1)},\dots ,V^{(B)}\}$
+ 7: for $r = 0,\dots ,R$ do
+ 8: $\forall \ell \in [B],i\in N:$ $v_{i}^{\prime (\ell)}\gets v_{i}^{\prime (\ell)} + \gamma \nabla_{v_{i}^{\prime}}u_{i}^{w^{t}}(v_{i}^{(\ell)};(v_{i}^{\prime (\ell)},V_{-i}^{(\ell)}))$
+ 9: Compute the Lagrangian gradient and update $w^{t}$ : $w^{t + 1}\gets w^t -\eta \nabla_w\mathcal{L}(w^t;\lambda^t;\rho^t)$
+ 10: Update $\rho$ once every $T_{\rho}$ iterations:
+ 11: if $t$ is a multiple of $T_{\rho}$ then $\rho^{t + 1}\leftarrow \rho^t +c$ else $\rho^{t + 1}\leftarrow \rho^t$
+ 12: Update Lagrange multipliers once every $T_{\lambda}$ iterations:
+ 13: if $t$ is a multiple of $T_{\lambda}$ then $\lambda_i^{t + 1}\gets \lambda_i^t +\rho^t\hat{r}_i(w^t),\forall i\in N$ else $\lambda^{t + 1}\gets \lambda^t$
352
+
353
+ # B TRAINING ALGORITHM FOR ONLINE REGRETNET
354
+
355
+ We present an online version of the training algorithm for RegretNet; more details can be found in Duetting et al. (2019). This version is mentioned in the original paper, but the algorithm is not explicitly written there. The following is our own adaptation of the original RegretNet algorithm to online settings.
356
+
357
+ Algorithm 3 RegretNet training algorithm (online).
+ 1: Input: valuation distribution $\mathcal{D}$
+ 2: Parameters: $\gamma > 0$ , $\eta > 0$ , $c > 0$ , $R \in \mathbb{N}$ , $T \in \mathbb{N}$ , $T_{\rho} \in \mathbb{N}$ , $T_{\lambda} \in \mathbb{N}$ , $B \in \mathbb{N}$
+ 3: Initialize parameters: $\rho^0 \in \mathbb{R}$ , $w^0 \in \mathbb{R}^d$ , $\lambda^0 \in \mathbb{R}^n$
+ 4: for $t = 0, \ldots, T$ do
+ 5: Sample minibatch $S_t = \{V^{(1)}, \ldots, V^{(B)}\}$ from distribution $\mathcal{D}$ .
+ 6: Initialize misreports: $v_i^{\prime(\ell)} \in \mathcal{D}_i$ , $\forall \ell \in [B]$ , $i \in N$ .
+ 7: for $r = 0, \ldots, R$ do
+ 8: $\forall \ell \in [B]$ , $i \in N : v_i^{\prime(\ell)} \gets v_i^{\prime(\ell)} + \gamma \nabla_{v_i'} u_i^{w^t}(v_i^{(\ell)}; (v_i^{\prime(\ell)}, V_{-i}^{(\ell)}))$
+ 9: Compute the Lagrangian gradient and update $w^t$ : $w^{t+1} \gets w^t - \eta \nabla_w \mathcal{L}(w^t; \lambda^t; \rho^t)$
+ 10: Update $\rho$ once every $T_{\rho}$ iterations: if $t$ is a multiple of $T_{\rho}$ then $\rho^{t+1} \gets \rho^t + c$ else $\rho^{t+1} \gets \rho^t$
+ 11: Update Lagrange multipliers once every $T_{\lambda}$ iterations: if $t$ is a multiple of $T_{\lambda}$ then $\lambda_i^{t+1} \gets \lambda_i^t + \rho^t \widehat{r}_i(w^t)$ , $\forall i \in N$ else $\lambda^{t+1} \gets \lambda^t$
382
+
383
+ # C PROOF OF PROP. 1
384
+
385
+ Lemma 1. Let $M$ be a one-bidder, $m$ -item mechanism with expected revenue $P$ and expected regret $R$ . Then $\forall \varepsilon > 0$ , there exists a mechanism $M'$ with expected revenue $P' \geqslant (1 - \varepsilon)P - \frac{1 - \varepsilon}{\varepsilon}R$ and zero expected regret, $R' = 0$ .
386
+
387
+ Proof. For every valuation vector $v \in D$ , let $g(v)$ and $p(v)$ denote the allocation vector and price that $M$ assigns to $v$ .
388
+
389
+ We now consider the mechanism $M^{\prime}$ that does the following:
390
+
391
+ $g^{\prime}(v) = g(v^{\prime})$
392
+ $p^{\prime}(v) = (1 - \varepsilon)p(v^{\prime})$
393
+
394
+ where $v'$ is given by $v' = \operatorname{argmax}_{\tilde{v} \in D} \langle v, g(\tilde{v}) \rangle - (1 - \varepsilon)p(\tilde{v})$ . By construction, the mechanism $M'$ has zero regret; it remains to bound its revenue. Denoting by $R(v)$ the regret of the profile $v$ in the mechanism $M$ , $R(v) = \max_{\tilde{v} \in D} \langle v, g(\tilde{v}) - g(v) \rangle - (p(\tilde{v}) - p(v))$ , we have:
395
+
396
+ $$
397
+ \begin{array}{l} \langle v, g \left(v ^ {\prime}\right) \rangle - p \left(v ^ {\prime}\right) = \langle v, g (v) \rangle - p (v) + \langle v, g \left(v ^ {\prime}\right) - g (v) \rangle - \left(p \left(v ^ {\prime}\right) - p (v)\right) \\ \leqslant \langle v, g (v) \rangle - p (v) + R (v) \\ \end{array}
398
+ $$
399
+
400
+ which we rewrite as:
401
+
402
+ $$
403
+ \langle v, g (v) \rangle - p (v) \geqslant \langle v, g \left(v ^ {\prime}\right) \rangle - p \left(v ^ {\prime}\right) - R (v)
404
+ $$
405
+
406
+ Second, we have by construction:
407
+
408
+ $$
409
+ \langle v, g \left(v ^ {\prime}\right) \rangle - (1 - \varepsilon) p \left(v ^ {\prime}\right) \geqslant \langle v, g (v) \rangle - (1 - \varepsilon) p (v)
410
+ $$
411
+
412
+ By summing these two relations, we find:
413
+
414
+ $$
415
+ p \left(v ^ {\prime}\right) \geqslant p (v) - \frac {R (v)}{\varepsilon}
416
+ $$
417
+
418
+ Finally we get that:
419
+
420
+ $$
421
+ p ^ {\prime} (v) \geqslant (1 - \varepsilon) p (v) - \frac {1 - \varepsilon}{\varepsilon} R (v)
422
+ $$
423
+
424
+ Taking the expectation we get:
425
+
426
+ $$
427
+ P ^ {\prime} \geqslant (1 - \varepsilon) P - \frac {1 - \varepsilon}{\varepsilon} R
428
+ $$
429
+
430
+ Proposition 1. Let $\mathcal{M}$ be an additive auction with 1 bidder and $m$ items. Let $P$ and $R$ denote the expected revenue and regret, $P = \mathbb{E}_{V\sim D}[p(V)]$ and $R = \mathbb{E}_{V\sim D}[r(V)]$ . There exists a mechanism $\mathcal{M}^*$ with expected revenue $P^{*} \geqslant \left(\sqrt{P} -\sqrt{R}\right)^{2}$ and zero regret $R^{*} = 0$ .
431
+
432
+ Proof. From Lemma 1 we know that $\forall \varepsilon > 0$ we can find a zero-regret mechanism with revenue $P' \geqslant (1 - \varepsilon)P - \frac{1 - \varepsilon}{\varepsilon}R$ . Optimizing this lower bound over $\varepsilon$ , the best choice is $\varepsilon = \sqrt{\frac{R}{P}}$ . The resulting revenue guarantee is:
433
+
434
+ $$
435
+ P^{*} \geqslant \left(1 - \sqrt{\frac{R}{P}}\right) P - \frac{1 - \sqrt{\frac{R}{P}}}{\sqrt{\frac{R}{P}}}\, R = P - 2\sqrt{PR} + R = \left(\sqrt{P} - \sqrt{R}\right)^{2}
436
+ $$
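+
+ The optimization over $\varepsilon$ can be checked symbolically; a small verification (ours, using sympy):
+
+ ```python
+ import sympy as sp
+
+ P, R, eps = sp.symbols('P R epsilon', positive=True)
+ Pprime = (1 - eps) * P - (1 - eps) / eps * R      # revenue bound from Lemma 1
+
+ print(sp.solve(sp.diff(Pprime, eps), eps))        # stationary point sqrt(R/P)
+ best = Pprime.subs(eps, sp.sqrt(R / P))
+ print(sp.simplify(best - (sp.sqrt(P) - sp.sqrt(R))**2))  # -> 0
+ ```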
437
+
438
+
439
+
440
+ # D IMPLEMENTATION AND SETUP
441
+
442
+ We implemented ALGnet in PyTorch and all our experiments can be run on Google's Colab platform (with GPU). In Alg. 1, we used batches of valuation profiles of size $B \in \{500\}$ and set $T \in \{160000, 240000\}$ , $T_{limit} \in \{40000, 60000\}$ , $T_{init} \in \{800, 1600\}$ and $\tau \in \{100\}$ .
443
+
444
+ We used the AdamW optimizer (Loshchilov & Hutter, 2017) to train the Auctioneer's and the Misreporter's networks with learning rate $\gamma \in \{0.0005, 0.001\}$ . Typical values for the architecture's parameters are $n_a = n_p = n_M \in [3, 7]$ and $h_a = h_p = h_M \in \{50, 100, 200\}$ . These networks are similar in size to the ones used for RegretNet in Duetting et al. (2019).
445
+
446
+ For each experiment, we compute the total revenue $rev \coloneqq \mathbb{E}_{V \sim D}[\sum_{i \in N} p_i^w(V)]$ and average regret $rgt \coloneqq 1 / n \mathbb{E}_{V \sim D}[\sum_{i \in N} r_i^w(V)]$ using a test set of 10,000 valuation profiles. We run each experiment 5 times with different random seeds and report the average and standard deviation of these runs.
auctionlearningasatwoplayergame/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bf216e00249350b5484b35514a1c01632fff47621fb1d35d21cdffa28bb8c0b
3
+ size 323682
auctionlearningasatwoplayergame/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e195497ee438399e4f7cb3f5627c2e93a77dd00c8630c2b45d2920509d54dfc9
3
+ size 729036
autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/d6d44d12-39ed-4868-9a0f-b1c48452e21c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3eb12319fe9d13006fec7b69995f20a6c4370338143ead0b9bfc2b1ba5cb02e5
3
+ size 138980
autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/d6d44d12-39ed-4868-9a0f-b1c48452e21c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b1d0cb3ccb96f46a391e98699fb0b04d3022a83b1ecfa9493972aba1985a5a5
3
+ size 167318
autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/d6d44d12-39ed-4868-9a0f-b1c48452e21c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cf5361655e7979bf36cd5eb8aa21e530a404b009ac519d6077081f26dbecb3a
3
+ size 1033829
autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/full.md ADDED
@@ -0,0 +1,463 @@
 
 
 
 
1
+ # AUTOLRS: AUTOMATIC LEARNING-RATE SCHEDULE BY BAYESIAN OPTIMIZATION ON THE FLY
2
+
3
+ Yuchen Jin, Tianyi Zhou, Liangyu Zhao
4
+
5
+ University of Washington
6
+
7
+ {yuchenj, tianyizh, liangyu}@cs.washington.edu
8
+
9
+ Yibo Zhu, Chuanxiong Guo
10
+
11
+ ByteDance Inc.
12
+
13
+ {zhuyibo, guochuanxiong}@bytedance.com
14
+
15
+ Marco Canini
16
+
17
+ KAUST
18
+
19
+ marco@kaust.edu.sa
20
+
21
+ Arvind Krishnamurthy
22
+
23
+ University of Washington
24
+
25
+ arvind@cs.washington.edu
26
+
27
+ # ABSTRACT
28
+
29
+ The learning rate (LR) schedule is one of the most important hyper-parameters needing careful tuning in training DNNs. However, it is also one of the least automated parts of machine learning systems and usually costs significant manual effort and computation. Though there are pre-defined LR schedules and optimizers with adaptive LR, they introduce new hyperparameters that need to be tuned separately for different tasks/datasets. In this paper, we consider the question: Can we automatically tune the LR over the course of training without human involvement? We propose an efficient method, AutoLRS, which automatically optimizes the LR for each training stage by modeling training dynamics. AutoLRS aims to find an LR applied to every $\tau$ steps that minimizes the resulting validation loss. We solve this black-box optimization on the fly by Bayesian optimization (BO). However, collecting training instances for BO requires a system to evaluate each LR queried by BO's acquisition function for $\tau$ steps, which is prohibitively expensive in practice. Instead, we apply each candidate LR for only $\tau' \ll \tau$ steps and train an exponential model to predict the validation loss after $\tau$ steps. This mutual-training process between BO and the loss-prediction model allows us to limit the training steps invested in the BO search. We demonstrate the advantages and the generality of AutoLRS through extensive experiments of training DNNs for tasks from diverse domains using different optimizers. The LR schedules auto-generated by AutoLRS lead to a speedup of $1.22 \times$ , $1.43 \times$ , and $1.5 \times$ when training ResNet-50, Transformer, and BERT, respectively, compared to the LR schedules in their original papers, and an average speedup of $1.31 \times$ over state-of-the-art heavily-tuned LR schedules.
30
+
31
+ # 1 INTRODUCTION
32
+
33
+ In the regime of deep learning, the success of training largely depends on the choice of the learning rate (LR) schedule, since most optimizers will have difficulty traversing a non-smooth and non-convex loss landscape with multiple local minima and possibly saddle points (Kawaguchi, 2016; Jin et al., 2017; Goodfellow et al., 2016; Li et al., 2018a). To achieve stable and fast convergence towards a solution with good generalization performance, one has to tune the LR schedule carefully for different tasks (Nar & Sastry, 2018; Jastrzebski et al., 2017). This tuning is usually non-trivial and requires many trial-and-error iterations that are computationally expensive. Moreover, the randomness of the widely-used mini-batch stochastic gradient descent (SGD) may introduce more uncertainty and difficulty in the tuning process. For the same reasons, it is also hard to directly formulate the search for the LR schedule as a well-posed optimization problem and address it through standard optimization.
34
+
35
+ The broadly-adopted strategy is to either pick one from a family of pre-defined LR schedules or apply an optimizer that has a built-in mechanism for changing the LR adaptively. However, we have a limited number of choices for pre-defined LR schedules, most of which are simple functions such as exponential or cosine decay and thus cannot perfectly align with the non-smooth loss landscape. The latter set of adaptive optimizers, e.g., Adam (Kingma & Ba, 2015) and Adadelta (Zeiler, 2012), are extended from convex optimization and rely on strong assumptions to make the convergence properties hold. Moreover, the methods in both categories introduce new hyper-parameters that have to be tuned separately for different tasks or datasets, requiring significant human involvement.
36
+
37
+ In this paper, we study the question: can we automatically tune the LR over the course of training without human involvement? At the beginning of every $\tau$ steps (i.e., a "stage" in our method), we seek to identify an LR that optimizes the validation loss (i.e., an empirical estimate of the generalization error) at the end of the stage. To do so, we employ Bayesian optimization (BO) that treats the validation loss as a black-box function of LR. BO simultaneously updates a posterior estimation of the black-box function and searches for the best LR with respect to the posterior. This approach is, however, computationally expensive since estimating the posterior needs many (input, output) instances of the function, and acquiring each instance costs $\tau$ steps of training. We, therefore, develop a simple yet efficient approximation: for every LR that BO decides to evaluate, we train the model by using the LR for only $\tau' \ll \tau$ steps and use the validation loss over the $\tau'$ steps to train a time-series forecasting model that provides a prediction of the validation loss after $\tau$ steps. As we will show later, an exponential model suffices to produce accurate predictions when using a small $\tau' = \tau / 10$ . Then, AutoLRS can allow BO to explore ten different LRs in each stage and still bound the total running time to approximately twice the training cost associated with the generated schedule, i.e., the time spent to find the stage-specific LRs is roughly equal to the time spent training the model with the identified LRs.
38
+
39
+ AutoLRS does not depend on a pre-defined LR schedule, dataset, or a specified task and is compatible with almost all optimizers. Hence, it can be generally deployed across a broad range of ML tasks without much human involvement or expensive tuning over choices of LR schedules and their hyperparameters. Moreover, since it directly minimizes the validation loss, it not only accelerates convergence but also improves generalization compared to just minimizing the training loss. Furthermore, AutoLRS only needs to update two extremely light-weight models, i.e., the BO posterior and the exponential forecasting model, and it is efficient in exploring the loss landscape. Hence, it does not result in notable extra costs in either memory or computation. Note that AutoLRS searches for better LRs based on the training dynamics, which can be seen as a form of self-supervision. The interaction between BO and the forecasting model is an example of mutual learning, where one produces training data for the other.
40
+
41
+ In experiments, we apply AutoLRS to train three representative DNNs widely used in practice, i.e., ResNet-50 (He et al., 2016a) on ImageNet classification (Russakovsky et al., 2015); Transformer (Vaswani et al., 2017) and BERT (Devlin et al., 2019) for NLP tasks. Though these models have been extensively studied and come with hand-tuned LR schedules, the LR schedules computed by AutoLRS are faster than the original hand-tuned schedules by $1.22 \times$ , $1.43 \times$ , and $1.5 \times$ for training ResNet-50, Transformer, and BERT, respectively, in terms of the training steps used to update the DNN (i.e., excluding the costs of the LR/hyperparameter search). Meanwhile, it achieves test-set performance better than or on par with state-of-the-art results. We also carefully hand-tuned two state-of-the-art learning rate schedules, CLR (Smith, 2017) and SGDR (Loshchilov & Hutter, 2017), and conducted more than ten experiments with different CLR/SGDR hyperparameters on each model. AutoLRS still has an average speedup of $1.29 \times$ and $1.34 \times$ across the three models, in terms of training steps, compared to the best CLR and SGDR LR schedules, respectively. The AutoLRS implementation is available at https://github.com/YuchenJin/autolrs.
42
+
43
+ # 2 RELATED WORK
44
+
45
+ Learning rate scheduling: In contrast to traditional LR schedules with a monotonically decreasing sequence of LRs and multi-step decay, a recent class of LR schedules applies multiple cycles of LR decay. Cyclical Learning Rate (CLR) cycles the LR between a minimal LR $(\eta_{\mathrm{min}})$ and a maximal LR $(\eta_{\mathrm{max}})$ at a pre-defined frequency and achieves faster convergence for some DNNs (Smith, 2017). The approach requires an "LR range test" to estimate the minimal and maximal LR. The $LR$ range test trains the model with a linearly-increasing LR between a low LR
46
+
47
+ and a high LR, and finds the LR range $([\eta_{\mathrm{min}},\eta_{\mathrm{max}}])$ over which the training loss decreases. The authors proposed three variants of CLR: triangular2, which halves the maximum LR bound after each cycle; $exp\_range$ , which exponentially reduces the maximum LR bound after each cycle; and 1cycle, containing only one triangular cycle (Smith, 2018). Similar to CLR, Stochastic Gradient Descent with Warm Restarts (SGDR) restarts the LR and then applies cosine annealing/decay at a pre-defined frequency (Loshchilov & Hutter, 2017). Neither CLR nor SGDR is automatic, because both are quite sensitive to their hyperparameters, which require careful hand-tuning. CLR and SGDR may even cause undesirable divergence in loss during training with suboptimal hyperparameters (see §5).
48
+
49
+ Learning rate adaptation with hypergradient descent: Aiming for the same goal of automatically tuning the LR, hypergradient-based techniques (Almeida et al., 1998; Franceschi et al., 2017; Baydin et al., 2018; Donini et al., 2020) optimize the LR schedule by applying gradient descent of the objective function w.r.t. the LR during training. In addition to the initial value of the regular LR, they introduce an additional hypergradient LR whose initial value is another hyperparameter to be specified. We experimentally show that this technique is subject to overfitting, is quite sensitive to its two hyperparameters, and is unable to match the state-of-the-art test-set performance on the models we test (§A.5.1). We also compare its performance against AutoLRS (§A.5.2).
50
+
51
+ DNN hyperparameter optimization: Automatic hyperparameter search for DNNs has been broadly studied in recent years. When applied to learning rates, these methods can determine an optimized value for an LR that is kept constant (or constrained to be a pre-defined shape) through the entire training process, as opposed to determining an LR schedule. They can be primarily categorized into Bayesian optimization based approaches (Hutter et al., 2011; Snoek et al., 2012; Bergstra et al., 2013), bandit-based solutions (Li et al., 2017; 2018b), hybrid approaches that combine bandit-based and Bayesian optimization based approaches (Falkner et al., 2018; Zela et al., 2018), and population-based methods (Jaderberg et al., 2017; Parker-Holder et al., 2020). It might be possible to extend these techniques to determine an LR schedule with an optimized LR for each training stage, but doing so is neither sample-efficient nor time-efficient since the LR schedule would correspond to hundreds or thousands of hyperparameters.
52
+
53
+ Optimization methods with adaptive LR: These optimizers can adaptively adjust LR for each training step by maintaining an estimate of a better learning rate separately for each parameter in the DNN. Adagrad (Duchi et al., 2011) applies lower LRs to parameters with larger accumulated gradients and higher learning rates to the ones with smaller accumulated gradients. RMSprop (Tieleman & Hinton, 2012), AdaDelta (Zeiler, 2012), and Adam (Kingma & Ba, 2015) were later proposed to address the issue in Adagrad that the model stops learning due to the continual decay of LR. These optimizers with adaptive LR are orthogonal to our automatic LR scheduler, and they still require a global learning rate schedule, which can be obtained from our AutoLRS. In particular, their default hyperparameters do not always work well and need careful tuning, e.g., Adam's default LR 0.001 performs poorly in training BERT and Transformer, and a better-tuned LR schedule can significantly reduce the training time (§5). Recent optimization methods (Schaul et al., 2013; Mahsereci & Hennig, 2015) proposed to remove the need for LR tuning in SGD altogether, but they are not widely used potentially due to their limited applicability and sub-optimal performance (Baydin et al., 2018).
54
+
55
+ # 3 PROBLEM FORMULATION
56
+
57
+ Training of DNNs can be written in a general form of minimizing a loss function $L(x; \theta)$ over training samples $x \in D_{train}$ , where $\theta$ represents the model weights being optimized. The minimization is conducted by applying an optimizer that updates $\theta$ iteratively. For example, at each step $t$ , mini-batch SGD updates $\theta$ using the gradient computed on a mini-batch of samples $B_{train} \subseteq D_{train}$ :
58
+
59
+ $$
60
+ \theta_{t+1} = \theta_{t} - \frac{\eta_{t}}{\left| B_{\text{train}} \right|} \sum_{x \in B_{\text{train}}} \nabla_{\theta} L(x; \theta_{t}), \tag{1}
61
+ $$
62
+
63
+ where $\eta_t$ is the learning rate (LR) at step $t$ and $\nabla_{\theta}L(x;\theta_t)$ denotes the gradient of the loss $L(x;\theta)$ w.r.t. $\theta_t$ at step $t$ . Given $B_{train}$ and $\theta_t$ , $\theta_{t+1}$ can be represented as a function of the LR $\eta_t$ , i.e., $\theta_{t+1}(\eta_t)$ .
64
+
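For concreteness, one update of Eq. (1) takes only a few lines of PyTorch; this sketch is illustrative (the names are ours) and assumes `loss_fn` averages over the mini-batch, which matches the $1/|B_{train}|$ factor.

```python
# A minimal sketch of the mini-batch SGD update in Eq. (1).
import torch

def sgd_step(model, loss_fn, batch_x, batch_y, lr):
    loss = loss_fn(model(batch_x), batch_y)  # (1/|B|) * sum_x L(x; theta_t)
    model.zero_grad()
    loss.backward()                          # gradients w.r.t. theta_t
    with torch.no_grad():
        for p in model.parameters():
            p -= lr * p.grad                 # theta_{t+1} = theta_t - eta_t * grad
```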
65
+ Our ultimate goal is to search for an optimal schedule of LR, i.e., a sequence of LRs $\eta_{1:T} \triangleq (\eta_1, \eta_2, \dots, \eta_T)$ applied to the total $T$ training steps, such that the generalization error can be minimized. Ideally, we need to optimize the entire sequence of LRs. This, however, is intractable in practice given the enormous number of possible LR schedules and because evaluating each candidate schedule requires a full training run of $T$ steps. Hence, we break down the LR schedule optimization into a dynamic optimization of a constant LR for every $\tau$ steps, which we refer to
66
+
67
+ as a "training stage". Since most tasks prefer a relatively small LR due to the non-smoothness of DNNs' loss landscapes, when $\tau$ is also small, the LR-resulted change on the validation loss might be too small and overwhelmed by the randomness of mini-batch SGD. Hence, in this case, we need to increase $\tau$ , so the effect of LR $\eta$ on the validation loss can be accumulated for more steps to overcome noise. A large $\tau$ also reduces the frequency of applying LR search and saves computation. On the other hand, setting $\tau$ to be too large might lose some optimality of the induced LR schedule. Therefore, we need to trade-off the above two issues to find an appropriate $\tau$ . In our final algorithm, we propose a curriculum for $\tau$ , i.e., we start from a small $\tau$ , in line with the greater volatility during early stages, and gradually increase $\tau$ as training proceeds (as described in §4.4). Since we mainly focus on LR search within a stage, for simplicity, we will use $\tau$ instead of $\tau_{t}$ for the exposition below.
68
+
69
+ We study a greedy approach and split the whole training process into multiple stages of $\tau$ steps each. We choose an LR at the beginning of each stage and apply $\tau$ steps of optimization using this LR, i.e., at step- $t = 0, \tau, 2\tau, \dots, T - \tau$ , we aim to find the LR $\eta_{t:t + \tau}$ that minimizes the validation loss on $D_{val}$ (i.e., an estimate of the generalization error) after step- $(t + \tau)$ . This can be formulated as:
70
+
71
+ $$
72
+ \min_{\eta} \sum_{x \in D_{val}} L(x; \theta_{t+\tau}(\eta)), \quad t = 0, \tau, 2\tau, \dots, T - \tau. \tag{2}
73
+ $$
74
+
75
+ We try to sequentially solve $\lfloor T / \tau \rfloor$ sub-problems of the above form. However, we cannot apply standard optimization to solve each sub-problem in practice because: (i) it is a high-order optimization of $\eta$ since we need to unroll $\theta_{t + \tau}$ in Eq. (2) backward for $\tau$ steps using Eq. (1), which requires prohibitive memory and is unstable for DNNs; (ii) one step of optimizing $\eta$ needs to apply $\tau$ steps of optimization on $\theta$ , which is costly and weakens the advantage of searching LR for better efficiency. To avoid these issues, we treat the objective function in Eq. (2) for $t:t + \tau$ as a black-box function $f_{t}(\eta)$ and study how to optimize it based on the observed training dynamics through Bayesian optimization (BO).
76
+
77
+ # 4 AUTOMATIC LEARNING RATE SCHEDULE SEARCH
78
+
79
+ We first elaborate on the details of our BO algorithm (§4.1) that identifies the LR for each stage<sup>1</sup>. However, collecting even one data point $(\eta, f(\eta))$ for BO requires us to train the model for $\tau$ steps, which is costly and impractical since the LR computed by the entire BO process is used for only $\tau$ steps. To reduce the cost of generating instances of $(\eta, f(\eta))$ , in §4.2 and §A.3, we propose to train a light-weight time-series forecasting model to predict $f(\eta)$ based on the validation loss observed during the first $\tau'$ ( $\tau' \ll \tau$ ) steps of applying LR $\eta$ . We find that a simple exponential model suffices to produce accurate predictions. Our LR search then reduces to a mutual-training process between BO and the forecasting model, where one produces training instances for the other. The resulting algorithm can automatically find an LR schedule without introducing significant extra computation.
80
+
81
+ # 4.1 BAYESIAN OPTIMIZATION
82
+
83
+ BO (Shahriari et al., 2016) is one of the state-of-the-art techniques for black-box optimization. It applies exploration and exploitation to the objective by sequentially and actively querying the function values of some input instances. Specifically, BO uses a Gaussian process as a surrogate model (prior) to fit the black-box objective function $f(\eta)$ . It sequentially updates a posterior of $f(\eta)$ by using its likelihood on newly evaluated $(\eta_i', y_i = f(\eta_i') + \epsilon)$ pairs $^2$ , where $y_i$ is a noisy observation of $f(\eta_i')$ and is the validation loss after $\tau$ steps. Then, it finds the next $\eta_{i+1}'$ to evaluate based on an acquisition function $u_i(\eta)$ defined by the posterior mean $\mu_i(\eta)$ and standard deviation $\sigma_i(\eta)$ . $u_i(\eta)$ performs a trade-off between exploration (i.e., large $\sigma_i(\eta)$ ) and exploitation (i.e., small $\mu_i(\eta)$ ). In AutoLRS, we use Lower Confidence Bound (LCB) (Cox & John, 1992; Auer, 2002) as $u_i(\eta)$ . Given $\eta_{1:i}^{\prime}$ and their corresponding validation loss $y_{1:i}$ , we determine the next LR $\eta_{i+1}'$ by minimizing LCB, i.e.,
84
+
85
+ $$
86
+ \eta_{i+1}^{\prime} = \arg \min_{\eta} u_{i}(\eta), \quad u_{i}(\eta) \triangleq \mu_{i}(\eta) - \kappa \sigma_{i}(\eta), \tag{3}
87
+ $$
88
+
89
+ where $\mu_i(\eta)$ and $\sigma_i(\eta)$ are defined in Eq. (7) in §A.1, $\kappa$ is a positive hyper-parameter to balance exploration and exploitation. In experiments, $\kappa = 1000$ works consistently well. BO repeats the above process until it achieves a precise posterior distribution of $f(\eta)$ . See §A.1 for more details.
90
+
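A hedged sketch of this LCB step, using scikit-learn's GP regressor with the Matern kernel from §A.1; the grid search over log-LRs and all names are our illustration, not the AutoLRS implementation.

```python
# Illustrative LCB acquisition (Eq. (3)) over a GP posterior in log-LR space.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

def next_lr_lcb(log_lrs, losses, lr_min, lr_max, kappa=1000.0):
    gp = GaussianProcessRegressor(kernel=Matern(length_scale=1.0, nu=2.5))
    gp.fit(np.asarray(log_lrs).reshape(-1, 1), np.asarray(losses))
    grid = np.linspace(np.log10(lr_min), np.log10(lr_max), 1000).reshape(-1, 1)
    mu, sigma = gp.predict(grid, return_std=True)
    return 10.0 ** grid[np.argmin(mu - kappa * sigma), 0]  # arg min of the LCB
```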
91
+ Algorithm 1: AutoLRS
92
+ Input: (1) Number of steps in each training stage, $\tau$ (2) Learning-rate search interval $(\eta_{\mathrm{min}},\eta_{\mathrm{max}})$ (3) Number of LRs to evaluate by BO in each training stage, k (4) Number of training steps to evaluate each LR in BO, $\tau^{\prime}$ (5) Trade-off weight in the acquisition function of BO, $\kappa$
93
+ 1 while not converged do
94
+ 2 initialize a GP prior: $\mu_0(\eta) = 0,\sigma_0^2 (\eta) = K(\eta ,\eta)$ defined in Eq. (4) in $\S \mathrm{A}.1$ .
95
+ 3 $c\gets$ checkpoint of model parameters and optimizer states;
96
+ 4 for $i\gets 1$ to k do /\* mutual-training loop between BO and loss forecasting model \*/
97
+ 5 choose the next LR to explore: $\eta_i^\prime = \arg \min_\eta \mu_{i - 1}(\eta) - \kappa \sigma_{i - 1}(\eta);$
98
+ 6 $y_{1:\tau '}\gets$ train the DNN with LR $\eta_i^\prime$ for $\tau '$ steps and record the corresponding validation loss series;
99
+ 7 $y_{i}\gets$ train an exponential forecasting model on $y_{1:\tau '}$ and predict the validation loss after $\tau$ steps;
100
+ 8 update the GP posterior by $(\eta_i^\prime ,y_i)$ and update new $\mu_{i}(\eta)$ and $\sigma_{i}(\eta)$ using Eq. (7) in $\S \mathrm{A}.1$
101
+ 9 restore the checkpoint $c$ of model parameters and optimizer states;
102
+ 10 end
103
+ 11 $\eta^{*}\gets$ the LR with the minimal predicted validation loss $\mu_k(\eta)$ among the k explored LRs $\eta_{1:k}^{\prime}$ above;
104
+ 12 train the DNN using LR $\eta^{*}$ for $\tau$ steps; /\* training model using BO-searched best learning rate \*/
105
+ 13 end
106
+
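Putting the pieces together, one stage of Algorithm 1 can be paraphrased as below; this condensed sketch reuses `next_lr_lcb` from the LCB sketch above and the `fit_exponential_and_forecast` helper sketched in §4.2, and it is our illustration rather than the released implementation at https://github.com/YuchenJin/autolrs.

```python
# Condensed, illustrative paraphrase of one stage of Algorithm 1.
import copy
import numpy as np

def autolrs_stage(model, optimizer, train_fn, tau, tau_prime, k,
                  lr_min, lr_max, kappa=1000.0):
    ckpt = copy.deepcopy((model.state_dict(), optimizer.state_dict()))
    log_lrs, preds = [], []
    for _ in range(k):
        lr = (next_lr_lcb(log_lrs, preds, lr_min, lr_max, kappa)
              if log_lrs else (lr_min * lr_max) ** 0.5)   # first query
        val_series = train_fn(lr, tau_prime)              # tau' trial steps
        preds.append(fit_exponential_and_forecast(val_series, tau))
        log_lrs.append(np.log10(lr))
        model.load_state_dict(ckpt[0])                    # restore checkpoint
        optimizer.load_state_dict(ckpt[1])
    best_lr = 10.0 ** log_lrs[int(np.argmin(preds))]
    train_fn(best_lr, tau)              # commit: train tau steps with best LR
    return best_lr
```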
107
+ # 4.2 TIME-SERIES FORECASTING MODEL OF LOSS
108
+
109
+ Typically, BO would require $\tau$ training steps to measure the validation loss associated with every LR $\eta$ that it considers during a stage. This is computationally expensive. We now introduce a simple yet effective approach that substantially reduces the number of training steps required to evaluate each LR candidate: for each LR $\eta$ that is evaluated, we only apply it for $\tau'\ll \tau$ steps and use the validation loss observed in the $\tau'$ steps to train a short-term time-series forecasting model. We then use the resulting forecasting model to predict the validation loss after $\tau$ steps.
110
+
111
+ In numerous experiments, we observed that when a DNN is trained with a reasonable LR, the validation loss typically decreases exponentially and converges to a small value. We show examples of practical loss time series and their exponential-model fitting results in Figure 3. Moreover, recent deep learning theory (Allen-Zhu et al., 2019b) also proves the linear convergence of training DNNs. In addition, a simple model to fit the observed loss time-series can filter the noise and avoid possible overfitting. Hence, we propose to train an exponential model of the form $L(t) = a\exp(bt) + c$ , with parameters $a$ , $c$ , and $b < 0$ , for $t = 1,\dots,\tau$ , as the forecasting model for the time series of the validation loss in a training stage of $\tau$ steps with a given LR $\eta$ . §A.2 describes how we estimate $a$ , $b$ , and $c$ based on the validation loss observed in the first $\tau^{\prime}$ steps, and §A.3 describes how we filter out noise and outliers.
112
+
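A minimal fitting sketch, assuming SciPy's `curve_fit`; this is our illustration of the idea, not the estimator described in §A.2, and the initial guess and the bound on $b$ are our choices.

```python
# Fit L(t) = a * exp(b * t) + c to the first tau' losses, then extrapolate.
import numpy as np
from scipy.optimize import curve_fit

def fit_exponential_and_forecast(val_series, tau):
    t = np.arange(1, len(val_series) + 1, dtype=float)
    f = lambda t, a, b, c: a * np.exp(b * t) + c
    # b is constrained to be negative so the fitted loss decays over time.
    (a, b, c), _ = curve_fit(
        f, t, val_series,
        p0=(val_series[0], -0.01, val_series[-1]),
        bounds=([-np.inf, -np.inf, -np.inf], [np.inf, 0.0, np.inf]),
    )
    return f(float(tau), a, b, c)  # predicted validation loss after tau steps
```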
113
+ # 4.3 MUTUAL TRAINING BETWEEN BO AND EXPONENTIAL PREDICTION
114
+
115
+ We present the complete procedure of AutoLRS in Algorithm 1. It sequentially optimizes LR for every training stage during the training of a DNN model, solely based on the observed training dynamics, and it can be seen as a form of self-supervision. For each training stage, it searches for the LR that leads to the largest improvement in the validation loss via an efficient black-box function optimization conducted by a mutual training loop between Bayesian optimization and a short-term forecasting model for each loss series. It then applies the best LR among the explored ones for $\tau$ steps and repeats the above process until convergence.
116
+
117
+ In line 5, the algorithm solves a constrained optimization problem over $\eta$ , in the range of $[\eta_{min}, \eta_{max}]$ . In practice, we prefer a large learning-rate search interval $(\eta_{\mathrm{min}}, \eta_{\mathrm{max}})$ , across orders of magnitude, but also need fine-grained optimization over small LRs. Hence, we operate on $\eta$ in its log-scale space, i.e., we replace $\eta$ by $\log \eta$ in Algorithm 1, except in lines 6 and 12 when we use the original LR (rather than $\log \eta$ ) to train the DNN.
118
+
119
+ At the end of each iteration in the mutual training loop (line 9), we restore the checkpoint $c$ of model parameters and optimizer states to the one saved at the beginning of the training stage<sup>3</sup>. By doing so,
120
+
121
+ we guarantee that the $k$ different LRs all start from the same model and their losses can be compared. $\S A.4$ illustrates how BO learns the underlying function in practice for early and late stages of training.
122
+
123
+ Hyperparameters: AutoLRS substantially reduces the number of hyperparameters that need to be hand-tuned in existing LR schedules or policies. However, as shown in Algorithm 1, we still have hyperparameters in AutoLRS. First, we need to set a search interval $(\eta_{\mathrm{min}}, \eta_{\mathrm{max}})$ for the LR. However, this interval can be set reasonably wide by using an $LR$ range test (Loshchilov & Hutter, 2017) as we will show in §5. Secondly, our default settings of $k$ , $\tau'$ , $\tau$ , and $\kappa$ work well for a diverse set of DNN models from different domains and tasks, though it is possible to achieve further improvements by fine-tuning them.
124
+
125
+ # 4.4 PRACTICAL IMPROVEMENTS
126
+
127
+ We found the following modifications can further improve the performance of AutoLRS in practice.
128
+
129
+ Gradually increase $\tau$ over the course of training: Often, in DNN training, the loss and the model parameters experience rapid changes only during the first few epochs before they enter a phase of stable improvement. Our approach can adapt to this phenomenon. For the early stages, when the loss is less predictable for the time-series forecasting model, we use a small $\tau$ (and $\tau'$ ). As training proceeds and the model becomes stable, we gradually increase $\tau$ (and $\tau'$ ) and adjust the LR more lazily. This curriculum of increasing $\tau$ places more exploration in earlier stages and more exploitation in later stages. In practice, we start with $\tau = 1000$ and $\tau' = 100$ , and double them after every stage until it reaches $\tau_{\mathrm{max}}$ . $\tau_{\mathrm{max}}$ is a hyperparameter that limits the maximum number of steps in a stage. We will discuss more of $\tau_{\mathrm{max}}$ in §5. This gradual increase of $\tau$ can provide stability to the LR schedule search. Similar strategies have been widely used in previous pre-defined LR schedules, e.g., the multi-stage schedule with increasing epochs within each stage, and some recent cyclical LR schedules (Loshchilov & Hutter, 2017).
130
+
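In code, this curriculum is a small generator (our illustration); it also keeps $\tau' = \tau / 10$ as in the default setting of §5.

```python
# Illustrative tau curriculum: start at 1000 steps, double each stage, cap at tau_max.
def tau_schedule(tau_max, tau0=1000):
    tau = tau0
    while True:
        yield min(tau, tau_max), min(tau, tau_max) // 10  # (tau, tau')
        tau *= 2
```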
131
+ Minimizing training loss in early stages: Computing the validation loss series for a candidate $\eta^{\prime}$ requires considerable computation if we were to use the entire validation dataset at each step of mutual training. Recall, however, that the primary purpose of minimizing the validation loss instead of the training loss is to avoid overfitting on the training set when the training loss notoriously deviates from the generalization error. However, a variety of empirical evidence and recent theory (Allen-Zhu et al., 2019a) show that overfitting is unlikely while training over-parameterized DNNs due to the inductive bias of random initialization and SGD, especially during the early phase of training. Hence, in practice, for the first several training stages, we can safely approximate the validation loss in our method by the corresponding training loss, which is a by-product of forward propagation and free to obtain. In later stages (i.e., once $\tau$ reaches $\tau_{\mathrm{max}}$ ), since the model is stable and the loss changes smoothly, we can evaluate the validation loss on a small subset of the validation set without compromising robustness. In our experiments, this set is composed of merely 10 mini-batches, and we evaluate the validation loss on them every 50 training steps (as opposed to every step). Therefore, the evaluation of validation loss in our approach does not introduce notable extra computations<sup>4</sup>.
132
+
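A sketch of this cheap validation proxy (ours); the fixed subset of 10 mini-batches and the 50-step evaluation interval follow the text above.

```python
# Approximate the validation loss on a small fixed subset of mini-batches,
# intended to be called every 50 training steps in the later stages.
import torch

@torch.no_grad()
def cheap_val_loss(model, loss_fn, val_batches):
    model.eval()
    losses = [loss_fn(model(x), y).item() for x, y in val_batches]
    model.train()
    return sum(losses) / len(losses)
```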
133
+ # 5 EXPERIMENTS
134
+
135
+ We now evaluate AutoLRS by applying it to three widely-used and representative DNNs: ResNet-50, Transformer, and BERT. Here are some highlights:
136
+
137
+ - The LR schedules computed by AutoLRS are $1.22 \times$ , $1.43 \times$ , and $1.5 \times$ faster, in terms of training steps, than the original, hand-tuned LR schedules for ResNet-50, Transformer, and BERT, respectively. Meanwhile, it improves or matches the test-set performance.
138
+ - For each model, we carefully hand-tuned CLR and SGDR using more than ten experiments with different CLR/SGDR hyperparameters. Across the three models, the LR schedules computed by AutoLRS achieve an average speedup of $1.29 \times$ and $1.34 \times$ , in terms of training steps, over the best tuned LR schedules under CLR and SGDR, respectively. While CLR and SGDR had to be run
139
+
140
+ for at least 10 trials to find a good LR schedule, AutoLRS only costs slightly over $2 \times$ the training time associated with the computed LR schedule even after accounting for the BO search cost.
141
+
142
+ - AutoLRS is robust to the change of hyperparameters and consistently finds better LR schedules than other baselines. In contrast, CLR and SGDR are sensitive to the choices of hyperparameters.
143
+ - We perform ablation studies in §A.5.4 to demonstrate that both BO and the exponential forecasting model are essential for AutoLRS to find good LR schedules.
144
+ - Hypergradient descent is subject to overfitting, and it is unable to match the state-of-the-art test-set performance using all the guideline values of its two hyperparameters on VGG-16 (Simonyan & Zisserman, 2015) and ResNet-50 ( $\S$ A.5.1). In contrast, AutoLRS can consistently improve or match the state-of-the-art test-set performance with different $\tau_{\mathrm{max}}$ values using fewer training steps than the hand-tuned LR schedules ( $\S$ A.5.2).
145
+ - Using Hyperband (Li et al., 2017) for LR schedule search incurs a high computational overhead. Moreover, it cannot find an LR schedule that matches the state-of-the-art accuracy (§A.5.3).
146
+
147
+ Baseline Setup: ML practitioners typically need to hand-tune the LR schedules carefully for a long time to achieve satisfying performance, so the LR schedule adopted in each model's original paper is a presumably tough-to-beat baseline to compare with. For CLR and SGDR, we hand-tune their hyperparameters separately for each DNN. Hyperparameters in CLR include the high/low LR for the $LR$ range test to sweep, the number of steps to perform the test, the number of steps in each triangular cycle, and the choice of variants (triangular2, exp_range, 1cycle) introduced in §2. Hyperparameters in SGDR include the number of steps/epochs in each cycle and the initial LR at the beginning of each cycle. We carefully tuned these hyperparameters separately for each DNN and chose the LR schedule producing the best validation-set performance among $\geq 10$ trials of different hyperparameters.
148
+
149
+ Hyperparameters in AutoLRS: In our default setting, we set $k = 10$ and $\tau' = \tau / 10$ so that the training steps spent on BO equals the training steps spent on updating the DNN model. We start from $\tau = 1000$ and $\tau' = 100$ and double $\tau$ and $\tau'$ after each stage until $\tau$ reaches $\tau_{\max}$ . We use $\tau_{\max} = 8000$ for ResNet-50 and Transformer, $\tau_{\max} = 32000$ for BERT. We also tried $\tau_{\max} = 8000$ , 16000, and 32000 for each DNN and found that the resulting LR schedules are not very sensitive to $\tau_{\max}$ . (An analysis of the sensitivity to $\tau_{\max}$ is in §A.5.2.) The LR search interval $(\eta_{\min}, \eta_{\max})$ for ResNet-50, Transformer, and BERT are $(10^{-3}, 1)$ , $(10^{-4}, 10^{-2})$ , and $(10^{-6}, 10^{-3})$ , respectively. These are easily found by an LR range test (Loshchilov & Hutter, 2017).
150
+
151
+ ResNet-50: ResNet (He et al., 2016a;b) is one of the most popular DNNs in computer vision tasks. We train ResNet-50 on ImageNet (Russakovsky et al., 2015) using SGD with momentum on 32 NVIDIA Tesla V100 GPUs with data parallelism and a mini-batch size of 1024. The LR schedule in the original paper adopts a warmup phase of 5 epochs at the beginning and performs a 3-step decay as in (Goyal et al., 2017). Figure 1a presents different LR schedules for training ResNet-50 on ImageNet. We report how their top-1 accuracy on the validation set changes during training in Figure 1b. AutoLRS achieves a speedup of $1.19 \times$ and $1.22 \times$ over SGDR and the original LR schedule respectively but is slightly (i.e., $5.4\%$ ) slower than CLR. Note that the best CLR result is achieved after 10 trials of heavy hand-tuning to hyperparameters. (In fact, 7 out of 10 CLR trials failed to achieve the best possible test-set accuracy, and the second best and the third best trials are $5.4\%$ and $7.9\%$ slower than AutoLRS). AutoLRS achieves competitive speed even though it invests a significantly lower search cost that is comparable to the overall model update time associated with the identified LR schedule.
152
+
153
+ Transformer: Transformer (Vaswani et al., 2017) is a neural machine translation (NMT) model that is built upon a multi-head self-attention mechanism to capture the contextual dependencies and achieves promising translation performance. We train $\mathrm{Transformer}^6$ on a standard benchmark, i.e., the WMT 2014 English-German dataset, using 8 NVIDIA Tesla V100 GPUs. Following (Vaswani et al., 2017), we use Adam (Kingma & Ba, 2015) with $\beta_{1} = 0.9$ , $\beta_{2} = 0.98$ , and $\epsilon = 10^{-9}$ . The LR schedule in the original paper starts from a linear warmup of 4,000 steps from 0 to $7 \times 10^{-4}$ , followed by 96,000 steps of decaying the LR proportionally to $1/\sqrt{t}$ for step- $t$ . In AutoLRS, we also use the same linear warmup. The current AutoLRS does not search LR for warmup steps since warmup
154
+
155
+ ![](images/e7fb691beb46b0600cb596f5c7fc52108a9d9901d1ce3e20a6d39c8fd7a3270d.jpg)
156
+ (a) LR on ResNet-50.
157
+
158
+ ![](images/89f0568afc998189359234d64203846d597ca01e4f5f4e0ea97373f1abcd9573.jpg)
159
+ (b) Val.Acc. on ResNet-50.
160
+
161
+ ![](images/2b1b30bb2efcb2f12cb658ee6d1dc35eb10fba58534787cbf41732595e541801.jpg)
162
+ (c) LR for Transformer.
163
+
164
+ ![](images/907a3c2578ae9fa5512f508fe58201b9afb3892b5aac5b1073deb25c1c2f5ebd.jpg)
165
+ (d) BLEU of Transformer.
166
+
167
+ ![](images/34744b8b8980f021398cf82c0efc7c0b358ea89259cbe9c7536ce77b6b9f6452.jpg)
168
+ Figure 1: Comparison of different LR schedules in training ResNet-50 on ImageNet (a, b), and the Transformer base model (c, d). When training ResNet-50, AutoLRS, CLR, SGDR, and the original LR achieve $75.9\%$ top-1 accuracy at epoch 74, 70, 88, and 90, respectively. When training Transformer base, AutoLRS, SGDR, and original achieve 27.3 BLEU score (uncased) at step 69,000, 91,000, 98,000, respectively. CLR (the best we were able to find) achieves 27.2 BLEU score at step 99,000.
169
+ (a) LR schedules (Phase 1 + 2).
170
+ Figure 2: Comparison of different LR schedules and training loss in pre-training $\mathrm{BERT}_{\mathrm{BASE}}$.
171
+
172
+ ![](images/9c47e0c63807c82d3391fa903c24f625871fec37acbf24143ccccc4cf739a40d.jpg)
173
+ (b) Training loss in Phase 1.
174
+
175
+ ![](images/f9731c283823513a7f5d82a8d3a179bf403f9ebf6b98150b6d091ee872672b57.jpg)
176
+ (c) Training loss in Phase 2.
177
+
178
+ does not have an explicit optimization objective, such as minimizing the validation loss. Warmup usually takes very few steps, and its main purpose is to prevent deeper layers in a DNN from creating training instability (Gotmare et al., 2019). Figure 1c visualizes different LR schedules in training the Transformer model. Their BLEU scores on the test set during training are reported in Figure 1d. Overall, the LR schedule searched by AutoLRS yields a $1.32 - 1.43 \times$ speedup over the hand-tuned LR schedules. AutoLRS consistently achieves a similar amount of speedup over three trials – they achieve 27.3 BLEU score (uncased) at step 69,000, 69,000, and 70,000, respectively. Interestingly, if we continue the LR search of AutoLRS, we can get 27.4 BLEU score (uncased) at step 99,000.
179
+
180
+ BERT Pre-training: BERT (Devlin et al., 2019) is a recent model that achieved state-of-the-art results on 11 NLP tasks. It first pre-trains a language representation model on a large text corpus by unsupervised learning and then fine-tunes it for downstream NLP tasks. The $\mathrm{BERT}_{\mathrm{BASE}}$ model has 110M parameters, which makes the pre-training phase expensive, and hand-tuning the LR schedule might be impractical. We pre-train $\mathrm{BERT}_{\mathrm{BASE}}$ with mixed precision (Micikevicius et al., 2018) on the English Wikipedia and the BooksCorpus dataset<sup>7</sup> (Zhu et al., 2015a). Following the original paper, we use Adam with L2 weight decay of 0.01 and $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ . The pre-training is divided into two phases: Phase 1 includes $90\%$ of the total training steps and uses a sequence length of 128, while Phase 2 uses a sequence length of 512 for the remaining $10\%$ of training steps. We apply this two-phase training in the experiments of all LR schedules. We pre-train $\mathrm{BERT}_{\mathrm{BASE}}$ on 32 NVIDIA Tesla V100 GPUs using a mini-batch size of 1024 sequences, which is $4\times$ the batch size in the original paper. To adapt the original LR schedule to our batch size, we tried both the linear scaling rule (Goyal et al., 2017) and the square root scaling rule (Krizhevsky, 2014), and found that the square root scaling rule works better while the linear scaling rule made the loss diverge.
181
+
182
+ As shown in Figure 2, Phase 1 contains 150,000/225,000 steps and Phase 2 contains 16,000/25,000 steps respectively for AutoLRS and all baselines, since AutoLRS requires far fewer total steps. In both AutoLRS and SGDR, we apply a linear warmup in the first 2,500 steps to make the deeper layers of BERT stable. In Figures 2b and 2c, we report the training loss achieved by different schemes.
183
+
184
+ We fine-tune the pre-trained models on four downstream NLP tasks: Microsoft Research Paraphrase Corpus (MRPC) for identifying semantic textual similarity (Dolan & Brockett, 2005); Multi-Genre Natural Language Inference (MNLI) for entailment classification (Williams et al., 2018); Corpus of Linguistic Acceptability (CoLA) for predicting whether an English sentence is linguistically acceptable (Warstadt et al., 2019); and Stanford Question Answering Dataset (SQuAD) v1.1 (Rajpurkar et al., 2016). Table 1 reports the after-fine-tuning performance on the four tasks. Since fine-tuning performance is unstable on small datasets like MRPC, we fine-tuned on each task several times and report the best Dev-set performance. It shows that the model pre-trained by AutoLRS outperforms
185
+
186
+ Table 1: Fine-tuning $\mathrm{{BERT}}{}_{\mathrm{{BASE}}}$ that is pre-trained using different LR schedules on 4 downstream tasks. We report the accuracy on the Dev set of MRPC, MNLI, and CoLA, and F1 scores on the Dev set of SQuAD v1.1.
187
+
188
+ <table><tr><td>LR schedule (Phase 1/Phase 2)</td><td>MRPC</td><td>MNLI</td><td>CoLA</td><td>SQuAD v1.1</td></tr><tr><td>Original (225,000/25,000)</td><td>86.5</td><td>82.2</td><td>47.8</td><td>87.0</td></tr><tr><td>CLR (225,000/25,000)</td><td>86.0</td><td>80.7</td><td>44.4</td><td>86.5</td></tr><tr><td>SGDR (225,000/25,000)</td><td>84.8</td><td>81.6</td><td>38.7</td><td>86.2</td></tr><tr><td>AutoLRS (150,000/16,000)</td><td>88.0</td><td>82.5</td><td>47.6</td><td>87.1</td></tr></table>
189
+
190
+ Table 2: Performance comparison with LR schedules searched by prior solutions on CIFAR-10 training with VGG-16 (batch size = 128). Note that the hand-tuned LR schedule can achieve 93.70% top-1 test accuracy in 350 epochs. The Runtime column shows how long each method takes on one NVIDIA Titan RTX GPU to find the LR schedule shown in the previous column. The runtime of HD and MARTHE include trying the guideline values of their hyperparameters to get a decent LR schedule.
191
+
192
+ <table><tr><td>Method</td><td>Best top-1 accuracy achieved in 350 epochs</td><td>Runtime (seconds)</td></tr><tr><td>HD</td><td>91.31%</td><td>187,110</td></tr><tr><td>MARTHE</td><td>92.99%</td><td>67,578</td></tr><tr><td>Hyperband</td><td>93.24%</td><td>109,454</td></tr><tr><td>AutoLRS</td><td>94.13%</td><td>6,538</td></tr></table>
193
+
194
+ those using other LR schedules in most downstream tasks and meanwhile achieves a speedup of $1.5 \times$ . Note AutoLRS consistently achieves this speedup over 3 trials (details in §A.5.5). We also tried pre-training using other LR schedules for fewer steps but the fine-tuning performances were worse. Notably, when we use CLR and SGDR for pre-training $\mathrm{BERT}_{\mathrm{BASE}}$ , the training loss diverged after 100,000 steps in several trials, even as we decreased the maximal LR and increased the number of steps per cycle. This illustrates how difficult and computationally intensive it is to hand-tune the hyperparameters of existing LR schedules on complicated models and tasks. In contrast, AutoLRS significantly simplifies the process and saves human effort.
195
+
196
+ Experimental comparison to prior methods: Hypergradient descent (HD) (Baydin et al., 2018) is a hypergradient based method to adjust the learning rate in an online fashion by deriving the derivative of the training loss with respect to the learning rate, and performing gradient descent on the learning rate during training. MARTHE (Donini et al., 2020) is a generalization of two hypergradient based methods, HD and RTHO (Franceschi et al., 2017). One distinction between MARTHE and HD is that MARTHE computes the gradient of the validation loss instead of training loss with respect to the learning rate. Hyperband is a multi-armed bandit approach for DNN hyperparameter optimization. We use HD, MARTHE, and Hyperband to tune the LR schedules for CIFAR-10 training with VGG-16, and compare their performance with AutoLRS in Table 2. AutoLRS achieves higher best top-1 test accuracy than the other methods as well as the hand-tuned LR schedule, with much less overhead. Detailed descriptions of these methods and the experimental results are in §A.5.1 and §A.5.3.
197
+
198
+ # 6 CONCLUSION
199
+
200
+ We propose an automatic learning-rate schedule method, AutoLRS, as a more efficient and versatile alternative to hand-tuning that can be broadly applied to train different DNNs for tasks in diverse application domains. We break down the sequence optimization into a learning-rate search that minimizes the validation loss in each training stage, and then solve this sub-problem by Bayesian optimization (BO). To reduce the cost of BO exploration, we train a light-weight loss-forecasting model from the early-stage training dynamics of BO exploration. AutoLRS achieves speedups of $1.22\times$ , $1.43\times$ , and $1.5\times$ on training ResNet-50, Transformer, and BERT compared to their highly hand-tuned schedules.
201
+
202
+ # ACKNOWLEDGMENTS
203
+
204
+ We would like to thank the anonymous ICLR reviewers for their valuable feedback. We would also like to thank Damien Fay for his suggestions on time series analysis. This work was partially supported by DARPA. For computer time, this research used the resources at ByteDance and the Supercomputing Laboratory at KAUST.
205
+
206
+ # REFERENCES
207
+
208
+ Zeyuan Allen-Zhu, Yuanzhi Li, and Yingyu Liang. Learning and generalization in overparameterized neural networks, going beyond two layers. In Advances in neural information processing systems, pp. 6158-6169, 2019a.
209
+ Zeyuan Allen-Zhu, Yuanzhi Li, and Zhao Song. A convergence theory for deep learning via overparameterization. In Proceedings of the 36th International Conference on Machine Learning, volume 97, pp. 242-252, 2019b.
210
+ Luís B Almeida, Thibault Langlois, José D Amaral, and Alexander Plakhov. Parameter adaptation in stochastic optimization. On-Line Learning in Neural Networks, Publications of the Newton Institute, pp. 111-134, 1998.
211
+ Peter Auer. Using confidence bounds for exploitation-exploration trade-offs. Journal of Machine Learning Research, 3(Nov):397-422, 2002.
212
+ Atılım Güneş Baydin, Robert Cornish, David Martínez Rubio, Mark Schmidt, and Frank Wood. Online learning rate adaptation with hypergradient descent. In International Conference on Learning Representations, 2018.
213
+ James Bergstra, Dan Yamins, and David D Cox. Hyperopt: A python library for optimizing the hyperparameters of machine learning algorithms. In Proceedings of the 12th Python in science conference, pp. 13-20. CiteSeer, 2013.
214
+ Dennis D Cox and Susan John. A statistical method for global optimization. In Proceedings of the 1992 IEEE International Conference on Systems, Man, and Cybernetics, pp. 1241-1246. IEEE, 1992.
215
+ Zhongxiang Dai, Haibin Yu, Bryan Kian Hsiang Low, and Patrick Jaillet. Bayesian optimization meets Bayesian optimal stopping. In International Conference on Machine Learning, pp. 1496-1506, 2019.
216
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics.
217
+ William B Dolan and Chris Brockett. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005.
218
+ Tobias Domhan, Jost Tobias Springenberg, and Frank Hutter. Speeding up automatic hyperparameter optimization of deep neural networks by extrapolation of learning curves. In *IJCAI*, pp. 3460-3468, 2015.
219
+ Michele Donini, Luca Franceschi, Orchid Majumder, Massimiliano Pontil, and Paolo Frasconi. MARTHE: Scheduling the learning rate via online hypergradients. In *IJCAI-20*, pp. 2119–2125, 2020.
220
+ John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12(61):2121-2159, 2011.
221
+ Stefan Falkner, Aaron Klein, and Frank Hutter. BOHB: Robust and efficient hyperparameter optimization at scale. arXiv preprint arXiv:1807.01774, 2018.
222
+ Luca Franceschi, Michele Donini, Paolo Frasconi, and Massimiliano Pontil. Forward and reverse gradient-based hyperparameter optimization. In Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 1165-1173. PMLR, 2017.
223
+ Marc G Genton. Classes of kernels for machine learning: a statistics perspective. Journal of machine learning research, 2(Dec):299-312, 2001.
224
+ Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Deep Learning. MIT press, 2016.
225
+
226
+ Deepak Akhilesh Gotmare, Shirish Nitish Keskar, Caiming Xiong, and Richard Socher. A closer look at deep learning heuristics: Learning rate restarts, warmup and distillation. international conference on learning representations, 2019.
227
+ Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training ImageNet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.
228
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2016a.
229
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings in deep residual networks. In European conference on computer vision, pp. 630-645. Springer, 2016b.
230
+ Frank Hutter, Holger H Hoos, and Kevin Leyton-Brown. Sequential model-based optimization for general algorithm configuration. In International conference on learning and intelligent optimization, pp. 507-523. Springer, 2011.
231
+ Max Jaderberg, Valentin Dalibard, Simon Osindero, Wojciech M Czarnecki, Jeff Donahue, Ali Razavi, Oriol Vinyals, Tim Green, Iain Dunning, Karen Simonyan, et al. Population based training of neural networks. arXiv preprint arXiv:1711.09846, 2017.
232
+ Kevin Jamieson and Ameet Talwalkar. Non-stochastic best arm identification and hyperparameter optimization. In Artificial Intelligence and Statistics, pp. 240-248, 2016.
233
+ Stanisław Jastrzębski, Zachary Kenton, Devansh Arpit, Nicolas Ballas, Asja Fischer, Yoshua Bengio, and Amos Storkey. Three factors influencing minima in SGD. arXiv preprint arXiv:1711.04623, 2017.
234
+ Chi Jin, Rong Ge, Praneeth Netrapalli, Sham M Kakade, and Michael I Jordan. How to escape saddle points efficiently. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 1724-1732. JMLR.org, 2017.
235
+ Kenji Kawaguchi. Deep learning without poor local minima. In Advances in neural information processing systems, pp. 586-594, 2016.
236
+ Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015.
237
+ A. Klein, Stefan Falkner, Jost Tobias Springenberg, and F. Hutter. Learning curve prediction with Bayesian neural networks. In International Conference on Learning Representations, 2017.
238
+ A. Krizhevsky and G. Hinton. Learning multiple layers of features from tiny images. Master's thesis, Department of Computer Science, University of Toronto, 2009.
239
+ Alex Krizhevsky. One weird trick for parallelizing convolutional neural networks. arXiv preprint arXiv:1404.5997, 2014.
240
+ Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. CIFAR-100 (Canadian Institute for Advanced Research). URL http://www.cs.toronto.edu/~kriz/cifar.html.
241
+ Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. In Advances in Neural Information Processing Systems, pp. 6389-6399, 2018a.
242
+ Liam Li, Kevin Jamieson, Afshin Rostamizadeh, Ekaterina Gonina, Jonathan Ben-Tzur, Moritz Hardt, Benjamin Recht, and Ameet Talwalkar. A system for massively parallel hyperparameter tuning. In Conference on Machine Learning and Systems, 2020.
243
+ Lisha Li, Kevin Jamieson, Giulia DeSalvo, Afshin Rostamizadeh, and Ameet Talwalkar. Hyperband: A novel bandit-based approach to hyperparameter optimization. The Journal of Machine Learning Research, 18(1):6765-6816, 2017.
244
+ Ilya Loshchilov and Frank Hutter. SGDR: stochastic gradient descent with warm restarts. In International Conference on Learning Representations, 2017.
245
+
246
+ Maren Mahsereci and Philipp Hennig. Probabilistic line searches for stochastic optimization. In Advances in Neural Information Processing Systems, volume 28, 2015.
247
+ Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, and Hao Wu. Mixed precision training. In International Conference on Learning Representations, 2018.
248
+ Kamil Nar and Shankar Sastry. Step size matters in deep learning. In Advances in Neural Information Processing Systems, pp. 3436-3444, 2018.
249
+ Jack Parker-Holder, Vu Nguyen, and Stephen Roberts. Provably efficient online hyperparameter optimization with population-based bandits. Advances in Neural Information Processing Systems, 2020.
250
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383-2392, Austin, Texas, November 2016. Association for Computational Linguistics.
251
+ C. E. Rasmussen and C. K. I. Williams. Gaussian Processes for Machine Learning. Adaptive Computation and Machine Learning. MIT Press, Cambridge, MA, USA, January 2006.
252
+ Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115 (3):211-252, 2015.
253
+ Tom Schaul, Sixin Zhang, and Yann LeCun. No more pesky learning rates. In International Conference on Machine Learning, pp. 343-351, 2013.
254
+ Bobak Shahriari, Kevin Swersky, Ziyu Wang, Ryan P. Adams, and Nando De Freitas. Taking the human out of the loop: A review of Bayesian optimization. Proceedings of the IEEE, 104(1): 148-175, January 2016. ISSN 0018-9219.
255
+ Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, 2015.
256
+ Leslie N Smith. Cyclical learning rates for training neural networks. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 464-472. IEEE, 2017.
257
+ Leslie N Smith. A disciplined approach to neural network hyper-parameters: Part 1-learning rate, batch size, momentum, and weight decay. arXiv preprint arXiv:1803.09820, 2018.
258
+ Jasper Snoek, Hugo Larochelle, and Ryan P Adams. Practical bayesian optimization of machine learning algorithms. In Advances in neural information processing systems, pp. 2951-2959, 2012.
259
+ Kevin Swersky, Jasper Snoek, and Ryan Prescott Adams. Freeze-thaw bayesian optimization. arXiv preprint arXiv:1406.3896, 2014.
260
+ T. Tieleman and G. Hinton. Lecture 6.5—RmsProp: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 2012.
261
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.
262
+ Alex Warstadt, Amanpreet Singh, and Samuel R Bowman. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641, 2019.
263
+ Yeming Wen, Paul Vicol, Jimmy Ba, Dustin Tran, and Roger Grosse. Flipout: Efficient pseudo-independent weight perturbations on mini-batches. In International Conference on Learning Representations, 2018.
264
+
265
+ Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112–1122, New Orleans, Louisiana, June 2018. Association for Computational Linguistics.
266
+ Yuhuai Wu, Mengye Ren, Renjie Liao, and Roger Grosse. Understanding short-horizon bias in stochastic meta-optimization. In International Conference on Learning Representations, 2018.
267
+ Matthew D Zeiler. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701, 2012.
268
+ Arber Zela, Aaron Klein, Stefan Falkner, and Frank Hutter. Towards automated deep learning: Efficient joint neural architecture and hyperparameter search. In ICML 2018 AutoML Workshop, July 2018.
269
+ Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the 2015 IEEE International Conference on Computer Vision (ICCV), ICCV '15, pp. 19-27, USA, 2015a. IEEE Computer Society. ISBN 9781467383912.
270
+ Yukun Zhu, Ryan Kiros, Richard Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. BookCorpus website. https://yknzhu.wixsite.com/web, 2015b.
271
+
272
+ # A APPENDIX
273
+
274
+ # A.1 BAYESIAN OPTIMIZATION (MORE DETAILS)
275
+
276
+ BO (Shahriari et al., 2016) is one of the state-of-the-art techniques for black-box optimization. It applies exploration and exploitation to the black-box objective by sequentially and actively querying the function values of some input instances. Specifically, BO uses Gaussian process as a surrogate model to fit the black-box objective function $f(\eta)$ . It updates a posterior distribution of $f(\eta)$ by using its likelihood on newly evaluated $(\eta, y = f(\eta) + \epsilon)$ pairs, where $y$ is a noisy observation of $f(\eta)$ and is the validation loss after $\tau$ steps in our case. Then, it determines the next LR $\eta$ to evaluate as the one maximizing an acquisition function, which is computed from the updated posterior. The acquisition function performs a trade-off between exploration and exploitation in evaluating the candidates of LR. BO repeats the above process until achieving a precise posterior predictive distribution of $f(\eta)$ .
277
+
278
+ Surrogate model (prior): We use a Gaussian process (GP) (Rasmussen & Williams, 2006), a commonly used surrogate model, as the prior over the black-box objective function in Eq. (2). A GP prior is specified by its mean function $\mu(\cdot)$ and its covariance (kernel) function $K(\cdot, \cdot)$ . We adopt the common choice $\mu(\cdot) = 0$ and set $K(\cdot, \cdot)$ to be the Matern kernel (Genton, 2001) with smoothness factor $\nu = 2.5$ and length scale $l = 1$ , defined as
279
+
280
+ $$
281
+ K(\eta_i, \eta_j) = \frac{1}{\Gamma(\nu) 2^{\nu-1}} \left( \frac{\sqrt{2\nu}\,\|\eta_i - \eta_j\|_2}{l} \right)^{\nu} K_{\nu}\left( \frac{\sqrt{2\nu}\,\|\eta_i - \eta_j\|_2}{l} \right), \tag{4}
282
+ $$
283
+
284
+ where $K_{\nu}(\cdot)$ is a modified Bessel function, $\Gamma(\cdot)$ is the gamma function, and $K(\eta_i, \eta_j)$ performs a convolution of the unit ball. Compared to the radial basis function (RBF) kernel, which always generates infinitely differentiable functions that can be overly smooth, a GP with a Matern kernel controls the smoothness of the generated functions to be $\lceil \nu \rceil - 1$ times differentiable (Rasmussen & Williams, 2006). This helps capture less-smooth local changes. In our case, $\nu = 2.5$ yields twice-differentiable functions.
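+
+ As a numerical illustration (ours, not part of AutoLRS), Eq. (4) can be evaluated directly with SciPy's gamma and Bessel functions:
+
+ ```python
+ import numpy as np
+ from scipy.special import gamma, kv  # kv: modified Bessel function of the second kind
+
+ def matern_kernel(eta_i, eta_j, nu=2.5, length_scale=1.0):
+     """Matern covariance between two LRs, following Eq. (4)."""
+     d = np.abs(eta_i - eta_j)
+     if d == 0.0:
+         return 1.0  # the kernel's limit as the distance goes to 0
+     z = np.sqrt(2.0 * nu) * d / length_scale
+     return (1.0 / (gamma(nu) * 2.0 ** (nu - 1.0))) * z ** nu * kv(nu, z)
+ ```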
285
+
286
+ Posterior prediction: In the following, we use the simplified notations $\eta_{1:k}^{\prime}$ and $f(\eta_{1:k}^{\prime})$ for vectors composed of $\{\eta_i'\}_{i=1}^k$ and $\{f(\eta_i')\}_{i=1}^k$ , respectively. The GP prior induces a Gaussian distribution over function values, i.e., $f(\eta_{1:k}') \sim \mathcal{N}(\mathbf{0}, \mathbf{K})$ where $\mathbf{K}_{i,j} = K(\eta_i', \eta_j')$ , $\forall i, j \in [k]$ . After $\tau$ training steps using LR $\eta_i'$ , we evaluate the validation loss, denoted $y_i$ , as a noisy observation of $f(\eta_i')$ , i.e., $y_i = f(\eta_i') + \epsilon$ with Gaussian white noise $\epsilon \sim \mathcal{N}(0, \sigma^2)$ . Given the noisy observations $y_{1:k}$ , we can update the GP posterior of the black-box function $f(\cdot)$ as
287
+
288
+ $$
289
+ f(\eta_{1:k}') \mid \eta_{1:k}', y_{1:k} \sim \mathcal{N}\left(y_{1:k}, \mathbf{K} + \sigma^2 \mathbf{I}\right). \tag{5}
290
+ $$
291
+
292
+ Given a new LR $\eta$ , we can now use the above GP posterior to predict the distribution of $f(\eta)$ by marginalizing over $f(\eta_{1:k}')$ , i.e.,
293
+
294
+ $$
295
+ P(f(\eta) \mid \eta_{1:k}', y_{1:k}) = \int P(f(\eta) \mid f(\eta_{1:k}')) \, P(f(\eta_{1:k}') \mid \eta_{1:k}', y_{1:k}) \, df(\eta_{1:k}'), \tag{6}
296
+ $$
297
+
298
+ which yields the posterior predictive distribution of $f(\eta)$ as
299
+
300
+ $$
301
+ f(\eta) \mid \eta_{1:k}', y_{1:k} \sim \mathcal{N}\left(\mu_k(\eta), \sigma_k^2(\eta)\right),
302
+ $$
303
+
304
+ $$
305
+ \mu_k(\eta) \triangleq \mathbf{k}^T (\mathbf{K} + \sigma^2 \mathbf{I})^{-1} y_{1:k}, \tag{7}
306
+ $$
307
+
308
+ $$
309
+ \sigma_k^2(\eta) \triangleq K(\eta, \eta) - \mathbf{k}^T (\mathbf{K} + \sigma^2 \mathbf{I})^{-1} \mathbf{k},
310
+ $$
311
+
312
+ where $\mathbf{k}_i = K(\eta, \eta_i')$ . The above result for a single LR $\eta$ extends trivially to multiple LRs.
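+
+ In code, Eq. (7) is a few lines of linear algebra; a minimal sketch with our own argument names:
+
+ ```python
+ import numpy as np
+
+ def gp_posterior(K, k_vec, k_self, y, sigma2):
+     """Posterior predictive mean/variance of f(eta), following Eq. (7).
+     K: k x k kernel matrix of observed LRs; k_vec: kernel values K(eta, eta_i');
+     k_self: K(eta, eta); y: observed validation losses; sigma2: noise variance."""
+     A = K + sigma2 * np.eye(len(y))
+     mu = k_vec @ np.linalg.solve(A, y)
+     var = k_self - k_vec @ np.linalg.solve(A, k_vec)
+     return mu, var
+ ```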
313
+
314
+ Acquisition function: Given the posterior predictive distribution of $f(\eta)$ in Eq. (7), BO finds the next LR $\eta_{i+1}'$ to evaluate based on an acquisition function $u_i(\eta)$ defined by the posterior mean $\mu_i(\eta)$ and standard deviation $\sigma_i(\eta)$ . A good acquisition function should balance exploration (i.e., large $\sigma_i(\eta)$ ) against exploitation (i.e., small $\mu_i(\eta)$ ). In AutoLRS, we use the Lower Confidence Bound (LCB) (Cox & John, 1992; Auer, 2002) as our acquisition function. In particular, given $\eta_{1:i}'$ and the corresponding validation losses $y_{1:i}$ , we determine the next LR $\eta_{i+1}'$ by minimizing the LCB, i.e.,
315
+
316
+ $$
317
+ \eta_{i+1}' = \underset{\eta}{\arg\min}\, u_i(\eta), \quad u_i(\eta) \triangleq \mu_i(\eta) - \kappa \sigma_i(\eta), \tag{8}
318
+ $$
319
+
320
+ where $\mu_{i}(\eta)$ and $\sigma_{i}(\eta)$ are the posterior mean and standard deviation from Eq. (7) after observing $i$ LRs, and $\kappa$ is a positive hyperparameter balancing exploration and exploitation. In our experiments, we set $\kappa = 1000$ , which works consistently well.
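+
+ Putting the posterior and the LCB rule together, the proposal of the next LR can be sketched with scikit-learn's GP implementation; the function below and the log-space candidate grid are our own illustration, not the AutoLRS code:
+
+ ```python
+ import numpy as np
+ from sklearn.gaussian_process import GaussianProcessRegressor
+ from sklearn.gaussian_process.kernels import Matern
+
+ def propose_next_lr(lrs_tried, val_losses, lr_min, lr_max, kappa=1000.0):
+     """Fit a GP to (log10 LR, predicted validation loss) pairs and return the
+     LR minimizing the LCB acquisition of Eq. (8)."""
+     gp = GaussianProcessRegressor(kernel=Matern(length_scale=1.0, nu=2.5),
+                                   alpha=1e-6)  # alpha acts as the noise sigma^2
+     gp.fit(np.log10(np.asarray(lrs_tried)).reshape(-1, 1), np.asarray(val_losses))
+     grid = np.linspace(np.log10(lr_min), np.log10(lr_max), 1000).reshape(-1, 1)
+     mu, sigma = gp.predict(grid, return_std=True)
+     best = np.argmin(mu - kappa * sigma)  # u_i(eta) = mu_i(eta) - kappa*sigma_i(eta)
+     return 10.0 ** grid[best, 0]
+ ```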
321
+
322
+ ![](images/98dc143c9206801a2b6603680cad3a8f3ae383713bbf77bc62d4f60776881aed.jpg)
323
+ (a) Training loss during 100 training steps and fitting it by an exponential time-series forecasting model.
324
+
325
+ ![](images/1e55b415c1b03c7c782a9311d136390ca40c7d858b3051c4ef798b8e0a2408e1.jpg)
326
+ (b) Validation loss during 800 training steps and fitting it by an exponential time-series forecasting model.
327
+
328
+ ![](images/d5bffea81711484a8b1fe06beefb92fe4a2324a21c7decc3dd4c34770ddf03b8.jpg)
329
+ (c) A corner case in which the exponential model cannot fully capture the non-monotonic change of the loss during the first 50 steps.
330
+ Figure 3: Fitting the loss time series with the exponential model when training ResNet-50 on ImageNet.
331
+
332
+ # A.2 EXPONENTIAL MODEL (MORE DETAILS)
333
+
334
+ We estimate $a, b$ , and $c$ of the exponential model $L(t) = a\exp (bt) + c$ in two steps, based on the validation loss observed in the first $\tau^{\prime}$ steps, denoted $y_{t}, t = 1,\dots ,\tau^{\prime}$ . First, we reduce the fitting problem to an optimization problem. Define $g(b)$ as the minimum, over $a$ and $c$ , of the squared error between predictions and observations. The original fitting problem can then be written in the following two-stage form.
335
+
336
+ $$
337
+ \min_{b < 0} g(b), \quad g(b) \triangleq \min_{a, c} \sum_{t=1}^{\tau'} \left( a \exp(bt) + c - y_t \right)^2 \tag{9}
338
+ $$
339
+
340
+ The outer problem over $b$ is one-dimensional. Moreover, with $b$ fixed, the inner minimization over $a, c$ is a linear regression problem with a closed-form solution. Hence, we apply a simple gradient descent method that starts from an initial $b$ , solves the linear least squares for $a, c$ under that $b$ , updates $b$ by a gradient descent step, and repeats these two steps. In practice, this quickly reduces the regression error. In addition, to enforce the negativity constraint on $b$ , we re-parameterize it as $b \gets -\exp(b')$ . The problem then reduces to
341
+
342
+ $$
343
+ \min_{b'} \min_{a, c} \sum_{t=1}^{\tau'} \left( a \exp\left(-\exp(b')\, t\right) + c - y_t \right)^2 \tag{10}
344
+ $$
345
+
346
+ Although other strategies could be used to optimize Eq. (9), we find the above method stable and fast in reducing the regression error, which keeps the fitting process highly efficient.
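+
+ A minimal NumPy sketch of this alternating scheme (the time rescaling, initialization, and step size are our own illustrative choices):
+
+ ```python
+ import numpy as np
+
+ def fit_exponential(y, iters=200, lr=0.1):
+     """Fit L(t) = a*exp(b*t) + c, b < 0, via the two-stage scheme of Eq. (10):
+     closed-form least squares for (a, c) given b, gradient steps on b'."""
+     t = np.arange(1, len(y) + 1) / len(y)  # rescale time for numerical stability
+     b_prime = 0.0                          # i.e. b = -exp(b') = -1 initially
+     for _ in range(iters):
+         b = -np.exp(b_prime)
+         e = np.exp(b * t)
+         # Inner problem: linear least squares for a, c with basis [exp(b t), 1].
+         X = np.stack([e, np.ones_like(t)], axis=1)
+         (a, c), *_ = np.linalg.lstsq(X, y, rcond=None)
+         # Outer problem: one gradient step on b' (chain rule: db/db' = -exp(b')).
+         r = a * e + c - y
+         grad_b = np.sum(2.0 * r * a * t * e)
+         b_prime -= lr * grad_b * (-np.exp(b_prime))
+     b = -np.exp(b_prime)
+     e = np.exp(b * t)
+     (a, c), *_ = np.linalg.lstsq(np.stack([e, np.ones_like(t)], axis=1), y, rcond=None)
+     return a, b, c
+ ```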
347
+
348
+ We empirically test whether the exponential model obtained by this method fits the loss time series well in different cases. Figures 3a and 3b show two typical examples of fitting the training-loss and validation-loss time series with the proposed model. They show that the model precisely predicts the main trends of the time-varying loss while filtering out less informative noise.
349
+
350
+ In Figure 3c, we also show a rare corner case in which the model fails to fit the increasing loss in the early steps. However, the loss-increasing stage usually does not last long, so this inaccuracy does little harm to the prediction of the later-stage loss, which is our main goal since $\tau$ is usually larger than the length of the loss-increasing stage. To handle such corner cases and outliers in the observed validation loss series, we present a pre-processing strategy for stable exponential fitting in §A.3. Every time we predict the validation loss after $\tau$ steps, we first pre-process the loss observed in the $\tau'$ steps and then fit the pre-processed loss series with the exponential model.
351
+
352
+ In our empirical study, we also tried more sophisticated time-series forecasting models, including Holt-Winters, autoregressive integrated moving average (ARIMA), and singular spectrum analysis (SSA). We show two examples comparing their performance with our simple exponential prediction model in Figure 4. Some prior works also fit and predict learning curves (Swersky et al., 2014; Domhan et al., 2015; Klein et al., 2017; Dai et al., 2019) to terminate evaluations of poorly-performing hyperparameters early during DNN hyperparameter optimization, but they need non-negligible time for training their models and performing inference. They are much more computationally intensive than our lightweight exponential prediction model, which makes them less practical for automatic LR schedule tuning.
353
+
354
+ ![](images/5d2e946573a538d4ee5c22296eb3e39e792ec51a9aa3d3048f9d61b8300333dd.jpg)
355
+ (a) Predict the training loss after 2000 steps.
356
+
357
+ ![](images/ebb2bbdf1c90a1d0684b54343a7becb556e525f25750cd230b119dd9feff593c.jpg)
358
+ (b) Predict the training loss after 4000 steps.
359
+
360
+ ![](images/8a1b46e833f6f10f61bccd2abbd77ef7c5e610ca6151e31efb1530fde1f59f28.jpg)
361
+ Figure 4: Examples of forecasting the loss series by various time-series forecasting models when training ResNet-50 on ImageNet. Our simple exponential prediction model yields the least mean squared error (MSE) among all the models.
362
+
363
+ ![](images/9314267c048d10516bd05678aa27f6265223718ec54b782d01a242e580047ce1.jpg)
364
+ Figure 5: The loss sequence in Figure 3c and its quadratic spline smoothing results after 1, 5, and 10 iterations.
365
+
366
+ ![](images/570df1adb88b8ec70e2ba15f309496355ce498233f8d4aa4338fb41380005a1f.jpg)
367
+
368
+ # A.3 PRE-PROCESS LOSS SERIES BY ITERATIVE SPLINE SMOOTHING
369
+
370
+ We show a corner case in Figure 3c, where the loss decreases rapidly at first, then increases for a while, but finally decreases stably. This may result from a large LR, or occur when escaping a possibly poor local minimum or a saddle point (Goodfellow et al., 2016). Our exponential model cannot fully capture the loss change in the early steps of this case. However, we consistently observe in our experiments that this early instability of the loss lasts at most a few hundred steps after we switch to a new LR.
371
+
372
+ Nevertheless, we find that adding a pre-processing step to eliminate noise, anomalies, and corner cases in the observed validation loss series makes the exponential fitting easier and more stable. Hence, we apply an iterative spline smoothing to the validation loss observed in the $\tau'$ steps before training the forecasting model. In particular, when evaluating an LR $\eta$ for a training stage, we first run $\tau'$ training steps and fit the observed sequence of validation losses with a quadratic spline. We repeat this spline smoothing for multiple iterations. At the end of each iteration, we remove the loss values that are among the farthest $3\%$ from the spline smoothing results, provided they were collected in the first $\tau'/2$ steps (where corner cases like the one in Figure 3c may occur), so the next iteration's spline smoothing only fits the remaining loss values. After a fixed number of iterations, we use the final spline smoothing values to train the exponential forecasting model.
373
+
374
+ Empirically, we find that 10 iterations of the above spline smoothing are needed before training the exponential forecasting model. Figure 5 shows the loss sequence from Figure 3c before smoothing and after 1, 5, and 10 iterations of smoothing. As the plots show, the iterative spline smoothing effectively removes the unnecessary noise and unstable changes during the early phase.
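+
+ A sketch of this pre-processing with SciPy (the spline's smoothing parameters are left at library defaults, which the text above does not specify):
+
+ ```python
+ import numpy as np
+ from scipy.interpolate import UnivariateSpline
+
+ def iterative_spline_smoothing(y, iters=10, drop_frac=0.03):
+     """Iteratively fit a quadratic spline (k=2) and drop the 3% of points in the
+     first half of the window that deviate most from the current fit."""
+     t = np.arange(len(y), dtype=float)
+     keep = np.ones(len(y), dtype=bool)
+     for _ in range(iters):
+         spline = UnivariateSpline(t[keep], y[keep], k=2)
+         deviation = np.abs(y - spline(t))
+         # Only points from the first tau'/2 steps are removal candidates.
+         candidates = np.where(keep & (t < len(y) / 2))[0]
+         n_drop = max(1, int(drop_frac * candidates.size))
+         worst = candidates[np.argsort(deviation[candidates])[-n_drop:]]
+         keep[worst] = False
+     return spline(t)  # smoothed series used to fit the exponential model
+ ```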
375
+
376
+ # A.4 POSTERIOR LEARNED BY BO
377
+
378
+ Figure 6 shows how the BO posterior gradually becomes an increasingly accurate estimate of the underlying black-box objective. We also visualize the "learning progress" of BO in an earlier stage and a later stage of training. In both early and late stages, by exploring more LRs, BO achieves a more accurate posterior estimate of the objective function, and $k = 10$ suffices to obtain a satisfactory estimate. Moreover, the posteriors in the later stages have much smaller variance/uncertainty than in the earlier stages.
379
+
380
+ ![](images/005b1a7648ebad24261976aaa9de765c25bb16d65f7ca3a6d902d542f176533d.jpg)
381
+ Figure 6: BO's posterior of the black-box objective function after exploring $i$ LRs (red dots $\bullet$ ) determined by Eq. (3) at an early stage and a late stage during the training of ResNet-50 on ImageNet. The dashed lines show the mean function $\mu_i(\eta)$ (indicating the predicted validation loss of applying LR $\eta$ for $\tau$ steps) and the shaded areas show the standard deviation $\sigma_i(\eta)$ (indicating the prediction uncertainty) in the form of $\mu_i(\eta) \pm \sigma_i(\eta)$ .
382
+
383
+ # A.5 EXPERIMENTS (MORE DETAILS)
384
+
385
+ # A.5.1 ANALYSIS OF ONLINE LEARNING RATE ADAPTATION WITH HYPERGRADIENT-BASED METHODS
386
+
387
+ Hypergradient descent (HD) (Baydin et al., 2018) is a method to adjust the learning rate in an online fashion by performing gradient descent on the learning rate at the same time as the underlying DNN is optimized. For simplicity, we rewrite Eq. (1), which performs mini-batch SGD updates on model weights $\theta$ at each step $t$ , as:
388
+
389
+ $$
390
+ \theta_{t+1} = \theta_t - \eta_t \nabla L(\theta_t), \tag{11}
391
+ $$
392
+
393
+ where $\eta_{t}$ is the learning rate (LR) at step $t$ and $\nabla L(\theta_t)$ denotes the gradient of the loss function $L$ w.r.t. the model weights $\theta_{t}$ at step $t$ . By making the assumption that the optimal value of LR does not change much between two consecutive iterations, HD derives the partial derivative of the loss function $L$ with respect to the learning rate $\eta$ :
394
+
395
+ $$
396
+ \frac{\partial L(\theta_t)}{\partial \eta} = \nabla L(\theta_t) \, \frac{\partial \left(\theta_{t-1} - \eta \nabla L(\theta_{t-1})\right)}{\partial \eta} = \nabla L(\theta_t) \left(-\nabla L(\theta_{t-1})\right) \tag{12}
397
+ $$
398
+
399
+ An update rule for the learning rate is constructed as:
400
+
401
+ $$
402
+ \eta_{t+1} = \eta_t - \beta \frac{\partial L(\theta_t)}{\partial \eta} = \eta_t + \beta \nabla L(\theta_t) \nabla L(\theta_{t-1}), \tag{13}
403
+ $$
404
+
405
+ which introduces a hyperparameter $\beta$ , the hypergradient learning rate. Updating the learning rate requires only a single dot product between the gradient of the model weights at the previous step and the one at the current step. By updating both the learning rate $\eta_t$ and the model weights $\theta_t$ with Eq. (13) and Eq. (11) at each step, the HD algorithm performs gradient descent on both the learning rate and the model weights during training.
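+
+ A minimal NumPy sketch of SGD-HD for a generic gradient oracle (`grad_fn`, the default constants, and the flattened-parameter view are our own placeholders):
+
+ ```python
+ import numpy as np
+
+ def sgd_hd(theta, grad_fn, eta=0.01, beta=1e-4, steps=1000):
+     """SGD with hypergradient descent: adapt the LR with the dot product of
+     consecutive gradients (Eq. (13)), then take an SGD step (Eq. (11))."""
+     prev_grad = np.zeros_like(theta)
+     for _ in range(steps):
+         g = grad_fn(theta)
+         eta = eta + beta * np.dot(g, prev_grad)  # Eq. (13)
+         theta = theta - eta * g                  # Eq. (11)
+         prev_grad = g
+     return theta, eta
+ ```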
406
+
407
+ HD can be applied to optimizers including SGD, SGD with Nesterov momentum, and Adam. The original paper empirically shows that these optimizers equipped with HD are much less sensitive to the choice of the initial regular learning rate and that the convergence rate improves on a set of tasks. However, the paper only compares HD with constant-LR baselines on small models and small datasets. To study how HD compares to hand-tuned LR schedules on larger models and datasets, we train VGG-16 (Simonyan & Zisserman, 2015) and ResNet-50 neural networks on the CIFAR-10 image recognition dataset (Krizhevsky & Hinton, 2009) with a mini-batch size of 128 using a PyTorch implementation. A hand-tuned LR schedule consists of 350 epochs in total, starting at 0.1 and multiplying the learning rate by 0.1 at epochs 150 and 250. This hand-tuned LR schedule achieves around $93.70\%$ and $95.56\%$ top-1 accuracy on the test set for VGG-16 and ResNet-50, respectively, when we train the models on one NVIDIA Titan RTX GPU. We apply SGD with HD (SGD-HD) to train the two models, sweep all the guideline values of the two hyperparameters (regular LR and hypergradient LR) in SGD-HD, and report the best top-1 accuracy that SGD-HD achieves for VGG-16 and ResNet-50 within 500 epochs in Table 4 and Table 5. We make three observations. (1) Hypergradient descent is very sensitive to the selection of the regular LR and the hypergradient LR: across all suggested values of the two hyperparameters, the top-1 accuracy ranges from $10.00\%$ to $91.80\%$ for VGG-16 and from $10.00\%$ to $92.52\%$ for ResNet-50. (2) It cannot match the top-1 accuracy achieved with hand-tuned LR schedules: the best top-1 accuracy it achieves across all hyperparameter settings is $1.90\%$ and $3.04\%$ behind the accuracy of the hand-tuned LR schedules for VGG-16 and ResNet-50, respectively, even though we ran each setting for 150 more epochs than the hand-tuned schedule. (3) It is prone to overfitting. For example, when using regular $\mathrm{LR} = 10^{-3}$ and hypergradient $\mathrm{LR} = 10^{-5}$ to train VGG-16, the top-1 accuracy is only $90.74\%$ while the training accuracy is already $99.98\%$ .
410
+
411
+ MARTHE (Donini et al., 2020) adaptively interpolates between two hypergradient-based methods, HD and RTHO (Franceschi et al., 2017), and computes the gradient of the loss on the validation set, instead of the training set, w.r.t. the learning rate. Besides the two hyperparameters in HD, MARTHE introduces another hyperparameter $\mu$ that controls how quickly past history is forgotten. We sample $\mu$ between 0.9 and 0.999, sample the hypergradient LR log-uniformly in $[10^{-6}, 10^{-3}]$ , and set the initial LR to 0.1, following the MARTHE paper's hyperparameter settings for training VGG-11 on CIFAR-10. We apply SGD with MARTHE to train VGG-16 on CIFAR-10. The best top-1 accuracy MARTHE achieves across all hyperparameter settings in 350 epochs is $92.99\%$ , which is $0.71\%$ lower than the accuracy achieved with the hand-tuned LR schedule.
412
+
413
+ # A.5.2 SENSITIVITY TEST OF $\tau_{\mathrm{max}}$ IN AutoLRS AND MEASURE OF VARIABILITY
414
+
415
+ Recall from §4.4 that AutoLRS starts with $\tau = 1000$ and $\tau' = 100$ , and doubles them after every stage until $\tau$ reaches $\tau_{\mathrm{max}}$ . We test the sensitivity of AutoLRS to this hyperparameter by comparing the LR schedules generated with different $\tau_{\mathrm{max}}$ values for the VGG-16 neural network on CIFAR-10, as in §A.5.1. The LR search interval $(\eta_{\mathrm{min}}, \eta_{\mathrm{max}})$ we use is $(10^{-3}, 10^{-1})$ . Table 6 reports, over 5 trials per $\tau_{\mathrm{max}}$ value, the number of training epochs needed to reach the target $93.70\%$ top-1 accuracy with the generated LR schedules. AutoLRS with different $\tau_{\mathrm{max}}$ values consistently reaches the top-1 accuracy of the hand-tuned LR schedule (i.e., $93.70\%$ ) in fewer training epochs. The best AutoLRS-generated LR schedule achieves $94.13\%$ top-1 accuracy within 350 training epochs (excluding the cost of the LR search).
416
+
417
+ In the last column of Table 6, we report the mean and standard deviation of the top-1 accuracy achieved by AutoLRS over 5 trials for each $\tau_{\mathrm{max}}$ . To further measure the variability of AutoLRS, we train VGG-16 on CIFAR-100 (Krizhevsky et al.) with a mini-batch size of 128. A carefully hand-tuned LR schedule consists of 200 epochs in total, starting at 0.1 and dividing the learning rate by 5 at epochs 60, 120, and 160; it achieves $72.93\%$ top-1 accuracy. We train VGG-16 on CIFAR-100 for 200 epochs with AutoLRS for 10 trials using different random seeds and report the achieved top-1 accuracy in Table 9. The LR search interval $(\eta_{\mathrm{min}}, \eta_{\mathrm{max}})$ is $(10^{-3}, 10^{-1})$ , and $\tau_{\mathrm{max}}$ is set to 8000. The top-1 accuracy achieved by the AutoLRS-generated LR schedules over 10 trials has a mean of $73.05\%$ and a standard deviation of $0.14\%$ . The best AutoLRS-generated LR schedule achieves $73.30\%$ top-1 accuracy, which is $0.37\%$ higher than the accuracy of the hand-tuned LR schedule.
418
+
419
+ # A.5.3 LEARNING RATE SCHEDULE SEARCH WITH HYPERBAND
420
+
421
+ Hyperband is a multi-armed bandit approach to DNN hyperparameter optimization. It dynamically allocates resources to randomly sampled configurations and uses successive halving (Jamieson & Talwalkar, 2016) to early-stop poorly-performing configurations. We use Hyperband to optimize the LR schedule for CIFAR-10 training with VGG-16 by searching for an exponential-decay LR schedule, parameterized by an initial learning rate and a decay factor; the learning rate is multiplied by the decay factor every epoch. Exponential decay is a commonly used LR schedule and is also used in other DNN hyperparameter optimization methods (Falkner et al., 2018). We use a search space of $(10^{-3}, 10^{-1})$ for the initial LR and $(0.9, 1)$ for the decay factor. The decay factor is sampled uniformly at random, and the initial LR is sampled uniformly in log space. We use the default setting of Hyperband, which caps the epochs allocated to a single configuration at 350 and discards two-thirds of the configurations in each round of successive halving. This results in evaluating 384 configurations for different numbers of epochs, 12600 epochs in total, a computational overhead of $36\times$ compared to a single training run with the hand-tuned LR schedule. The best configuration found by Hyperband achieves $93.24\%$ top-1 accuracy, which is $0.46\%$ lower than the accuracy achieved with the hand-tuned LR schedule.
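+
+ For concreteness, the schedule family searched here and the sampling of its two parameters can be written as follows (a sketch; the function names are ours):
+
+ ```python
+ import numpy as np
+
+ def exponential_decay_lr(initial_lr, decay, epoch):
+     """Exponential-decay schedule: multiply the LR by a constant factor each epoch."""
+     return initial_lr * decay ** epoch
+
+ def sample_configuration(rng):
+     """One Hyperband configuration: log-uniform initial LR in (1e-3, 1e-1),
+     uniform decay factor in (0.9, 1)."""
+     initial_lr = 10.0 ** rng.uniform(-3.0, -1.0)
+     decay = rng.uniform(0.9, 1.0)
+     return initial_lr, decay
+ ```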
424
+
425
+ # A.5.4 ABLATION STUDY
426
+
427
+ To illustrate the effects of the exponential model and of BO in AutoLRS, we perform ablation studies using the VGG-16 neural network on CIFAR-10, as in §A.5.1.
428
+
429
+ Exponential model: What if we remove the exponential forecasting model and simply use the validation loss at step $\tau'$ to update the BO posterior? Will the LR schedules generated by AutoLRS be significantly worse? We apply AutoLRS without the exponential forecasting model to find LR schedules for VGG-16 on CIFAR-10. With $\tau_{\mathrm{max}}$ chosen from $\{4000, 8000, 16000\}$ , the best top-1 test accuracy that AutoLRS achieves within 350 training epochs is $91.73\%$ , $92.59\%$ , and $92.24\%$ , respectively. It is therefore unable to match the target accuracy within a reasonable number of training steps without the exponential forecasting model. The reason is that the BO objective becomes minimizing the validation loss at step $\tau'$ , which leads to short-horizon issues (Wu et al., 2018). As a consequence, it tends to select a conservative LR, often a small LR around $\eta_{\mathrm{min}}$ in the late stages. In contrast, with the exponential forecasting model, the goal of BO is to find the LR that minimizes the predicted validation loss after $\tau$ steps. This allows the LR selected in the current stage to be higher than in past stages, and the loss to even increase for a short period, as long as the predicted loss after $\tau$ steps is low. This phenomenon can be seen in Figure 1 and Figure 2.
430
+
431
+ Bayesian Optimization: What if we replace BO in AutoLRS with random search or grid search? Will the generated LR schedules get worse? We replace the BO component of AutoLRS with random search and with grid search, keep the exponential forecasting component, and apply the result to find LR schedules for VGG-16 on CIFAR-10. The LR search interval is $(10^{-3}, 10^{-1})$ , the same as in §A.5.2. Table 7 and Table 8 show the results of random search and grid search with different $\tau_{\mathrm{max}}$ values, respectively. Both random search and grid search have at least one trial that fails to match the hand-tuned LR schedule's $93.70\%$ top-1 test accuracy within 350 epochs (denoted by N/A in the tables). The top-1 accuracy achieved on average across trials within 350 epochs by random search and grid search is $0.09\%$ and $0.24\%$ behind AutoLRS with BO, respectively. We also replace BO with grid search to find LR schedules for VGG-16 on CIFAR-100. The top-1 accuracy over 10 trials has a mean of $72.63\%$ and a standard deviation of $0.56\%$ . Compared to the AutoLRS-generated LR schedules in Table 9, the mean grid-search accuracy lies more than two standard deviations below the BO accuracy of $73.05\% \pm 0.14\%$ .
432
+
433
+ # A.5.5 AutoLRS FINE-TUNING RESULTS OF BERTBASE ACROSS 3 TRIALS
434
+
435
+ We pre-trained $\mathrm{BERT}_{\mathrm{BASE}}$ with AutoLRS for 3 trials and report the fine-tuning results in Table 3.
436
+
437
+ Table 3: Fine-tuning results of $\mathrm{{BERT}}{}_{\mathrm{{BASE}}}$ models pre-trained with AutoLRS for 3 trials. Accuracy scores on the Dev set are reported for MRPC, MNLI, and CoLA. F1 scores on the Dev set are reported for SQuAD v1.1.
438
+
439
+ <table><tr><td></td><td>MRPC</td><td>MNLI</td><td>CoLA</td><td>SQuAD v1.1</td></tr><tr><td>Trial 1</td><td>88.0</td><td>82.5</td><td>47.6</td><td>87.1</td></tr><tr><td>Trial 2</td><td>88.0</td><td>82.7</td><td>46.5</td><td>87.0</td></tr><tr><td>Trial 3</td><td>87.8</td><td>82.3</td><td>47.0</td><td>86.6</td></tr></table>
440
+
441
+ Table 4: Results of tuning the regular LR and the hypergradient LR of SGD-HD for CIFAR-10 training with VGG-16 (batch size = 128). We train the model for 500 epochs using SGD-HD with each suggested value of the regular LR and the hypergradient LR, and report the best top-1 test accuracy achieved, the corresponding training accuracy, and the epoch at which it was reached. Note that a hand-tuned LR schedule achieves $93.70\%$ top-1 test accuracy in 350 epochs.
442
+
443
+ <table><tr><td>regular LR</td><td>hypergradient LR</td><td>Top-1 Test Accuracy</td><td>Training Accuracy</td><td>Epoch</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-6</sup></td><td>86.13%</td><td>99.77%</td><td>438</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-5</sup></td><td>88.79%</td><td>99.98%</td><td>480</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-4</sup></td><td>86.31%</td><td>98.10%</td><td>494</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-3</sup></td><td>90.70%</td><td>99.95%</td><td>499</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-2</sup></td><td>10.30%</td><td>9.90%</td><td>40</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-6</sup></td><td>86.14%</td><td>99.73%</td><td>394</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-5</sup></td><td>88.49%</td><td>99.95%</td><td>448</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-4</sup></td><td>87.67%</td><td>98.78%</td><td>483</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-3</sup></td><td>88.70%</td><td>99.49%</td><td>469</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-2</sup></td><td>10.22%</td><td>9.92%</td><td>170</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-6</sup></td><td>86.09%</td><td>99.84%</td><td>481</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-5</sup></td><td>88.82%</td><td>99.94%</td><td>304</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-4</sup></td><td>86.63%</td><td>95.37%</td><td>479</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-3</sup></td><td>10.22%</td><td>10.13%</td><td>1</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-2</sup></td><td>10.02%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-6</sup></td><td>86.13%</td><td>99.73%</td><td>406</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-5</sup></td><td>88.78%</td><td>99.94%</td><td>346</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-4</sup></td><td>90.74%</td><td>99.98%</td><td>484</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-3</sup></td><td>44.12%</td><td>43.02%</td><td>500</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-2</sup></td><td>88.48%</td><td>99.55%</td><td>467</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-6</sup></td><td>91.69%</td><td>99.97%</td><td>389</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-5</sup></td><td>88.53%</td><td>99.89%</td><td>397</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-4</sup></td><td>89.11%</td><td>99.92%</td><td>484</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-3</sup></td><td>10.07%</td><td>9.90%</td><td>265</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-2</sup></td><td>10.00%</td><td>10.02%</td><td>1</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>9.99%</td><td>1</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-6</sup></td><td>91.80%</td><td>99.93%</td><td>476</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-5</sup></td><td>91.48%</td><td>99.85%</td><td>317</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-4</sup></td><td>88.81%</td><td>99.57%</td><td>499</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-3</sup></td><td>90.42%</td><td>99.80%</td><td>393</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-2</sup></td><td>11.24%</td><td>10.45%</td><td>1</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.02%</td><td>1</td></tr></table>
444
+
445
+ Table 5: Results of tuning the regular LR and the hypergradient LR of SGD-HD for CIFAR-10 training with ResNet-50 (batch size = 128). We train the model for 500 epochs using SGD-HD with each suggested value of the regular LR and the hypergradient LR, and report the best top-1 test accuracy achieved, the corresponding training accuracy, and the epoch at which it was reached. Note that a hand-tuned LR schedule achieves $95.56\%$ top-1 test accuracy in 350 epochs.
446
+
447
+ <table><tr><td>regular LR</td><td>hypergradient LR</td><td>Top-1 Test Accuracy</td><td>Training Accuracy</td><td>Epoch</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-6</sup></td><td>83.67%</td><td>99.71%</td><td>410</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-5</sup></td><td>88.75%</td><td>99.44%</td><td>490</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-4</sup></td><td>83.77%</td><td>99.68%</td><td>494</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-3</sup></td><td>71.03%</td><td>72.14%</td><td>491</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-2</sup></td><td>10.11%</td><td>10.03%</td><td>261</td></tr><tr><td>10<sup>-6</sup></td><td>10<sup>-1</sup></td><td>10.0%</td><td>10.0%</td><td>1</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-6</sup></td><td>83.99%</td><td>99.64%</td><td>420</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-5</sup></td><td>89.15%</td><td>99.97%</td><td>460</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-4</sup></td><td>10.12%</td><td>9.95%</td><td>206</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-3</sup></td><td>19.73%</td><td>18.53%</td><td>13</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-2</sup></td><td>10.03%</td><td>9.98%</td><td>137</td></tr><tr><td>10<sup>-5</sup></td><td>10<sup>-1</sup></td><td>10.0%</td><td>10.0%</td><td>1</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-6</sup></td><td>84.98%</td><td>99.85%</td><td>488</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-5</sup></td><td>89.27%</td><td>99.94%</td><td>482</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-4</sup></td><td>84.36%</td><td>97.78%</td><td>424</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-3</sup></td><td>88.72%</td><td>99.84%</td><td>484</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-2</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-4</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-6</sup></td><td>83.22%</td><td>99.81%</td><td>487</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-5</sup></td><td>88.56%</td><td>99.98%</td><td>492</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-4</sup></td><td>86.00%</td><td>97.32%</td><td>440</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-3</sup></td><td>10.10%</td><td>9.76%</td><td>367</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-2</sup></td><td>42.80%</td><td>40.11%</td><td>497</td></tr><tr><td>10<sup>-3</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-6</sup></td><td>92.40%</td><td>99.99%</td><td>459</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-5</sup></td><td>88.51%</td><td>99.98%</td><td>440</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-4</sup></td><td>90.72%</td><td>99.91%</td><td>452</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-3</sup></td><td>10.19%</td><td>9.64%</td><td>315</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-2</sup></td><td>10.05%</td><td>9.99%</td><td>8</td></tr><tr><td>10<sup>-2</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-6</sup></td><td>92.18%</td><td>99.97%</td><td>487</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-5</sup></td><td>92.52%</td><td>99.97%</td><td>494</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-4</sup></td><td>87.74%</td><td>99.86%</td><td>492</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-3</sup></td><td>84.32%</td><td>97.23%</td><td>477</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-2</sup></td><td>10.00%</td><td>10.11%</td><td>1</td></tr><tr><td>10<sup>-1</sup></td><td>10<sup>-1</sup></td><td>10.00%</td><td>10.00%</td><td>1</td></tr></table>
448
+
449
+ Table 6: Performance of AutoLRS with different $\tau_{\mathrm{max}}$ values for CIFAR-10 training with VGG-16 (batch size = 128). Note that a hand-tuned LR schedule can achieve 93.70% top-1 test accuracy in 350 epochs. We report the top-1 accuracy achieved within 350 epochs for each trial, and the mean and standard deviation of the top-1 accuracy achieved by AutoLRS over 5 trials for each $\tau_{\mathrm{max}}$ .
450
+
451
+ <table><tr><td>τmax</td><td>Trial Number</td><td>Epoch to 93.70% Top-1 Accuracy</td><td>Top-1 Accuracy Achieved</td><td>Mean±std</td></tr><tr><td rowspan="5">4000</td><td>Trial 1</td><td>108</td><td>94.13%</td><td rowspan="5">94.01%±0.13%</td></tr><tr><td>Trial 2</td><td>181</td><td>93.96%</td></tr><tr><td>Trial 3</td><td>223</td><td>94.07%</td></tr><tr><td>Trial 4</td><td>315</td><td>93.82%</td></tr><tr><td>Trial 5</td><td>287</td><td>94.09%</td></tr><tr><td rowspan="5">8000</td><td>Trial 1</td><td>115</td><td>94.03%</td><td rowspan="5">93.96%±0.07%</td></tr><tr><td>Trial 2</td><td>265</td><td>93.92%</td></tr><tr><td>Trial 3</td><td>203</td><td>93.94%</td></tr><tr><td>Trial 4</td><td>194</td><td>94.02%</td></tr><tr><td>Trial 5</td><td>305</td><td>93.87%</td></tr><tr><td rowspan="5">16000</td><td>Trial 1</td><td>229</td><td>93.77%</td><td rowspan="5">93.80%±0.10%</td></tr><tr><td>Trial 2</td><td>250</td><td>93.95%</td></tr><tr><td>Trial 3</td><td>267</td><td>93.73%</td></tr><tr><td>Trial 4</td><td>313</td><td>93.71%</td></tr><tr><td>Trial 5</td><td>330</td><td>93.82%</td></tr></table>
452
+
453
+ Table 7: Experimental results after replacing BO in AutoLRS with random search for CIFAR-10 training with VGG-16 (batch size = 128). We also report the top-1 accuracy achieved within 350 epochs for each trial.
454
+
455
+ <table><tr><td>τmax</td><td>Trial Number</td><td>Epoch to 93.70% Top-1 Accuracy</td><td>Top-1 Accuracy Achieved</td></tr><tr><td rowspan="3">4000</td><td>Trial 1</td><td>199</td><td>93.80%</td></tr><tr><td>Trial 2</td><td>209</td><td>93.97%</td></tr><tr><td>Trial 3</td><td>298</td><td>93.84%</td></tr><tr><td rowspan="3">8000</td><td>Trial 1</td><td>344</td><td>93.71%</td></tr><tr><td>Trial 2</td><td>225</td><td>93.98%</td></tr><tr><td>Trial 3</td><td>175</td><td>93.91%</td></tr><tr><td rowspan="3">16000</td><td>Trial 1</td><td>N/A</td><td>93.64%</td></tr><tr><td>Trial 2</td><td>316</td><td>93.96%</td></tr><tr><td>Trial 3</td><td>310</td><td>93.86%</td></tr></table>
456
+
457
+ Table 8: Experimental results after replacing BO in AutoLRS with grid search for CIFAR-10 training with VGG-16 (batch size = 128). We also report the top-1 accuracy achieved within 350 epochs for each trial.
458
+
459
+ <table><tr><td>τmax</td><td>Trial Number</td><td>Epoch to 93.70% Top-1 Accuracy</td><td>Top-1 Accuracy Achieved</td></tr><tr><td rowspan="3">4000</td><td>Trial 1</td><td>304</td><td>93.88%</td></tr><tr><td>Trial 2</td><td>233</td><td>93.88%</td></tr><tr><td>Trial 3</td><td>180</td><td>93.91%</td></tr><tr><td rowspan="3">8000</td><td>Trial 1</td><td>239</td><td>93.72%</td></tr><tr><td>Trial 2</td><td>296</td><td>93.95%</td></tr><tr><td>Trial 3</td><td>N/A</td><td>93.32%</td></tr><tr><td rowspan="3">16000</td><td>Trial 1</td><td>N/A</td><td>93.02%</td></tr><tr><td>Trial 2</td><td>153</td><td>93.78%</td></tr><tr><td>Trial 3</td><td>288</td><td>93.70%</td></tr></table>
460
+
461
+ Table 9: Top-1 test accuracy achieved by AutoLRS-generated LR schedules for CIFAR-100 training with VGG-16 over 10 trials.
462
+
463
+ <table><tr><td>73.12%</td><td>73.20%</td><td>72.90%</td><td>72.93%</td><td>73.03%</td></tr><tr><td>73.16%</td><td>73.30%</td><td>72.85%</td><td>73.00%</td><td>72.97%</td></tr></table>
autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:652ce0c331319726ea7da92f484ff681a7c9b0297fc18c29265a6f80e4841010
3
+ size 795429
autolrsautomaticlearningrateschedulebybayesianoptimizationonthefly/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0238290dbc21347a0cbc3ca65358fdea4fad95fbe48eb1e67d0e96e0e93cf47e
3
+ size 857101
autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/f7793aa3-ff60-4924-84b7-8a987c0c05b6_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1a8392dd2d7d546528dae1da5efe0e6f223885ce5d1d7a3675d28b2a3a55353
3
+ size 104819
autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/f7793aa3-ff60-4924-84b7-8a987c0c05b6_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf11e08fc7895d809a2c30a79282ffc8938073bd5d5dad41c0165e144f506c17
3
+ size 121617
autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/f7793aa3-ff60-4924-84b7-8a987c0c05b6_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5260f0ef1dea6fa5fb249798e9b95c56037b7dc101f94cf906a324ea0b5294da
3
+ size 1878434
autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/full.md ADDED
@@ -0,0 +1,409 @@
1
+ # AUTOREGRESSIVE DYNAMICS MODELS FOR OFFLINE POLICY EVALUATION AND OPTIMIZATION
2
+
3
+ Michael R. Zhang\*1 Tom Le Paine2 Ofir Nachum3 Cosmin Paduraru2 George Tucker3 Ziyu Wang3 Mohammad Norouzi3
4
+
5
+ <sup>1</sup>University of Toronto <sup>2</sup>DeepMind <sup>3</sup>Google Brain
6
+ michael@cs.toronto.edu, mnorouzi@google.com
7
+
8
+ # ABSTRACT
9
+
10
+ Standard dynamics models for continuous control make use of feedforward computation to predict the conditional distribution of next state and reward given current state and action using a multivariate Gaussian with a diagonal covariance structure. This modeling choice assumes that different dimensions of the next state and reward are conditionally independent given the current state and action and may be driven by the fact that fully observable physics-based simulation environments entail deterministic transition dynamics. In this paper, we challenge this conditional independence assumption and propose a family of expressive autoregressive dynamics models that generate different dimensions of the next state and reward sequentially conditioned on previous dimensions. We demonstrate that autoregressive dynamics models indeed outperform standard feedforward models in log-likelihood on held-out transitions. Furthermore, we compare different model-based and model-free off-policy evaluation (OPE) methods on RL Unplugged, a suite of offline MuJoCo datasets, and find that autoregressive dynamics models consistently outperform all baselines, achieving a new state-of-the-art. Finally, we show that autoregressive dynamics models are useful for offline policy optimization by serving as a way to enrich the replay buffer through data augmentation and improving performance using model-based planning.
11
+
12
+ # 1 INTRODUCTION
13
+
14
+ Model-based Reinforcement Learning (RL) aims to learn an approximate model of the environment's dynamics from existing logged interactions to facilitate efficient policy evaluation and optimization. Early work on Model-based RL uses simple tabular (Sutton, 1990; Moore and Atkeson, 1993; Peng and Williams, 1993) and locally linear (Atkeson et al., 1997) dynamics models, which often result in a large degree of model bias (Deisenroth and Rasmussen, 2011). Recent work adopts feedforward neural networks to model complex transition dynamics and improve generalization to unseen states and actions, achieving a high level of performance on standard RL benchmarks (Chua et al., 2018; Wang et al., 2019). However, standard feedforward dynamics models assume that different dimensions of the next state and reward are conditionally independent given the current state and action, which may lead to a poor estimation of uncertainty and unclear effects on RL applications.
15
+
16
+ In this work, we propose a new family of autoregressive dynamics models and study their effectiveness for off-policy evaluation (OPE) and offline policy optimization on continuous control. Autoregressive dynamics models generate each dimension of the next state conditioned on previous dimensions of the next state, in addition to the current state and action (see Figure 1). This means that to sample the next state from an autoregressive dynamics model, one needs $n$ sequential steps, where $n$ is the number of state dimensions, and one more step to generate the reward. By contrast, standard feedforward dynamics models take current state and action as input and predict the distribution of the next state and reward as a multivariate Gaussian with a diagonal covariance structure (e.g., Chua et al. (2018); Janner et al. (2019)). This modeling choice assumes that different state dimensions are conditionally independent.
17
+
18
+ Autoregressive generative models have seen success in generating natural images (Parmar et al., 2018), text (Brown et al., 2020), and speech (Oord et al., 2016), but they have not seen use in Model-based RL for continuous control.
19
+
20
+ We find that autoregressive dynamics models achieve higher log-likelihood compared to their feedforward counterparts on heldout validation transitions of all DM continuous control tasks (Tassa et al., 2018) from the RL Unplugged dataset (Gulcehre et al., 2020). To determine the impact of improved transition dynamics models, we primarily focus on OPE because it allows us to isolate contributions of the dynamics model in value estimation vs. the many other factors of variation in policy optimization and data collection. We find that autoregressive dynamics models consistently outperform existing Model-based and Model-free OPE baselines on continuous control in both ranking and value estimation metrics. We expect that our advances in model-based OPE will improve offline policy selection for offline RL (Paine et al., 2020). Finally, we show that our autoregressive dynamics models can help improve offline policy optimization by model predictive control, achieving a new state-of-the-art on cheetah-run and fish-swim from RL Unplugged (Gulcehre et al., 2020).
21
+
22
+ Key contributions of this paper include:
23
+
24
+ - We propose autoregressive dynamics models to capture dependencies between state dimensions in forward prediction. We show that autoregressive models improve log-likelihood over non-autoregressive models for continuous control tasks from the DM Control Suite (Tassa et al., 2018).
25
+ - We apply autoregressive dynamics models to Off-Policy Evaluation (OPE), surpassing the performance of state-of-the-art baselines in median absolute error, rank correlation, and normalized top-5 regret across 9 control tasks.
26
+ - We show that autoregressive dynamics models are more useful than feedforward models for offline policy optimization, serving as a way to enrich experience replay by data augmentation and improving performance via model-based planning.
27
+
28
+ # 2 PRELIMINARIES
29
+
30
+ Here we introduce relevant notation and discuss off-policy (offline) policy evaluation (OPE). We refer the reader to Lange et al. (2012) and Levine et al. (2020) for background on offline RL, which is also known as batch RL in the literature.
31
+
32
+ A finite-horizon Markov Decision Process (MDP) is defined by a tuple $\mathcal{M} = (\mathcal{S},\mathcal{A},\mathcal{T},d_0,r,\gamma)$ , where $\mathcal{S}$ is a set of states $s\in S$ , $\mathcal{A}$ is a set of actions $a\in \mathcal{A}$ , $\mathcal{T}$ defines transition probability distributions $p(s_{t + 1}|s_t,a_t)$ , $d_0$ defines the initial state distribution $d_0\equiv p(s_0)$ , $r$ defines a reward function $r:\mathcal{S}\times \mathcal{A}\to \mathbb{R}$ , and $\gamma$ is a scalar discount factor. A policy $\pi (a\mid s)$ defines a conditional distribution over actions conditioned on states. A trajectory consists of a sequence of states and actions $\tau = (s_0,a_0,s_1,a_1,\dots ,s_H)$ of horizon length $H$ . We use $s_{t,i}$ to denote the $i$ -th dimension of the state at time step $t$ (and similarly for actions). In reinforcement learning, the objective is to maximize the expected sum of discounted rewards over the trajectory distribution induced by the policy:
33
+
34
+ $$
35
+ V_{\gamma}(\pi) = \mathbb{E}_{\tau \sim p_{\pi}(\tau)} \left[ \sum_{t=0}^{H} \gamma^t r(s_t, a_t) \right]. \tag{1}
36
+ $$
37
+
38
+ The trajectory distribution is characterized by the initial state distribution, policy, and transition probability distribution:
39
+
40
+ $$
41
+ p_{\pi}(\tau) = d_0(s_0) \prod_{t=0}^{H-1} \pi(a_t \mid s_t) \, p(s_{t+1} \mid s_t, a_t). \tag{2}
42
+ $$
43
+
44
+ In offline RL, we are given access to a dataset of transitions $\mathcal{D} = \{(s_t^i,a_t^i,r_{t + 1}^i,s_{t + 1}^i)\}_{i = 1}^N$ and a set of initial states $S_0$ . Offline RL is inherently a data-driven approach since the agent needs to optimize the same objective as in Eq. (1) but is not allowed additional interactions with the environment. Even though offline RL offers the promise of leveraging existing logged datasets, current offline RL algorithms (Fujimoto et al., 2019; Agarwal et al., 2020; Kumar et al., 2019) are typically evaluated using online interaction, which limits their applicability in the real world.
45
+
46
+ ![](images/7a185cc43a59f3aaec0464004b4367b0359c3bd71475ee99486d40eb7b44541e.jpg)
47
+ Standard Feedforward Dynamics Models
48
+
49
+ ![](images/0381dc385f60ecd791a781c5dabae1cc55329492b2aa98aea8faaf04f92678ef.jpg)
50
+ Proposed Autoregressive Dynamics Model
51
+ Figure 1: Standard probabilistic dynamics models (e.g., Chua et al. (2018)) use a neural network to predict the mean and standard deviation of all dimensions of the next state and reward simultaneously. By contrast, we use the same neural network architecture with several additional inputs and predict the mean and standard deviation of each dimension of the next state conditioned on previous dimensions of the next state. As our empirical results indicate, this small change makes a big difference in the expressive power of dynamics models. Note that reward prediction is not shown on the right to reduce clutter, but it can be thought of as the $(n + 1)$ -th state dimension.
52
+
53
+ The problem of off-policy (offline) policy evaluation (OPE) entails estimating $V_{\gamma}(\pi)$ , the value of a target policy $\pi$ , based on a fixed dataset of transitions denoted $\mathcal{D}$ , without access to the environment's dynamics. Some OPE methods assume that $\mathcal{D}$ is generated from a known behavior (logging) policy $\mu$ and assume access to $\mu$ in addition to $\mathcal{D}$ . In practice, the logged dataset $\mathcal{D}$ may be the result of following some existing system that does not have a probabilistic form. Hence, in our work, we will assume no access to the original behavior policy $\mu$ for OPE. That said, for methods that require access to $\mu$ , we train a behavior cloning policy on $\mathcal{D}$ .
54
+
55
+ # 3 PROBABILISTIC DYNAMICS MODELS
56
+
57
+ Feedforward dynamics model. In the context of our paper, we use the term "model" to jointly refer to the forward dynamics model $p_{s}(s_{t + 1}|s_{t},a_{t})$ and reward model $p_{r}(r_{t + 1}|s_{t},a_{t})$ . We use neural nets to parameterize both distributions since they are powerful function approximators that have been effective for model-based RL (Chua et al., 2018; Nagabandi et al., 2018; Janner et al., 2019).
58
+
59
+ Let $\theta$ denote the parameters of a fully connected network used to model $p_{\theta}(s_{t + 1},r_{t + 1}\mid s_t,a_t)$ . We expect joint modeling of the next state and reward to benefit from sharing intermediate network features. Similar to prior work (Janner et al., 2019), our baseline feedforward model outputs the mean and log variance of all state dimensions and reward simultaneously, as follows:
60
+
61
+ $$
62
+ p_{\theta}(s_{t+1}, r_{t+1} \mid s_t, a_t) = \mathcal{N}\left(\mu(s_t, a_t), \operatorname{Diag}\left(\exp\{l(s_t, a_t)\}\right)\right), \tag{3}
63
+ $$
64
+
65
+ where $\mu (s_t,a_t)\in \mathbb{R}^{n + 1}$ denotes the mean for the concatenation of the next state and reward, $l(s_{t},a_{t})\in \mathbb{R}^{n + 1}$ denotes the log variance, and $\operatorname{Diag}(v)$ is an operator that creates a diagonal matrix whose main diagonal is the vector $v$ . During training, we minimize the negative log-likelihood of the parameters given the observed transitions in the dataset $\mathcal{D}$ :
66
+
67
+ $$
68
+ \ell(\theta \mid \mathcal{D}) = - \sum_{(s, a, r', s') \in \mathcal{D}} \log p_{\theta}(s', r' \mid s, a). \tag{4}
69
+ $$
70
+
71
+ While it is possible to place different weights on the loss for next state and reward prediction, we did not apply any special weighting and treated the reward as an additional state dimension in all of our experiments. This is straightforward to implement and does not require tuning an additional hyperparameter, which is challenging for OPE. Note that the input has $|s| + |a|$ dimensions.
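+
+ For reference, the per-transition negative log-likelihood of this diagonal Gaussian (Eqs. (3) and (4)) reduces to a few lines; a sketch where `target` is the concatenated next state and reward:
+
+ ```python
+ import numpy as np
+
+ def gaussian_nll(target, mu, log_var):
+     """NLL of a diagonal Gaussian N(mu, Diag(exp(log_var))) at `target`."""
+     return 0.5 * np.sum(log_var + (target - mu) ** 2 / np.exp(log_var)
+                         + np.log(2.0 * np.pi))
+ ```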
72
+
73
+ Autoregressive dynamics model. We now describe our autoregressive model. We seek to demonstrate the utility of predicting state dimensions in an autoregressive way. Therefore, rather than using a complex neural network architecture, where improvements in log-likelihood and policy evaluation are confounded by architectural differences, we opt to make simple modifications to the feedforward model described above. This allows us to isolate the source of performance improvements.
74
+
75
+ The autoregressive model we use is a fully connected model that predicts the mean and log variance of a single state dimension. We augment the input space of the baseline with the previously predicted state dimensions and a one-hot encoding indicating which dimension to predict. This is illustrated in Figure 1. The autoregressive model therefore has $3|s| + |a|$ input dimensions. Hence, it has a small number of additional weights in the first fully connected layer, but as our experiments show, these extra parameters are not the reason for the performance gain.
78
+
79
+ At training time, the autoregressive model has a computational cost similar to the fully connected model, as we can mask ground-truth states and use data parallelism to compute all state dimensions simultaneously. At inference time, the autoregressive model requires additional forward passes, on the order of the number of state dimensions in a given environment. We use the default ordering of the state dimensions in a given environment, though exploring different orderings is interesting future work. The negative log-likelihood for an autoregressive model takes the form of:
80
+
81
+ $$
82
+ \ell(\theta \mid \mathcal{D}) = - \sum_{(s, a, r', s') \in \mathcal{D}} \left[ \log p_{\theta}(r' \mid s, a, s') + \sum_{i=1}^{n} \log p_{\theta}(s_i' \mid s, a, s_1', \dots, s_{i-1}') \right], \tag{5}
83
+ $$
84
+
85
+ where we use the chain rule to factorize the joint probability $p(s', r' \mid s, a)$ .
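+
+ Sampling from such a model is sequential, one state dimension at a time; a minimal sketch (the `model` callable and its interface are our own stand-ins):
+
+ ```python
+ import numpy as np
+
+ def sample_next_state(model, s, a, n_dims):
+     """Autoregressive sampling (Figure 1, right): draw each next-state dimension
+     conditioned on the dimensions drawn so far."""
+     s_next = np.zeros(n_dims)
+     for i in range(n_dims):
+         one_hot = np.eye(n_dims)[i]
+         x = np.concatenate([s, a, s_next, one_hot])  # 3|s| + |a| inputs
+         mu, log_var = model(x)  # mean and log variance of dimension i
+         s_next[i] = mu + np.exp(0.5 * log_var) * np.random.randn()
+     return s_next
+ ```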
86
+
87
+ The main advantage of the autoregressive model is that it makes no conditional independence assumption between next state dimensions. This class of models can therefore capture non-unimodal dependencies, e.g., between different joint angles of a robot. Paduraru (2007) demonstrates this increased expressivity in the tabular setting, constructing an example on which a model assuming conditional independence fails. While the expressive power of autoregressive models have been shown in various generative models (Parmar et al., 2018; Oord et al., 2016), autoregressive dynamics models have not seen much use in Model-based RL for continuous control before this work.
88
+
89
+ Model-based OPE. Once a dynamics model is trained from offline data, OPE can be performed in a direct and primitive way. We let the policy and model interact—the policy generates the next action, the model plays the role of the environment and generates the next state and reward. Due to the stochasticity in the model and the policy, we estimate the return for a policy with Monte-Carlo sampling and monitor standard error. See Algorithm 1 for pseudocode.
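+
+ In runnable form, the Monte-Carlo estimator of Algorithm 1 is a double loop; `policy` and `model` below are assumed callables, our own stand-ins:
+
+ ```python
+ import numpy as np
+
+ def model_based_ope(policy, model, init_states, n_rollouts, horizon, gamma):
+     """Estimate V_gamma(pi) by rolling the policy out in the learned model."""
+     rng = np.random.default_rng()
+     returns = []
+     for _ in range(n_rollouts):
+         s = init_states[rng.integers(len(init_states))]
+         ret = 0.0
+         for t in range(horizon):
+             a = policy(s)
+             s, r = model(s, a)
+             ret += gamma ** t * r
+         returns.append(ret)
+     return float(np.mean(returns))
+ ```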
90
+
91
+ # 4 RELATED WORK
92
+
93
+ Our work follows a long line of OPE research, which is especially relevant to many practical domains such as medicine (Murphy et al., 2001), recommendation systems (Li et al., 2011), and education (Mandel et al., 2014) in order to avoid the costs and risks associated
94
+
95
+ # Algorithm 1 Model-based OPE
96
+
97
+ Require: number of rollouts $n$ , discount factor $\gamma$ , horizon length $H$ , policy $\pi$ , dynamics model $p$ , set of initial states $S_0$
+ - for $i = 1, 2, \dots, n$ do
+ -   $R_i \gets 0$ ; sample initial state $s_0 \sim S_0$
+ -   for $t = 0, 1, 2, \dots, H-1$ do: sample from policy: $a_t \sim \pi(\cdot \mid s_t)$ ; sample from the dynamics model: $s_{t+1}, r_{t+1} \sim p(\cdot, \cdot \mid s_t, a_t)$ ; $R_i \gets R_i + \gamma^t r_{t+1}$
+ - return $\frac{1}{n} \sum_{i=1}^{n} R_i$
98
+
99
+ with online evaluation. There exists a large body of work on OPE, including methods based on importance weighting (Precup, 2000; Li et al., 2014) and Lagrangian duality (Nachum et al., 2019; Yang et al., 2020; Uehara and Jiang, 2019). The model-based approach that we focus on in this paper lies within the class of algorithms referred to as the direct method (Kostrikov and Nachum, 2020; Dudík et al., 2011; Voloshin et al., 2019), which approximate the value of a new policy by either explicitly or implicitly estimating the transition and reward functions of the environment. While model-based policy evaluation has been considered by previous works (Paduraru, 2007; Thomas and Brunskill, 2016a; Hanna et al., 2017), it has largely been confined to simple domains with finite state and action spaces where function approximation is not necessary. By contrast, our work provides an extensive demonstration of model-based OPE in challenging continuous control benchmark domains. Previous instances of the use of function approximation for model-based OPE (Hallak et al., 2015) impose strong assumptions on the probabilistic dynamics models, such as factorability of the MDP. Our results indicate that even seemingly benign assumptions about the independence of different state dimensions can have detrimental consequences for the effectiveness of a model-based OPE estimate.
100
+
101
+ While the use of model-based principles in OPE has been relatively rare, it has been more commonly used for policy optimization. The field of model-based RL has matured in recent years to yield impressive results for both online (Nagabandi et al., 2018; Chua et al., 2018; Kurutach et al., 2018; Janner et al., 2019) and offline (Matsushima et al., 2020; Kidambi et al., 2020; Yu et al., 2020; Argenson and Dulac-Arnold, 2020) policy optimization. Several of the techniques we employ, such
102
+
103
+ Table 1: Summary of the offline datasets used. Dataset size indicates the number of $(s, a, r', s')$ tuples.
104
+
105
+ <table><tr><td></td><td>cartpole swingup</td><td>cheetah run</td><td>finger turn hard</td><td>fish swim</td><td>humanoid run</td><td>walker stand</td><td>walker walk</td><td>manipulator insert ball</td><td>manipulator insert peg</td></tr><tr><td>State dim.</td><td>5</td><td>17</td><td>12</td><td>24</td><td>67</td><td>24</td><td>24</td><td>44</td><td>44</td></tr><tr><td>Action dim.</td><td>1</td><td>6</td><td>2</td><td>5</td><td>21</td><td>6</td><td>6</td><td>5</td><td>5</td></tr><tr><td>Dataset size</td><td>40K</td><td>300K</td><td>500K</td><td>200K</td><td>3M</td><td>200K</td><td>200K</td><td>1.5M</td><td>1.5M</td></tr></table>
106
+
107
+ Table 2: Negative log-likelihood on heldout validation sets for different RL Unplugged tasks (lower is better). For both families of dynamics models, we train 48 models with different hyperparameters. We report the NLL of the Top-1 model on top and the average of the Top-5 models on the bottom. On all tasks, autoregressive dynamics models significantly outperform feedforward models in terms of both Top-1 and Top-5 NLL.
108
+
109
+ <table><tr><td></td><td>Dynamics model architecture</td><td>cartpole swingup</td><td>cheetah run</td><td>finger turn hard</td><td>fish swim</td><td>humanoid run</td><td>walker stand</td><td>walker walk</td><td>manipulator insert ball</td><td>manipulator insert peg</td></tr><tr><td rowspan="2">Top-1</td><td>Feedforward</td><td>-6.81</td><td>-4.90</td><td>-5.58</td><td>-4.91</td><td>-3.42</td><td>-4.52</td><td>-3.84</td><td>-4.74</td><td></td></tr><tr><td>Autoregressive</td><td>-7.21</td><td>-6.36</td><td>-6.14</td><td>-5.21</td><td>-4.18</td><td>-4.73</td><td>-4.17</td><td>-5.62</td><td></td></tr><tr><td rowspan="2">Top-5</td><td>Feedforward</td><td>-6.75</td><td>-4.85</td><td>-5.50</td><td>-4.90</td><td>-3.40</td><td>-4.49</td><td>-3.81</td><td>-4.64</td><td></td></tr><tr><td>Autoregressive</td><td>-7.14</td><td>-6.32</td><td>-5.94</td><td>-5.18</td><td>-4.15</td><td>-4.71</td><td>-4.15</td><td>-5.58</td><td></td></tr></table>
110
+
111
+ Conversely, we present strong empirical evidence that the benefits of our introduced autoregressive generative models of state observations carry over to model-based policy optimization, at least in the offline setting; extending them to online policy optimization is an interesting avenue for future work.
112
+
113
+ # 5 RESULTS
114
+
115
+ We conduct our experiments on the DeepMind control suite (Tassa et al., 2018), a set of control tasks implemented in MuJoCo (Todorov et al., 2012). We use the offline datasets from RL Unplugged (Gulcehre et al., 2020), the details of which are provided in Table 1. These environments capture a wide range of complexity, from 40K transitions in a 5-dimensional cartpole environment to 1.5 million transitions on complex manipulation tasks. We follow the evaluation protocol in the Deep OPE (Fu et al., 2021) benchmark and use policies generated by four different algorithms: behavioral cloning (Bain, 1995), D4PG (Barth-Maron et al., 2018), Critic Regularized Regression (Wang et al., 2020), and ABM (Siegel et al., 2019). With varied hyperparameters, these form a diverse set of policies of varying quality.
116
+
117
+ We perform a thorough hyperparameter sweep in the experiments and use standard practice from generative modeling to improve the quality of the models. We allocate $80\%$ of the data for training and $20\%$ of the data for model selection. We vary the depth and width of the neural networks (number of layers $\in \{3,4\}$, layer size $\in \{512,1024\}$), add different amounts of noise to input states and actions (input noise $\in \{0, 1\mathrm{e}{-6}, 1\mathrm{e}{-7}\}$), and consider two levels of weight decay for regularization (weight decay $\in \{0, 1\mathrm{e}{-6}\}$). For the choice of optimizer, we consider both Adam (Kingma and Ba, 2014) and SGD with momentum, and find Adam to be more effective at maximizing log-likelihood across all tasks in preliminary experiments. We thus use Adam in all of our experiments, with two learning rates $\in \{1\mathrm{e}{-3}, 3\mathrm{e}{-4}\}$. We decay the optimizer's learning rate linearly to zero throughout training, finding this choice to outperform a constant learning rate. Lastly, we find that longer training often improves log-likelihood results. We use 500 epochs for training the final models.
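+ 
+ For reference, the following sketch enumerates how the sweep described above yields 48 configurations per model family (2 depths x 2 widths x 3 noise levels x 2 weight decays x 2 learning rates).
+ 
+ ```python
+ from itertools import product
+ 
+ # Hyperparameter grid as described in the text.
+ grid = list(product(
+     [3, 4],             # number of layers
+     [512, 1024],        # layer size
+     [0.0, 1e-6, 1e-7],  # input noise
+     [0.0, 1e-6],        # weight decay
+     [1e-3, 3e-4],       # Adam learning rate (decayed linearly to zero)
+ ))
+ assert len(grid) == 48  # 2 * 2 * 3 * 2 * 2
+ ```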
118
+
119
+ For each task we consider in total 48 hyperparameter combinations (listed above) for both models and pick the best model in each model family based on validation log-likelihood. This model is then used for model-based OPE and policy optimization. Note that, in our experiments, $20\%$ of the transitions are used only for validation, but we believe one can re-train the models with the best hyperparameter configuration on the full transition datasets to improve the results even further.
120
+
121
+ # 5.1 AUTOREGRESSIVE DYNAMICS MODELS OUTPERFORM FEEDFORWARD MODELS IN NLL
122
+
123
+ To evaluate the effectiveness of autoregressive dynamics models compared to feedforward counterparts, Table 2 reports negative log-likelihood (NLL) on the heldout validation set for the best-performing models from our hyperparameter sweep.
124
+
125
+ ![](images/67c687b668b06a8163327833ace4230f2f0d91071db484349353788eb2f115ae.jpg)
126
+ Figure 2: Network parameter count vs. validation negative log-likelihood for autoregressive and feedforward models. Autoregressive models often have a lower validation NLL irrespective of parameter count.
127
+
128
+ ![](images/91e1fa63ef8217e8f47707a1717c73c3cd42c7ae50fb2ee1cddb890adaede2d4.jpg)
129
+
130
+ ![](images/3ed8d49a5e819b39b97750cc5e44ba9fbb7cc55695ad2f6615af6710c05f76b9.jpg)
131
+
132
+ ![](images/95d598fda8df92ab7592df5e26a5215a28f3a23fd8a0ae83bf450beb08e7f5ae.jpg)
133
+
134
+ ![](images/5952d6549cb6cded9557d192435093ddb03b52cfb357b4a5f18b0fa1dcff80a6.jpg)
135
+ Figure 3: Validation negative log-likelihood vs. OPE correlation coefficients on different tasks. On 4 RL Unplugged tasks, we conduct an extensive experiment in which 48 autoregressive and 48 feedforward dynamics models are used for OPE. For each dynamics model, we calculate the correlation coefficient between model-based value estimates and ground truth values at a discount factor of 0.995. We find that low validation NLL numbers generally correspond to accurate policy evaluation, while higher NLL numbers are less meaningful.
136
+
137
+ ![](images/dbc4ad7e6d4bea2747b58572bc2492f11237692ab7e6fc1a39bf74ed7f8fab05.jpg)
138
+
139
+ ![](images/8589b99614a9c025a51bebfc216d2c4d17a98aae6b093e10253bf516421c3ead.jpg)
140
+
141
+ ![](images/c5c56537f5993f6e91c3a0506379ec24b12b1d82bc4510713d3450efb4e2feb2.jpg)
142
+
143
+ For each environment, we report the NLL of the best-performing model (Top-1) and the average NLL across the Top-5 models. The autoregressive model has lower NLL on all environments, indicating that it generalizes better to unseen data.
144
+
145
+ To study the impact of model size on NLL, Figure 2 shows validation NLL as a function of parameter count. We find that large models hurt on small datasets, but, more importantly, autoregressive models outperform feedforward models regardless of the parameter count regime: even small autoregressive models attain a lower validation NLL than large feedforward models. This indicates that autoregressive models have a better inductive bias for modeling the transition dynamics than feedforward models, which make a conditional independence assumption.
146
+
147
+ # 5.2 ARE DYNAMICS MODELS WITH LOWER NLL BETTER FOR MODEL-BASED OPE?
148
+
149
+ We ultimately care not just about the log-likelihood numbers, but also whether or not the dynamics models are useful in policy evaluation and optimization. To study the relationship of NLL and OPE performance for model-based methods, we compute OPE estimates via Algorithm 1 and compute the Pearson correlation between the OPE estimates and the true discounted returns. This serves as a measure of the effectiveness of the model for OPE. We repeat this for all 96 dynamics models we trained on a given environment and plot the correlation coefficients against validation NLL in Figure 3.
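+ 
+ The quality measure used in Figure 3 is simple to reproduce; a minimal sketch, assuming one OPE estimate and one ground-truth discounted return per evaluated policy:
+ 
+ ```python
+ import numpy as np
+ 
+ def ope_pearson(ope_estimates, true_returns):
+     """Pearson correlation between OPE estimates and ground-truth returns,
+     computed across the set of evaluated policies."""
+     return np.corrcoef(np.asarray(ope_estimates), np.asarray(true_returns))[0, 1]
+ ```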
150
+
151
+ Models with low NLL are generally more accurate in OPE. Lambert et al. (2020) have previously demonstrated that in Model-based RL, "training cost does not hold a strong correlation to maximization of episode reward." We use validation NLL instead, and our results on policy evaluation decouple the model from policy optimization, suggesting a more nuanced picture: low validation NLL numbers generally correspond to accurate policy evaluation, while higher NLL numbers are generally less meaningful. In other words, if the dynamics model does not capture the transition dynamics accurately enough, then it is very hard to predict its performance on OPE. However, once the model starts to capture the dynamics faithfully, we conjecture that NLL starts to become a reasonable metric for model selection. For instance, validation NLL does not seem to be a great metric for ranking feedforward models, whereas it is more reasonable for autoregressive models.
152
+
153
+ ![](images/f52c5e171c66f2d781a6787f26ff5dff5b15df2a2948081ea7b06d1394cdf9d9.jpg)
154
+
155
+ ![](images/0e88856a6aed4d8bc4446e3dff8484bff7db03578c01c5bc66fd601d8341b07a.jpg)
156
+
157
+ ![](images/c1118c029520a682b8b60e01556851a54fb958bd786e3eb2a9467c0378da1a0b.jpg)
158
+
159
+ ![](images/765bd0e599bf822b400420936847bd845d434acbdce9a93c17c521f7bdf916c0.jpg)
160
+
161
+ ![](images/a7d66c32ce24e9b4d4a984136f0c9be8d42f95a5591ad88ea5c8ae852b101aa9.jpg)
162
+
163
+ ![](images/21679b0277f7c966e2f35db9ad4f644c114ba44245eeacfc8d1f1dd484e847ff.jpg)
164
+
165
+ ![](images/7ade3de1a425afaca4654416755d783204a13ce9fc4ad415bbcd24c1622e4441.jpg)
166
+
167
+ ![](images/4b98d3fa0f10818eea6ab3bd1fcd7ccb07cb2e7b3ce575b096fa9a0601214827.jpg)
168
+
169
+ ![](images/5426b5504a393e106ed2c24dfedfd779d10846c4d220f920e8c75db902c7a9fd.jpg)
170
+
171
+ ![](images/e8443f02b3be3df3a7f926d7502884d561fefcf510428645e87a48b5192ef91b.jpg)
172
+
173
+ ![](images/d4e35e73a3cc4750a36f0cf2e1e6afacf5094860478654e6693667b2e765ada7.jpg)
174
+
175
+ ![](images/0d22eebdc7609f82ed76c31b6ae6c4a8b4674fb7187f782c3de9ea2ca971b2a1.jpg)
176
+
177
+ ![](images/99e525231b8c65c06bb5de3b533a8f40cef19bbb4e86cf8077010f9808461244.jpg)
178
+ Figure 4: Comparison of model-based OPE using autoregressive and feedforward dynamics models with state-of-the-art FQE methods based on L2 and distributional Bellman error. We plot OPE estimates on the y-axis against ground truth returns at a discount factor of 0.995 on the x-axis. We report the Pearson correlation coefficient $(r)$ in the title. While feedforward models fall behind FQE on most tasks, autoregressive dynamics models are often superior. See Figure B.4 for additional scatter plots on the other environments.
179
+
180
+ ![](images/3e28b910c91b07d0cbacd57233a58b65aaf7e3950831e97fff52d263fab4b538.jpg)
181
+
182
+ ![](images/0338e4a731b79b255c1232611ec0973a8e2bfa7277e43e5363a6ddecc5fbc55c.jpg)
183
+
184
+ ![](images/2777ca571228cac2a4f9bb4a4b913d1f76adbe21e87cae92621f6b430a3b4287.jpg)
185
+
186
+ # 5.3 COMPARISON WITH OTHER OPE METHODS
187
+
188
+ We adopt a recently proposed benchmark for OPE (Fu et al., 2021) and compare our model-based approaches with the state-of-the-art OPE baselines therein. Figures 4 and B.4 compare OPE estimates from two Fitted Q-Evaluation (FQE) baselines (Le et al., 2019; Kostrikov and Nachum, 2020; Paine et al., 2020), our feedforward models, and the autoregressive approach. Each plot reports the Pearson correlation between the OPE estimates and the true returns. The autoregressive model consistently outperforms the feedforward model and surpasses the FQE methods on most environments. We report ensembling results in the appendix, but compare single models in the rest of the paper for fairness.
189
+
190
+ We compute summary statistics for OPE methods in Table 3, Table A.1, and Table A.2. These tables report Spearman's rank correlation, regret, and absolute error, respectively. These metrics capture different desirable properties of OPE methods (Fu et al., 2021); more details about how they are computed are given in the appendix. In all three metrics, the autoregressive model achieves the best median performance across the nine environments, whereas the feedforward baseline is not as good as FQE. The only environment on which the autoregressive model has negative rank correlation is manipulator insert ball. In addition, a major advantage of our model-based approach over FQE is that the model only needs to be trained once per environment: we do not need to perform additional policy-specific optimization, whereas FQE needs to optimize a separate Q-function approximator per policy.
191
+
192
+ <table><tr><td colspan="2"></td><td>Cartpole swingup</td><td>Cheetah run</td><td>Finger turn hard</td><td>Fish swim</td><td>Humanoid run</td></tr><tr><td rowspan="8">Rank Correlation btw. OPE and ground truth</td><td>Importance Sampling</td><td>-0.23±0.11</td><td>-0.01±0.12</td><td>-0.45±0.08</td><td>-0.17±0.11</td><td>0.91±0.02</td></tr><tr><td>Best DICE</td><td>-0.16±0.11</td><td>0.07±0.11</td><td>-0.22±0.11</td><td>0.44±0.09</td><td>-0.10±0.10</td></tr><tr><td>Variational power method</td><td>0.01±0.11</td><td>0.01±0.12</td><td>-0.25±0.11</td><td>0.56±0.08</td><td>0.36±0.09</td></tr><tr><td>Doubly Robust (IS, FQE)</td><td>0.55±0.09</td><td>0.56±0.08</td><td>0.67±0.05</td><td>0.11±0.12</td><td>-0.03±0.12</td></tr><tr><td>Feedforward Model</td><td>0.83±0.05</td><td>0.64±0.08</td><td>0.08±0.11</td><td>0.95±0.02</td><td>0.35±0.10</td></tr><tr><td>FQE (distributional)</td><td>0.69±0.07</td><td>0.67±0.06</td><td>0.94±0.01</td><td>0.59±0.10</td><td>0.74±0.06</td></tr><tr><td>FQE (L2)</td><td>0.70±0.07</td><td>0.56±0.08</td><td>0.83±0.04</td><td>0.10±0.12</td><td>-0.02±0.12</td></tr><tr><td>Autoregressive Model</td><td>0.91±0.02</td><td>0.74±0.07</td><td>0.57±0.09</td><td>0.96±0.01</td><td>0.90±0.02</td></tr><tr><td colspan="2"></td><td>Walker stand</td><td>Walker walk</td><td>Manipulator insert ball</td><td>Manipulator insert peg</td><td>Median ↑</td></tr><tr><td rowspan="8">Rank Correlation btw. OPE and ground truth</td><td>Importance Sampling</td><td>0.59±0.08</td><td>0.38±0.10</td><td>-0.72±0.05</td><td>-0.25±0.08</td><td>-0.17</td></tr><tr><td>Best DICE</td><td>-0.11±0.12</td><td>-0.58±0.08</td><td>0.19±0.11</td><td>-0.35±0.10</td><td>-0.11</td></tr><tr><td>Variational power method</td><td>-0.35±0.10</td><td>-0.10±0.11</td><td>0.61±0.08</td><td>0.41±0.09</td><td>0.01</td></tr><tr><td>Doubly Robust (IS, FQE)</td><td>0.88±0.03</td><td>0.85±0.04</td><td>0.42±0.10</td><td>-0.47±0.09</td><td>0.55</td></tr><tr><td>Feedforward Model</td><td>0.82±0.04</td><td>0.80±0.05</td><td>0.06±0.10</td><td>-0.56±0.08</td><td>0.64</td></tr><tr><td>FQE (distributional)</td><td>0.87±0.02</td><td>0.89±0.03</td><td>0.63±0.08</td><td>-0.23±0.10</td><td>0.69</td></tr><tr><td>FQE (L2)</td><td>0.96±0.01</td><td>0.94±0.02</td><td>0.70±0.07</td><td>-0.48±0.08</td><td>0.70</td></tr><tr><td>Autoregressive Model</td><td>0.96±0.01</td><td>0.98±0.00</td><td>-0.33±0.09</td><td>0.47±0.09</td><td>0.90</td></tr></table>
193
+
194
+ Table 3: Spearman's rank correlation $(\rho)$ coefficient (bootstrap mean $\pm$ standard deviation) between different OPE metrics and ground truth values at a discount factor of 0.995. In each column, rank correlation coefficients that are not significantly different from the best $(p > 0.05)$ are bold faced. Methods are ordered by median. Also see Table A.1 and Table A.2 for Normalized Regret@5 and Average Absolute Error results.
195
+
196
+ ![](images/2570983863efb56af66aafa2f3d7d581f27cfc8987cc88b33380dfe20ce21f31.jpg)
197
+ Figure 5: Model-based offline policy optimization results. With planning and data augmentation, we improve the performance over CRR exp (our baseline algorithm). When using autoregressive dynamics models (CRR-planning AR), we outperform state-of-the-art on Cheetah run and Fish swim. Previous SOTA results (Gulcehre et al., 2020; Wang et al., 2020) are obtained using different offline RL algorithms: Cheetah run - CRR exp, Fish swim - CRR binary max, Finger turn hard - CRR binary max, Cartpole swingup - BRAC (Wu et al., 2019).
198
+
199
+ # 5.4 AUTOREGRESSIVE DYNAMICS MODELS FOR OFFLINE POLICY OPTIMIZATION
200
+
201
+ Policy evaluation is an integral part of reinforcement learning, so improvements in policy evaluation can be leveraged for policy optimization. In this section, we explore two ways of using models to improve offline reinforcement learning. In all experiments, we use Critic Regularized Regression (CRR) as the base offline reinforcement learning algorithm (Wang et al., 2020).
202
+
203
+ First, we utilize the model at test time for planning, using a modified version of Model Predictive Path Integral (MPPI) control (Williams et al., 2015). Unlike MPPI, we truncate the planning process after 10 steps of rollout and use the CRR critic to evaluate future discounted returns. We provide additional details in the appendix. Second, we use the model to augment the transition dataset so as to learn a better critic for CRR. More precisely, given $s_t^i \sim \mathcal{D}$ and the current policy $\pi$, we generate additional data using the following process: $\hat{a}_t^i \sim \pi(\cdot | s_t^i)$, then $\hat{s}_{t+1}^i, \hat{r}_{t+1}^i \sim p(\cdot, \cdot | s_t^i, \hat{a}_t^i)$.
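+ 
+ A minimal sketch of this augmentation step, again assuming `policy` and `model` are sampling callables (Appendix B notes that synthetic and real transitions are mixed at a 1-to-1 ratio):
+ 
+ ```python
+ def augment(dataset_states, policy, model):
+     """Generate one synthetic transition per real state, as described above."""
+     augmented = []
+     for s in dataset_states:
+         a_hat = policy(s)                    # action from the current policy
+         s_next_hat, r_hat = model(s, a_hat)  # imagined next state and reward
+         augmented.append((s, a_hat, r_hat, s_next_hat))
+     return augmented
+ ```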
204
+
205
+ These two options are orthogonal and can be applied jointly. We implemented both techniques on top of the CRR exp variant (Wang et al., 2020) and show their combined effect in Figure 5.
206
+
207
+ The figure shows that autoregressive dynamics models also outperform feedforward ones in the policy optimization context. Notably, in the case of cheetah run and fish swim, using autoregressive models for planning as well as data augmentation enables us to outperform the previous state-of-the-art on these offline datasets. Additionally, when using autoregressive dynamics models, both techniques improve performance. In the appendix, we show this result as well as more ablations.
208
+
209
+ # 6 CONCLUSION
210
+
211
+ This paper shows the promise of autoregressive models in learning transition dynamics for continuous control, with strong results for off-policy policy evaluation and offline policy optimization. Our contributions to offline model-based policy optimization are orthogonal to prior work that uses ensembles to lower value estimates when ensemble components disagree (Kidambi et al., 2020). Incorporating conservative value estimation into our method is an interesting avenue for future research. We use relatively primitive autoregressive neural architectures in this paper to enable a fair comparison with existing feedforward dynamics models. That said, it will be exciting to apply more sophisticated autoregressive neural network architectures with cross-attention (Bahdanau et al., 2014) and self-attention (Vaswani et al., 2017) to model-based RL for continuous control.
212
+
213
+ Acknowledgements We thank Jimmy Ba, William Chan, Rishabh Agarwal, Dale Schuurmans, and Silviu Pitis for fruitful discussions on our work. We are also grateful for the helpful comments from Lihong Li, Jenny Liu, Harris Chan, Keiran Paster, Sheng Jia, and Tingwu Wang on earlier drafts.
214
+
215
+ # REFERENCES
216
+
217
+ Rishabh Agarwal, Dale Schuurmans, and Mohammad Norouzi. An optimistic perspective on offline reinforcement learning. International Conference on Machine Learning, 2020.
218
+ Arthur Argenson and Gabriel Dulac-Arnold. Model-based offline planning. arXiv:2008.05556, 2020.
219
+ Christopher G Atkeson, Andrew W Moore, and Stefan Schaal. Locally weighted learning. In Lazy learning, pages 11-73. Springer, 1997.
220
+ Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. arXiv:1409.0473, 2014.
221
+ Michael Bain. A framework for behavioural cloning. In Machine Intelligence 15, pages 103-129, 1995.
222
+ Gabriel Barth-Maron, Matthew W Hoffman, David Budden, Will Dabney, Dan Horgan, Dhruva Tb, Alistair Muldal, Nicolas Heess, and Timothy Lillicrap. Distributed distributional deterministic policy gradients. arXiv:1804.08617, 2018.
223
+ Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. arXiv:2005.14165, 2020.
224
+ Kurtland Chua, Roberto Calandra, Rowan McAllister, and Sergey Levine. Deep reinforcement learning in a handful of trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems, 2018.
225
+ Marc Deisenroth and Carl E Rasmussen. Pilco: A model-based and data-efficient approach to policy search. In Proceedings of the 28th International Conference on Machine Learning (ICML-11), pages 465-472, 2011.
226
+ Miroslav Dudík, John Langford, and Lihong Li. Doubly robust policy evaluation and learning. arXiv:1103.4601, 2011.
227
+ Justin Fu, Mohammad Norouzi, Ofir Nachum, George Tucker, Ziyu Wang, Alexander Novikov, Mengjiao Yang, Michael R. Zhang, Yutian Chen, Aviral Kumar, Cosmin Paduraru, Sergey Levine, and Thomas Paine. Benchmarks for deep off-policy evaluation. In International Conference on Learning Representations, 2021.
228
+
229
+ Scott Fujimoto, David Meger, and Doina Precup. Off-policy deep reinforcement learning without exploration. In International Conference on Machine Learning, pages 2052-2062, 2019.
230
+ Caglar Gulcehre, Ziyu Wang, Alexander Novikov, Tom Le Paine, Sergio Gomez Colmenarejo, Konrad Zolna, Rishabh Agarwal, Josh Merel, Daniel Mankowitz, Cosmin Paduraru, et al. RL Unplugged: Benchmarks for offline reinforcement learning. arXiv:2006.13888, 2020.
231
+ Assaf Hallak, François Schnitzler, Timothy Mann, and Shie Mannor. Off-policy model-based learning under unknown factored dynamics. In International Conference on Machine Learning, pages 711-719, 2015.
232
+ Josiah Hanna, Scott Niekum, and Peter Stone. Importance sampling policy evaluation with an estimated behavior policy. In International Conference on Machine Learning, pages 2605-2613. PMLR, 2019.
233
+ Josiah P Hanna, Peter Stone, and Scott Niekum. Bootstrapping with models: Confidence intervals for off-policy evaluation. In Thirty-First AAAI Conference on Artificial Intelligence, 2017.
234
+ Michael Janner, Justin Fu, Marvin Zhang, and Sergey Levine. When to trust your model: Model-based policy optimization. In Advances in Neural Information Processing Systems, 2019.
235
+ Rahul Kidambi, Aravind Rajeswaran, Praneeth Netrapalli, and Thorsten Joachims. MOReL: Model-based offline reinforcement learning. arXiv:2005.05951, 2020.
236
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv:1412.6980, 2014.
237
+ Ilya Kostrikov and Ofir Nachum. Statistical bootstrapping for uncertainty estimation in off-policy evaluation, 2020.
238
+ Aviral Kumar, Justin Fu, Matthew Soh, George Tucker, and Sergey Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. In Advances in Neural Information Processing Systems, pages 11784-11794, 2019.
239
+ Thanard Kurutach, Ignasi Clavera, Yan Duan, Aviv Tamar, and Pieter Abbeel. Model-Ensemble Trust-Region Policy Optimization. In International Conference on Learning Representations, 2018.
240
+ Nathan Lambert, Brandon Amos, Omry Yadan, and Roberto Calandra. Objective mismatch in model-based reinforcement learning. arXiv:2002.04523, 2020.
241
+ Sascha Lange, Thomas Gabel, and Martin Riedmiller. Batch reinforcement learning. In Reinforcement learning, pages 45-73. Springer, 2012.
242
+ Hoang M Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. arXiv:1903.08738, 2019.
243
+ Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv:2005.01643, 2020.
244
+ Lihong Li, Wei Chu, John Langford, and Xuanhui Wang. Unbiased offline evaluation of contextual-bandit-based news article recommendation algorithms. In Proceedings of the fourth ACM international conference on Web search and data mining, pages 297-306. ACM, 2011.
245
+ Lihong Li, Remi Munos, and Csaba Szepesvári. On minimax optimal offline policy evaluation. arXiv:1409.3653, 2014.
246
+ Qiang Liu, Lihong Li, Ziyang Tang, and Dengyong Zhou. Breaking the curse of horizon: Infinite-horizon off-policy estimation. In Advances in Neural Information Processing Systems, pages 5356-5366, 2018.
247
+ Travis Mandel, Yun-En Liu, Sergey Levine, Emma Brunskill, and Zoran Popovic. Offline policy evaluation across representations with applications to educational games. In Proceedings of the 2014 international conference on Autonomous agents and multi-agent systems, pages 1077-1084. International Foundation for Autonomous Agents and Multiagent Systems, 2014.
248
+
249
+ Tatsuya Matsushima, Hiroki Furuta, Yutaka Matsuo, Ofir Nachum, and Shixiang Gu. Deployment-efficient reinforcement learning via model-based offline optimization. arXiv:2006.03647, 2020.
250
+ Andrew W Moore and Christopher G Atkeson. Memory-based reinforcement learning: Efficient computation with prioritized sweeping. In Advances in neural information processing systems, pages 263-270, 1993.
251
+ Susan A Murphy, Mark J van der Laan, James M Robins, and Conduct Problems Prevention Research Group. Marginal mean models for dynamic regimes. Journal of the American Statistical Association, 96(456):1410-1423, 2001.
252
+ Ofir Nachum, Yinlam Chow, Bo Dai, and Lihong Li. Dualdice: Behavior-agnostic estimation of discounted stationary distribution corrections. In Advances in Neural Information Processing Systems, pages 2318-2328, 2019.
253
+ Anusha Nagabandi, Gregory Kahn, Ronald S Fearing, and Sergey Levine. Neural network dynamics for model-based deep reinforcement learning with model-free fine-tuning. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 7559-7566. IEEE, 2018.
254
+ Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. Wavenet: A generative model for raw audio. arXiv:1609.03499, 2016.
255
+ Cosmin Paduraru. Planning with approximate and learned models of Markov decision processes. MSc thesis, University of Alberta, 2007.
256
+ Tom Le Paine, Cosmin Paduraru, Andrea Michi, Caglar Gulcehre, Konrad Zolna, Alexander Novikov, Ziyu Wang, and Nando de Freitas. Hyperparameter selection for offline reinforcement learning. arXiv:2007.09055, 2020.
257
+ Niki Parmar, Ashish Vaswani, Jakob Uszkoreit, Lukasz Kaiser, Noam Shazeer, Alexander Ku, and Dustin Tran. Image transformer. arXiv:1802.05751, 2018.
258
+ Jing Peng and Ronald J Williams. Efficient learning and planning within the dyna framework. Adaptive behavior, 1(4):437-454, 1993.
259
+ Doina Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, page 80, 2000.
260
+ Noah Siegel, Jost Tobias Springenberg, Felix Berkenkamp, Abbas Abdolmaleki, Michael Neunert, Thomas Lampe, Roland Hafner, Nicolas Heess, and Martin Riedmiller. Keep doing what worked: Behavior modelling priors for offline reinforcement learning. In International Conference on Learning Representations, 2019.
261
+ Richard S Sutton. Integrated architectures for learning, planning, and reacting based on approximating dynamic programming. In Machine learning proceedings 1990, pages 216-224. Elsevier, 1990.
262
+ Yuval Tassa, Yotam Doron, Alistair Muldal, Tom Erez, Yazhe Li, Diego de Las Casas, David Budden, Abbas Abdolmaleki, Josh Merel, Andrew Lefrancq, et al. DeepMind control suite. arXiv:1801.00690, 2018.
263
+ P. Thomas and E. Brunskill. Data-efficient off-policy policy evaluation for reinforcement learning. In Proceedings of the 33rd International Conference on Machine Learning, pages 2139-2148, 2016a.
264
+ Philip Thomas and Emma Brunskill. Data-efficient off-policy policy evaluation for reinforcement learning. In International Conference on Machine Learning, pages 2139-2148, 2016b.
265
+ Emanuel Todorov, Tom Erez, and Yuval Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 5026-5033. IEEE, 2012.
266
+ Masatoshi Uehara and Nan Jiang. Minimax weight and q-function learning for off-policy evaluation. arXiv:1910.12809, 2019.
267
+
268
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008, 2017.
269
+ Cameron Voloshin, Hoang M Le, Nan Jiang, and Yisong Yue. Empirical study of off-policy policy evaluation for reinforcement learning. arXiv:1911.06854, 2019.
270
+ Tingwu Wang, Xuchan Bao, Ignasi Clavera, Jerrick Hoang, Yeming Wen, Eric Langlois, Shunshi Zhang, Guodong Zhang, Pieter Abbeel, and Jimmy Ba. Benchmarking model-based reinforcement learning. arXiv:1907.02057, 2019.
271
+ Ziyu Wang, Alexander Novikov, Konrad Zolna, Jost Tobias Springenberg, Scott Reed, Bobak Shahriari, Noah Siegel, Josh Merel, Caglar Gulcehre, Nicolas Heess, et al. Critic regularized regression. arXiv:2006.15134, 2020.
272
+ Junfeng Wen, Bo Dai, Lihong Li, and Dale Schuurmans. Batch stationary distribution estimation. arXiv:2003.00722, 2020.
273
+ Grady Williams, Andrew Aldrich, and Evangelos Theodorou. Model predictive path integral control using covariance variable importance sampling. arXiv:1509.01149, 2015.
274
+ Yifan Wu, George Tucker, and Ofir Nachum. Behavior regularized offline reinforcement learning. arXiv:1911.11361, 2019.
275
+ Mengjiao Yang, Ofir Nachum, Bo Dai, Lihong Li, and Dale Schuurmans. Off-policy evaluation via the regularized lagrangian, 2020.
276
+ Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Zou, Sergey Levine, Chelsea Finn, and Tengyu Ma. MOPO: Model-based offline policy optimization. arXiv:2005.13239, 2020.
277
+
278
+ # A OFFLINE POLICY EVALUATION
279
+
280
+ We use the baseline results in Fu et al. (2021). For convenience, we replicate their description of the OPE baselines and metrics.
281
+
282
+ # A.1 OPE METRICS
283
+
284
+ To evaluate the OPE algorithms, we compute three different metrics between the estimated returns and the ground truth returns:
285
+
286
+ 1. Rank correlation: This metric assesses how well the estimated values rank policies. It is equal to the correlation between the ranking (sorted order) by the OPE estimates and the ranking by the ground truth values.
287
+ 2. Absolute Error: This metric measures the deviation of the estimates from the ground truth; it does not directly assess the usefulness for ranking.
288
+ 3. Regret@k: This metric measures how much worse the best policies identified by the estimates are than the best policy in the entire set. Regret@k is the difference between the actual expected return of the best policy in the entire set and the actual value of the best policy in the top-k set. (A sketch computing all three metrics follows this list.)
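+ 
+ A sketch of the three metrics, assuming one estimate and one ground-truth value per policy; the paper additionally normalizes the regret and reports bootstrap statistics.
+ 
+ ```python
+ import numpy as np
+ from scipy.stats import spearmanr
+ 
+ def ope_metrics(estimates, true_values, k=5):
+     est, true = np.asarray(estimates), np.asarray(true_values)
+     rank_corr = spearmanr(est, true).correlation
+     abs_err = np.abs(est - true).mean()
+     top_k = np.argsort(est)[-k:]               # policies ranked best by OPE
+     regret_k = true.max() - true[top_k].max()  # shortfall vs. the true best policy
+     return rank_corr, abs_err, regret_k
+ ```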
289
+
290
+ # A.2 OPE BASELINES
291
+
292
+ Fitted Q-Evaluation (FQE) As in Le et al. (2019), we train a neural network to estimate the value of the evaluation policy $\pi_{e}$ by bootstrapping from $Q(s^{\prime},\pi_{e}(s^{\prime}))$ . We tried two different implementations, one from Kostrikov and Nachum (2020) and another from Paine et al. (2020).
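+ 
+ As a minimal sketch of the bootstrapped regression at the core of FQE (the L2 variant; network definitions, data loading, and target-network updates are omitted, and the callables are placeholders):
+ 
+ ```python
+ import torch
+ 
+ def fqe_loss(q_net, q_target, policy_e, batch, gamma):
+     """One regression step: fit Q(s, a) to r + gamma * Q_target(s', pi_e(s'))."""
+     s, a, r, s_next = batch
+     with torch.no_grad():
+         target = r + gamma * q_target(s_next, policy_e(s_next)).squeeze(-1)
+     return torch.mean((q_net(s, a).squeeze(-1) - target) ** 2)
+ ```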
293
+
294
+ Importance Sampling (IS) We perform importance sampling with a learned behavior policy. We use the implementation from Kostrikov and Nachum (2020), which uses self-normalized (also known as weighted) step-wise importance sampling (Liu et al., 2018; Nachum et al., 2019). Since the behavior policy is not known explicitly, we learn an estimate of it via a max-likelihood objective over the dataset $\mathcal{D}$ , as advocated by Hanna et al. (2019). In order to be able to compute log-probabilities when the target policy is deterministic, we add artificial Gaussian noise with standard deviation 0.01 for all deterministic target policies.
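+ 
+ A sketch of the self-normalized step-wise estimator described above, assuming per-step log-probabilities of the dataset actions under the target and (estimated) behavior policies, arranged as [n_trajectories, horizon] arrays:
+ 
+ ```python
+ import numpy as np
+ 
+ def snis_estimate(logp_target, logp_behavior, rewards, gamma):
+     """Self-normalized (weighted) step-wise importance sampling."""
+     cum_ratio = np.exp(np.cumsum(logp_target - logp_behavior, axis=1))
+     w = cum_ratio / cum_ratio.sum(axis=0, keepdims=True)  # normalize per step
+     discounts = gamma ** np.arange(rewards.shape[1])
+     return (discounts * (w * rewards).sum(axis=0)).sum()
+ ```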
295
+
296
+ Doubly-Robust (DR) We perform weighted doubly-robust policy evaluation based on Thomas and Brunskill (2016b) and using the implementation of Kostrikov and Nachum (2020). Specifically, this method combines the IS technique above with a value estimator for variance reduction. The value estimator is learned according to Kostrikov and Nachum (2020), using deep FQE with an L2 loss function.
297
+
298
+ DICE This method uses a saddle-point objective to estimate marginalized importance weights $d^{\pi}(s,a) / d^{\pi_B}(s,a)$ ; these weights are then used to compute a weighted average of reward over the offline dataset, and this serves as an estimate of the policy's value in the MDP. We use the implementation from Yang et al. (2020) corresponding to the algorithm BestDICE.
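+ 
+ Once the correction weights are learned, the value estimate itself is a one-line weighted average. A sketch, where the $1/(1-\gamma)$ factor converting the average per-step reward into a discounted return is our assumption about the weight normalization:
+ 
+ ```python
+ import numpy as np
+ 
+ def dice_value(weights, rewards, gamma):
+     """Value estimate from learned corrections w(s, a) ~= d_pi / d_piB."""
+     return np.mean(np.asarray(weights) * np.asarray(rewards)) / (1.0 - gamma)
+ ```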
299
+
300
+ Variational Power Method (VPM) This method runs a variational power iteration algorithm to estimate the importance weights $d^{\pi}(s,a) / d^{\pi_B}(s,a)$ without the knowledge of the behavior policy. It then estimates the target policy value using weighted average of rewards similar to the DICE method. Our implementation is based on the same network and hyperparameters for OPE setting as in Wen et al. (2020). We further tune the hyperparameters including the regularization parameter $\lambda$ , learning rates $\alpha_{\theta}$ and $\alpha_{v}$ , and number of iterations on the Cartpole swingup task using ground-truth policy value, and then fix them for all other tasks.
301
+
302
+ # A.3 ENSEMBLING
303
+
304
+ As in Chua et al. (2018); Janner et al. (2019), we can form an ensemble using our best-performing models. We generate rollouts using the procedure detailed in Janner et al. (2019), forming an ensemble with 4 models. We see some improvement in policy evaluation results, as shown in Figure A.1. Ensembling could likely be further improved by forcing unique hyperparameter settings and seeds.
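+ 
+ A sketch of our reading of that rollout procedure, in which an ensemble member is sampled uniformly at each transition (`models` is a list of sampling callables):
+ 
+ ```python
+ import numpy as np
+ 
+ def ensemble_step(models, s, a, rng=np.random):
+     """One imagined step under the ensemble: pick a member at random."""
+     model = models[rng.randint(len(models))]
+     return model(s, a)  # (next_state, reward)
+ ```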
305
+
306
+ <table><tr><td colspan="2"></td><td>Cartpole swingup</td><td>Cheetah run</td><td>Finger turn hard</td><td>Fish swim</td><td>Humanoid run</td></tr><tr><td rowspan="8">Regret@5 for OPE vs. ground truth</td><td>Importance Sampling</td><td>0.73±0.16</td><td>0.40±0.21</td><td>0.64±0.05</td><td>0.12±0.05</td><td>0.31±0.09</td></tr><tr><td>Best DICE</td><td>0.68±0.41</td><td>0.27±0.05</td><td>0.44±0.04</td><td>0.35±0.24</td><td>0.84±0.22</td></tr><tr><td>Variational power method</td><td>0.50±0.13</td><td>0.37±0.04</td><td>0.45±0.13</td><td>0.02±0.02</td><td>0.56±0.08</td></tr><tr><td>Doubly Robust (IS, FQE)</td><td>0.28±0.05</td><td>0.09±0.05</td><td>0.56±0.12</td><td>0.61±0.12</td><td>0.99±0.00</td></tr><tr><td>FQE (L2)</td><td>0.06±0.04</td><td>0.17±0.05</td><td>0.30±0.11</td><td>0.50±0.03</td><td>0.99±0.00</td></tr><tr><td>Feedforward Model</td><td>0.02±0.02</td><td>0.24±0.12</td><td>0.43±0.04</td><td>0.00±0.00</td><td>0.44±0.02</td></tr><tr><td>FQE (distributional)</td><td>0.03±0.09</td><td>0.11±0.09</td><td>0.10±0.12</td><td>0.49±0.06</td><td>0.24±0.15</td></tr><tr><td>Autoregressive Model</td><td>0.00±0.02</td><td>0.01±0.02</td><td>0.63±0.11</td><td>0.03±0.02</td><td>0.32±0.06</td></tr><tr><td colspan="2"></td><td>Walker stand</td><td>Walker walk</td><td>Manipulator insert ball</td><td>Manipulator insert peg</td><td>Median ↓</td></tr><tr><td rowspan="8">Regret@5 for OPE vs. ground truth</td><td>Importance Sampling</td><td>0.54±0.11</td><td>0.54±0.23</td><td>0.83±0.05</td><td>0.22±0.03</td><td>0.54</td></tr><tr><td>Best DICE</td><td>0.24±0.07</td><td>0.55±0.06</td><td>0.44±0.07</td><td>0.75±0.04</td><td>0.44</td></tr><tr><td>Variational power method</td><td>0.41±0.02</td><td>0.39±0.02</td><td>0.52±0.20</td><td>0.32±0.02</td><td>0.41</td></tr><tr><td>Doubly Robust (IS, FQE)</td><td>0.02±0.01</td><td>0.05±0.07</td><td>0.30±0.10</td><td>0.73±0.01</td><td>0.30</td></tr><tr><td>FQE (L2)</td><td>0.04±0.02</td><td>0.00±0.02</td><td>0.37±0.07</td><td>0.74±0.01</td><td>0.30</td></tr><tr><td>Feedforward Model</td><td>0.18±0.10</td><td>0.03±0.05</td><td>0.83±0.06</td><td>0.74±0.01</td><td>0.24</td></tr><tr><td>FQE (distributional)</td><td>0.03±0.03</td><td>0.01±0.02</td><td>0.50±0.30</td><td>0.73±0.01</td><td>0.11</td></tr><tr><td>Autoregressive Model</td><td>0.04±0.02</td><td>0.04±0.02</td><td>0.85±0.02</td><td>0.30±0.04</td><td>0.04</td></tr></table>
307
+
308
+ Table A.1: Normalized Regret@5 (bootstrap mean ± standard deviation) for OPE methods vs. ground truth values at a discount factor of 0.995. In each column, normalized regret values that are not significantly different from the best $(p > 0.05)$ are bold faced. Methods are ordered by median.
309
+
310
+ <table><tr><td></td><td></td><td>Cartpole swingup</td><td>Cheetah run</td><td>Finger turn hard</td><td>Fish swim</td><td>Humanoid run</td></tr><tr><td rowspan="8">Absolute Error btw. OPE and ground truth</td><td>Variational power method</td><td>37.53 ±3.50</td><td>61.89 ±4.25</td><td>46.22 ±3.93</td><td>31.27 ±0.99</td><td>35.29 ±3.03</td></tr><tr><td>Importance Sampling</td><td>68.75 ±2.39</td><td>44.29 ±1.91</td><td>90.10 ±4.68</td><td>34.82 ±1.93</td><td>27.89 ±1.98</td></tr><tr><td>Best DICE</td><td>22.73 ±1.65</td><td>23.35 ±1.32</td><td>33.52 ±3.48</td><td>59.48 ±2.47</td><td>31.42 ±2.04</td></tr><tr><td>Feedforward Model</td><td>6.80 ±0.85</td><td>13.64 ±0.59</td><td>35.99 ±3.00</td><td>4.75 ±0.23</td><td>30.12 ±2.40</td></tr><tr><td>FQE (L2)</td><td>19.02 ±1.34</td><td>48.26 ±1.78</td><td>27.91 ±1.18</td><td>19.82 ±1.57</td><td>56.28 ±3.52</td></tr><tr><td>Doubly Robust (IS, FQE)</td><td>24.38 ±2.51</td><td>40.27 ±2.05</td><td>25.26 ±2.48</td><td>20.28 ±1.90</td><td>53.64 ±3.68</td></tr><tr><td>FQE (distributional)</td><td>12.63 ±1.21</td><td>36.50 ±1.62</td><td>10.23 ±0.93</td><td>7.76 ±0.95</td><td>32.36 ±2.27</td></tr><tr><td>Autoregressive Model</td><td>5.32 ±0.54</td><td>4.64 ±0.46</td><td>22.93 ±1.72</td><td>4.31 ±0.22</td><td>20.95 ±1.61</td></tr><tr><td></td><td></td><td>Walker stand</td><td>Walker walk</td><td>Manipulator insert ball</td><td>Manipulator insert peg</td><td>Median ↓</td></tr><tr><td rowspan="8">Absolute Error btw. OPE and ground truth</td><td>Variational power method</td><td>96.76 ±3.59</td><td>87.24 ±4.25</td><td>79.25 ±6.19</td><td>21.95 ±1.17</td><td>46.22</td></tr><tr><td>Importance Sampling</td><td>66.50 ±1.90</td><td>67.24 ±2.70</td><td>29.93 ±1.10</td><td>12.78 ±0.66</td><td>44.29</td></tr><tr><td>Best DICE</td><td>27.58 ±3.01</td><td>47.28 ±3.13</td><td>103.45 ±5.21</td><td>22.75 ±3.00</td><td>31.42</td></tr><tr><td>Feedforward Model</td><td>23.34 ±2.41</td><td>52.23 ±2.34</td><td>34.30 ±2.55</td><td>121.12 ±1.58</td><td>30.12</td></tr><tr><td>FQE (L2)</td><td>6.51 ±0.71</td><td>18.34 ±0.95</td><td>36.32 ±1.07</td><td>31.12 ±2.37</td><td>27.91</td></tr><tr><td>Doubly Robust (IS, FQE)</td><td>26.82 ±2.66</td><td>24.63 ±1.69</td><td>13.33 ±1.16</td><td>22.28 ±2.34</td><td>24.63</td></tr><tr><td>FQE (distributional)</td><td>21.49 ±1.41</td><td>27.57 ±1.54</td><td>9.75 ±1.10</td><td>12.66 ±1.39</td><td>12.66</td></tr><tr><td>Autoregressive Model</td><td>19.12 ±1.23</td><td>5.14 ±0.49</td><td>17.13 ±1.34</td><td>9.71 ±0.70</td><td>9.71</td></tr></table>
311
+
312
+ Table A.2: Average absolute error between OPE estimates and ground truth values at a discount factor of 0.995. In each column, absolute error values that are not significantly different from the best $(p > 0.05)$ are bold faced. Methods are ordered by median.
313
+
314
+ ![](images/7a13c0fa6283042d1d610889b4f991fa7be119ba7714cabbb36d9fc4d4ab67d8.jpg)
315
+
316
+ ![](images/afaf02057da00280e60e165da54bb8fe6ef3495facfcf1ec46dd024db0e74021.jpg)
317
+
318
+ ![](images/f788299ba5f4fc1b7caa0504407a043b101cef3ab4675af0f7dac066cbc4881a.jpg)
319
+
320
+ ![](images/6edd8128466a5a8dbc83d41f514d513b3277830581fee1072b944d3e042a44a6.jpg)
321
+
322
+ ![](images/a6dc6ca33c268201efa2612cee5b597c7d30ee7249f01b1b4d14929dbedc17cc.jpg)
323
+ Figure A.1: Estimates of returns using the top model versus estimates of returns using an ensemble of the top-4 models.
324
+
325
+ ![](images/7161d44adaebc829ccc46982b440908ae2cd419be1427bcc4a9aa36644ff4667.jpg)
326
+
327
+ ![](images/9b87787f00f521dd7fc499de416203312d4eab7ec8ca4d8091e9dafacb1749a1.jpg)
328
+
329
+ ![](images/9dec635be28d80725a67d4945bc25affba9807980a92779e671937ae43228e53.jpg)
330
+
331
+ Algorithm 2 Model Predictive Path Integral Planning
332
+ Require: state $s$ , policy $\pi$ , dynamics model $p$ , critic $Q$ , temperature $\beta$ , and noise variance $\sigma^2$ .
333
+ for $m = 1, \dots, M$ do
334
+ for $n = 1, \dots, N$ do
335
+ $s_n^0 \gets s$; $R_n \gets 0$
336
+ for $\tau = 0, \dots, H - 1$ do
337
+ $a_n^\tau \sim \pi(\cdot | s_n^\tau)$; $s_n^{\tau + 1}, r_n^{\tau + 1} \sim p(\cdot, \cdot | s_n^\tau, a_n^\tau)$; $R_n \gets R_n + \gamma^\tau r_n^{\tau + 1}$
338
+ end for
339
+ $a_n^H \sim \pi(\cdot | s_n^H)$; $R_n \gets R_n + \gamma^H Q(s_n^H, a_n^H)$
340
+ end for
341
+ Re-define $\pi$ such that $\pi(\cdot | \hat{s}^\tau) = \sum_{n} \frac{\exp(R_n / \beta)}{\sum_{m} \exp(R_m / \beta)} \mathcal{N}(\cdot | a_n^\tau, \sigma^2 I)$ . ( $\pi$ depends on $\tau$ and not $\hat{s}$ .)
342
+ end for
343
+ sample final action $a \sim \sum_{n} \frac{\exp(R_n / \beta)}{\sum_{m} \exp(R_m / \beta)} \delta(a_n^0)$
344
+ return $a$
345
+
346
+ # B ADDITIONAL DETAILS REGARDING POLICY OPTIMIZATION
347
+
348
+ To test dynamics models for policy optimization, we implement the two methods discussed in Section 5.4 on top of CRR exp, one of the CRR variants (Wang et al., 2020). We use the RL Unplugged datasets (Gulcehre et al., 2020) for all environments studied in this section. When using data augmentation, we adopt a 1-to-1 ratio between the original dataset and the augmented dataset.
349
+
350
+ To take advantage of the dynamics models at test time, we use a variant of Model Predictive Path Integral (MPPI) control for planning. To reduce the planning horizon, we truncate the model rollout using the CRR critic. The details of the planning procedure are summarized in Algorithm 2. All hyperparameter tuning for the planning process is conducted on the cartpole swingup task. The hyperparameters used in the planning process are $M = 3$, $N = 16$, $H = 10$, $\beta = 0.1$, and $\sigma^2 = 0.01$. To match the temperature used in the planning component, we choose $\beta = 0.1$ for the CWP component of CRR. This change, however, does not impact the baseline CRR agent's performance much. With the exception of $\beta$ and the planning component, all hyperparameters are kept the same as CRR exp.
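+ 
+ A condensed sketch of Algorithm 2 with the hyperparameters above; `policy`, `model`, and `critic` are placeholder callables, the planning discount is an assumption, and batching, action clipping, and other practical details are omitted:
+ 
+ ```python
+ import numpy as np
+ 
+ def mppi_action(s, policy, model, critic, M=3, N=16, H=10,
+                 beta=0.1, sigma2=0.01, gamma=0.995):
+     seqs, weights = None, None
+     for _ in range(M):
+         new_seqs, returns = [], []
+         for _ in range(N):
+             s_n, ret, seq = s, 0.0, []
+             for tau in range(H):
+                 if seqs is None:
+                     a = policy(s_n)  # first iteration samples from the policy
+                 else:
+                     # Sample from the exp(R / beta)-reweighted mixture of
+                     # previous action sequences, perturbed with Gaussian noise.
+                     idx = np.random.choice(N, p=weights)
+                     a = seqs[idx][tau] + np.sqrt(sigma2) * np.random.randn(*np.shape(seqs[idx][tau]))
+                 s_n, r = model(s_n, a)
+                 ret += gamma**tau * r
+                 seq.append(a)
+             ret += gamma**H * critic(s_n, policy(s_n))  # truncation bootstrap
+             new_seqs.append(seq)
+             returns.append(ret)
+         seqs, returns = new_seqs, np.asarray(returns)
+         z = np.exp((returns - returns.max()) / beta)  # stabilized softmax weights
+         weights = z / z.sum()
+     return seqs[np.random.choice(N, p=weights)][0]  # first action of a sampled sequence
+ ```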
351
+
352
+ ![](images/edb0aab7540567ced046adfa9d85117683f36672890400dacca7f005842d1e3d.jpg)
353
+ Figure B.2: Effects of the planning procedure. Here we compare using planning (CRR-planning AR) vs. not using it (CRR AR), while training on augmented data generated by the autoregressive model. Planning with autoregressive models helps in all environments tested.
354
+
355
+ ![](images/c801e1d06452ed86c4df95c67f0a6dbf23dd01ef2b470cb842d874f1fe178b93.jpg)
356
+
357
+ ![](images/217c05f29441b41638dc91386f9b64e3bc7f93d3da7ce8cdba50f8ce9c1c3bc7.jpg)
358
+
359
+ ![](images/f0748027b1b815691ba36cdc7be9881f0dff478ac50d7597b8cb856e0b9a602b.jpg)
360
+
361
+ ![](images/13c95538381bdc62f33c972daa30bb452e06af4d71355931f49992c27d2b083b.jpg)
362
+ Figure B.3: Effects of data augmentation on cheetah run. [LEFT] In the absence of planning, data augmentation significantly increases the performance of the CRR agent. [RIGHT] With the planning procedure, data augmentation is still effective, albeit to a lesser extent.
363
+
364
+ ![](images/cd9c408bd47e01d9a6bcb77a1e0d6cbaa26cdac0ce48286f10e8f0eeedc1cf69.jpg)
365
+
366
+ We compare the agents' performance with and without the planning procedure to test its effects. As shown in Figure B.2, planning using an autoregressive model significantly increases performance.
367
+
368
+ Data augmentation does not change the agents' performance on cartpole swingup, fish swim, or finger turn hard. It, however, boosts performance considerably on cheetah run. In Figure B.3, we show the effects of data augmentation on cheetah run.
369
+
370
+ ![](images/08e3d8133268b7b1573867cf24cfe16c6aa323e8460de80e63fc0a98924d9e97.jpg)
371
+
372
+ ![](images/822d63d0301047861d2fa8db91a1b7b3530cf25deffc858ba63f2e8ad3bf43ea.jpg)
373
+
374
+ ![](images/b59729696f9f172bdcc03986ee841a2238b2d57fa29bae24be02b4f68846f269.jpg)
375
+
376
+ ![](images/9ee36fb248988b55583644f8ac7f517ff8bf89886558289b0c2cfcc903c6ec03.jpg)
377
+
378
+ ![](images/ee8f9332e4ff88f71b2acd9f2b6e26f815d30cb1be1875d31eb6e5b09ecfee43.jpg)
379
+
380
+ ![](images/94e8efc264ac30b6ebf010e1732e921b3c5a184eb171bdb4abf894c1499f5470.jpg)
381
+
382
+ ![](images/9232ae8aecb6fef96f7e5978536d3228ca5c14da30f646979c3856e07513508f.jpg)
383
+
384
+ ![](images/fcc415919de9025a813ee46d6a7defa1c1b3dcb03e7a0438d396cee5257a5547.jpg)
385
+
386
+ ![](images/aafe55240cf296e851ba68888dd078ed2142865fb59353b56f0ff91229f909cb.jpg)
387
+
388
+ ![](images/92fe14c9f745b4e4a867b85cfca8c81ef21eabc1c8a73a6d07e0916a2c7a389b.jpg)
389
+
390
+ ![](images/1937c046f9e56c2b6c14c2c88203997a488c8a8ce75ee5e7deadcba7ddff7bb5.jpg)
391
+
392
+ ![](images/49918ff47b03ef87a1498923010210ac316c219ddda6d18c803b0e83cf25d803.jpg)
393
+
394
+ ![](images/f5e37bd1a26b2d2d6e4cf2207faae2b939601c7c4b8058cf18332c1be8544ce5.jpg)
395
+
396
+ ![](images/6aeb8ad342a910dd0a6475d766d3e16e511b2f06171fcb99131f0bf82ae14969.jpg)
397
+
398
+ ![](images/0ab452a9915bdf1a4694755b446f60a1b951153dd4878a7117e3fd0b472b50c7.jpg)
399
+
400
+ ![](images/c3e16b2e4cd123d7a5251534a86f3cf820159636c554034056b6398b89e2f30a.jpg)
401
+
402
+ ![](images/7ec5606f4007fa1d5aeb638e595d4b4ac376ff29490342ccb3c70e06724b2032.jpg)
403
+ Figure B.4: Comparison of model-based OPE using autoregressive and feedforward dynamics models with state-of-the-art FQE methods based on L2 and distributional Bellman error. We plot ground truth returns on the x-axis against estimates of returns from various OPE methods on the y-axis. While feedforward models fall behind FQE on most tasks, autoregressive dynamics models are often superior. The remaining environments are plotted in Figure 4.
404
+
405
+ ![](images/f20ad0ff4d1fafc066721c9a3309d4422eadb578c8e573b7e1f194d68e5f9b3a.jpg)
406
+
407
+ ![](images/a44cedf434b657aa43725e5e81f7e700c7e444f86707f3ab7c607d6b03498e4c.jpg)
408
+
409
+ ![](images/d9e3102544de6d9d5a0d96291cd3a67d1298637e4836dd87adbd7a6b3c7076fd.jpg)
autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc1e044f1834adb1bf0191b85208ee55dbd22189863a9663e94185d1f7046368
3
+ size 1321975
autoregressivedynamicsmodelsforofflinepolicyevaluationandoptimization/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c51b6db0dfb110e5cc4e227e782e59fcb08f40a8be0ee4fd5dbe28cd9a95442
3
+ size 523913
autoseglosssearchingmetricsurrogatesforsemanticsegmentation/2802d650-3a45-4a26-97d7-c37c1d6ee49e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93d27ff0e079a88798e11aac93b71bff9e6c506d682e2b63fe0fe5a7409795be
3
+ size 79211
autoseglosssearchingmetricsurrogatesforsemanticsegmentation/2802d650-3a45-4a26-97d7-c37c1d6ee49e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:394d1324a1e893457fc4a3a01cf3deec564f0279ed8fe0a6470321fc26d0c4f4
3
+ size 94414
autoseglosssearchingmetricsurrogatesforsemanticsegmentation/2802d650-3a45-4a26-97d7-c37c1d6ee49e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef08494e796d6dab6efc56d326468a7ce54ee2feac99a1be8e00608ba5858c6f
3
+ size 1617393
autoseglosssearchingmetricsurrogatesforsemanticsegmentation/full.md ADDED
@@ -0,0 +1,331 @@
 
 
 
 
1
+ # AUTO SEG-LOSS: SEARCHING METRIC SURROGATES FOR SEMANTIC SEGMENTATION
2
+
3
+ Hao Li $^{1*†}$ , Chenxin Tao $^{2*†}$ , Xizhou Zhu $^{3}$ , Xiaogang Wang $^{1,3}$ , Gao Huang $^{2}$ , Jifeng Dai $^{3,4‡}$
4
+
5
+ <sup>1</sup>The Chinese University of Hong Kong <sup>2</sup>Tsinghua University
6
+
7
+ $^{3}$ SenseTime Research $^{4}$ Qing Yuan Research Institute, Shanghai Jiao Tong University
8
+
9
+ haoli@link.cuhk.edu.hk, tcx20@mails.tsinghua.edu.cn
10
+
11
+ {zhuwalter, daijifeng}@sensetime.com
12
+
13
+ xgwang@ee.cuhk.edu.hk, gaohuang@tsinghua.edu.cn
14
+
15
+ # ABSTRACT
16
+
17
+ Designing proper loss functions is essential in training deep networks. Especially in the field of semantic segmentation, various evaluation metrics have been proposed for diverse scenarios. Despite the success of the widely adopted cross-entropy loss and its variants, the mis-alignment between the loss functions and evaluation metrics degrades the network performance. Meanwhile, manually designing loss functions for each specific metric requires expertise and significant manpower. In this paper, we propose to automate the design of metric-specific loss functions by searching differentiable surrogate losses for each metric. We substitute the non-differentiable operations in the metrics with parameterized functions, and conduct parameter search to optimize the shape of loss surfaces. Two constraints are introduced to regularize the search space and make the search efficient. Extensive experiments on PASCAL VOC and Cityscapes demonstrate that the searched surrogate losses outperform the manually designed loss functions consistently. The searched losses can generalize well to other datasets and networks. Code shall be released at https://github.com/fundamentalvision/Auto-Seg-Loss.
18
+
19
+ # 1 INTRODUCTION
20
+
21
+ Loss functions are indispensable components in training deep networks, as they drive the feature learning process for various applications with specific evaluation metrics. However, most metrics, like the commonly used 0-1 classification error, are non-differentiable in their original forms and cannot be directly optimized via gradient-based methods. Empirically, the cross-entropy loss serves well as an effective surrogate objective function for a variety of tasks concerning categorization. This phenomenon is especially prevalent in image semantic segmentation, where various evaluation metrics have been designed to address diverse tasks focusing on different scenarios. Some metrics measure the accuracy on the whole image, while others focus more on the segmentation boundaries. Although cross-entropy and its variants work well for many metrics, the mis-alignment between network training and evaluation still exists and inevitably leads to performance degradation.
22
+
23
+ Typically, there are two ways of designing metric-specific loss functions in semantic segmentation. The first is to modify the standard cross-entropy loss to meet the target metric (Ronneberger et al., 2015; Wu et al., 2016). The second is to design clever surrogate losses for specific evaluation metrics (Rahman & Wang, 2016; Milletari et al., 2016). Despite the improvements, these handcrafted losses require expertise and are non-trivial to extend to other evaluation metrics.
24
+
25
+ In contrast to designing loss functions manually, an alternative approach is to find a framework that can design proper loss functions for different evaluation metrics in an automated manner, motivated by recent progress in AutoML (Zoph & Le, 2017; Pham et al., 2018; Liu et al., 2018; Li et al., 2019). Although automating the design process for loss functions is attractive, it is non-trivial to apply an AutoML framework to loss functions.
26
+
27
+ Typical AutoML algorithms require a proper search space, in which some search algorithm is conducted. Previous search spaces are either unsuitable for loss design or too general to be searched efficiently. Recently, Li et al. (2019) and Wang et al. (2020) proposed search spaces based on existing handcrafted loss functions, and their algorithms search for the best combination. However, these search spaces are still limited to variants of the cross-entropy loss, and thus do not address the mis-alignment problem well.
28
+
29
+ In this paper, we propose a general framework for searching surrogate losses for mainstream non-differentiable segmentation metrics. The key idea is that we can build the search space according to the form of the evaluation metrics. In this way, the training criteria and evaluation metrics are unified. Meanwhile, the search space is compact enough for efficient search. Specifically, the metrics are first relaxed to the continuous domain by substituting the one-hot prediction and logical operations, which are the non-differentiable parts in most metrics, with their differentiable approximations. Parameterized functions are introduced to approximate the logical operations, ensuring that the loss surfaces are smooth while remaining effective for training. The loss parameterization functions can be of arbitrary families defined on $[0,1]$. Parameter search is further conducted on the chosen family so as to optimize the network performance on the validation set with the given evaluation metric. Two essential constraints are introduced to regularize the parameter search space. We find that the searched surrogate losses can effectively generalize to different networks and datasets. Extensive experiments on PASCAL VOC (Everingham et al., 2015) and Cityscapes (Cordts et al., 2016) show our approach delivers accuracy superior to that of existing losses specifically designed for individual segmentation metrics, with a mild computational overhead.
30
+
31
+ Our contributions can be summarized as follows: 1) Our approach is the first general framework of surrogate loss search for mainstream segmentation metrics. 2) We propose an effective parameter regularization and parameter search algorithm, which can find loss surrogates that optimize the target metric performance with mild computational overhead. 3) The surrogate losses obtained via the proposed search framework deepen our understanding of loss function design and are by themselves novel contributions, because they differ from existing loss functions specifically designed for individual metrics and are transferable across different datasets and networks.
32
+
33
+ # 2 RELATED WORK
34
+
35
+ Loss function design is an active topic in deep network training (Ma, 2020). In the area of image semantic segmentation, the cross-entropy loss is widely used (Ronneberger et al., 2015; Chen et al., 2018). But the cross-entropy loss is designed for optimizing the global accuracy measure (Rahman & Wang, 2016; Patel et al., 2020), which is not aligned with many other metrics. Numerous studies have been conducted to design proper loss functions for the prevalent evaluation metrics. For the mIoU metric, many works (Ronneberger et al., 2015; Wu et al., 2016) incorporate class frequency to mitigate the class imbalance problem. For the boundary F1 score, the losses at boundary regions are up-weighted (Caliva et al., 2019; Qin et al., 2019), so as to deliver more accurate boundaries. These works carefully analyze the properties of specific evaluation metrics and design the loss functions in a fully handcrafted way, which requires expertise. By contrast, we propose a unified framework for deriving parameterized surrogate losses for various evaluation metrics, wherein the parameters are searched by reinforcement learning in an automatic way. The networks trained with the searched surrogate losses deliver accuracy on par with or even superior to those trained with the best handcrafted losses.
36
+
37
+ Direct loss optimization for non-differentiable evaluation metrics has long been studied for structural SVM models (Joachims, 2005; Yue et al., 2007; Ranjbar et al., 2012). However, the gradients w.r.t. features cannot be derived from these approaches. Therefore, they cannot drive the training of deep networks through back-propagation. Hazan et al. (2010) proposes to optimize structural SVM with gradient descent, where loss-augmented inference is applied to get the gradients of the expectation of evaluation metrics. Song et al. (2016) further extends this approach to non-linear models (e.g., deep neural networks). However, the computational complexity is very high during each step in gradient descent. Although Song et al. (2016) and Mohapatra et al. (2018) have designed efficient algorithms for the Average Precision (AP) metric, other metrics still need specially designed efficient algorithms. Our method, by contrast, is general for the mainstream segmentation metrics. Thanks to the good generalizability, our method only needs to perform the search process
38
+
39
+ once for a specific metric, and the searched surrogate loss can be directly used henceforth. Applying the searched loss for training networks brings very little additional computational cost.
40
+
41
+ Surrogate loss is introduced to derive loss gradients for non-differentiable evaluation metrics. There are usually two ways to design surrogate losses. The first is to handcraft an approximated differentiable metric function. For the IoU measure, Rahman & Wang (2016) propose to approximate the intersection and union separately using the softmax probabilities in a differentiable form, and show its effectiveness on binary segmentation tasks. Berman et al. (2018) further deal with multi-class segmentation problems by extending mIoU from binary inputs to the continuous domain with the convex Lovász extension, and their method outperforms the standard cross-entropy loss in multi-class segmentation tasks. For the F1 measure, the dice loss is proposed by Milletari et al. (2016) as a direct objective by substituting the binary prediction with the softmax probability. Despite their success, these losses do not apply to other metrics.
42
+
43
+ The second solution is to train a network to approximate the target metric. Nagendar et al. (2018) train a network to approximate mIoU. Patel et al. (2020) design a neural network to learn embeddings for predictions and ground truths for tasks other than segmentation. This line of research focuses on minimizing the approximation error w.r.t. the target metrics, but there is no guarantee that the approximations provide good loss signals for training. These approximated losses are employed only in a post-tuning setup, still relying on cross-entropy pre-trained models. Our method differs significantly in that we search surrogate losses that directly optimize the evaluation metrics in applications.
44
+
45
+ AutoML is a long-pursued target of machine learning (He et al., 2019). Recently a sub-field of AutoML, neural architecture search (NAS), has attracted much attention due to its success in automating the process of neural network architecture design (Zoph & Le, 2017; Pham et al., 2018; Liu et al., 2018). As an essential element, the loss function has also raised researchers' interest in automating its design process. Li et al. (2019) and Wang et al. (2020) design search spaces based on existing human-designed loss functions and search for the best combination parameters. There are two issues: a) the search process outputs whole network models rather than loss functions, so the expensive search procedure must be conducted again for every new network or dataset; and b) the search space is filled with variants of cross-entropy, which cannot resolve the misalignment between the cross-entropy loss and many target metrics. By contrast, our method outputs searched surrogate losses whose form is close to the target metrics and which are transferable between networks and datasets.
46
+
47
+ # 3 REVISITING EVALUATION METRICS FOR SEMANTIC SEGMENTATION
48
+
49
+ Various evaluation metrics are defined for semantic segmentation, addressing diverse tasks that focus on different scenarios. Most of them fall into three typical classes: Acc-based, IoU-based, and F1-score-based. This section revisits these evaluation metrics under a unified notation set.
50
+
51
+ Table 1 summarizes the mainstream evaluation metrics. The notations are as follows: suppose the validation set is composed of $N$ images, labeled with categories from $C$ classes (background included). Let $I_{n}, n \in \{1, \ldots, N\}$ be the $n$ -th image, and $Y_{n}$ be the corresponding ground-truth segmentation mask. Here $Y_{n} = \{y_{n,c,h,w}\}_{c,h,w}$ is a one-hot vector, where $y_{n,c,h,w} \in \{0,1\}$ indicates whether the pixel at spatial location $(h,w)$ belongs to the $c$ -th category $(c \in \{1, \ldots, C\})$ . In evaluation, the ground-truth segmentation mask $Y_{n}$ is compared to the network prediction $\hat{Y}_{n} = \{\hat{y}_{n,c,h,w}\}_{c,h,w}$ , where $\hat{y}_{n,c,h,w} \in \{0,1\}$ . $\hat{y}_{n,c,h,w}$ is quantized from the continuous scores produced by the network (by argmax operation).
52
+
53
+ Acc-based metrics. The global accuracy measure (gAcc) counts the number of pixels correctly classified. It can be written with the logical operator AND as Eq. (1). The gAcc metric counts each pixel equally, so the results on long-tailed categories have little impact on the metric value. The mean accuracy (mAcc) metric mitigates this by normalizing within each category, as in Eq. (2).
54
+
55
+ IoU-based metrics. The evaluation is on set similarity rather than pixel accuracy. The intersection-over-union (IoU) score is evaluated between the prediction and the ground-truth mask of each category. The mean IoU (mIoU) metric averages the IoU scores of all categories, as in Eq. (3).
56
+
57
+ In the variants, the frequency weighted IoU (FWIoU) metric weighs each category IoU score by the category pixel number, as in Eq. (4). The boundary IoU (BIoU) (Kohli et al., 2009) metric only cares about the segmentation quality around the boundary, so it picks the boundary pixels out in evaluation
58
+
59
+ Table 1: Revisiting mainstream metrics for semantic segmentation. The metrics with $\dagger$ measure the segmentation accuracy on the whole image. The metrics with * focus on the boundary quality.
60
+
61
+ <table><tr><td>Type</td><td>Name</td><td>Formula</td></tr><tr><td rowspan="2">Acc-based</td><td>Global Accuracy†</td><td>gAcc = ∑n,c,h,w (ŷn,c,h,w AND yn,c,h,w) / ∑n,c,h,w yn,c,h,w (1)</td></tr><tr><td>Mean Accuracy†</td><td>mAcc = (1/C) ∑c [∑n,h,w (ŷn,c,h,w AND yn,c,h,w) / ∑n,h,w yn,c,h,w] (2)</td></tr><tr><td rowspan="3">IoU-based</td><td>Mean IoU†</td><td>mIoU = (1/C) ∑c [∑n,h,w (ŷn,c,h,w AND yn,c,h,w) / ∑n,h,w (ŷn,c,h,w OR yn,c,h,w)] (3)</td></tr><tr><td>Frequency Weighted IoU†</td><td>FWIoU = ∑c [∑n,h,w yn,c,h,w / ∑n,c,h,w yn,c,h,w] × [∑n,h,w (ŷn,c,h,w AND yn,c,h,w) / ∑n,h,w (ŷn,c,h,w OR yn,c,h,w)] (4)</td></tr><tr><td>Boundary IoU*</td><td>BIoU = (1/C) ∑c [∑n,(h,w)∈BD(yn) (ŷn,c,h,w AND yn,c,h,w) / ∑n,(h,w)∈BD(yn) (ŷn,c,h,w OR yn,c,h,w)], where BD(y) = y XOR Min-Pooling(y) (5)</td></tr><tr><td>F1-score-based</td><td>Boundary F1 Score*</td><td>BF1-score = (1/C) ∑c 2 × precc × recallc / (precc + recallc) (6), where precc = ∑n,h,w (BD(ŷn)c,h,w AND Max-Pooling(BD(yn))c,h,w) / ∑n,h,w BD(ŷn)c,h,w and recallc = ∑n,h,w (Max-Pooling(BD(ŷn))c,h,w AND BD(yn)c,h,w) / ∑n,h,w BD(yn)c,h,w</td></tr></table>
62
+
63
+ and ignores the remaining pixels. It can be calculated with Eq. (5), in which $\mathrm{BD}(y_n)$ denotes the boundary region in map $y_{n}$ . $\mathrm{BD}(y_n)$ is derived by applying the XOR operation between the ground-truth mask and its min-pooled version. The stride of Min-Pooling(·) is 1.
64
+
65
+ F1-score-based metrics. F1-score is a criterion that takes both precision and recall into consideration. A well-known metric of this type is boundary F1-score (BF1-score) (Csurka et al., 2013), which is widely used for evaluating boundary segmentation accuracy. The computation of precision and recall in BF1-score is as in Eq. (6), where $\mathrm{BD}(\hat{y}_n)$ and $\mathrm{BD}(y_n)$ are derived from Eq. (5). Max pooling with stride 1, Max-Pooling( $\cdot$ ), is applied on the boundary regions to allow error tolerance.
66
+
67
+ # 4 AUTO SEG-LOSS FRAMEWORK
68
+
69
+ In the Auto Seg-Loss framework, the evaluation metrics are transformed into continuous surrogate losses with learnable parameters, which are then optimized. Fig. 1 illustrates our approach.
70
+
71
+ # 4.1 EXTENDING METRICS TO SURROGATES
72
+
73
+ As shown in Section 3, most segmentation metrics are non-differentiable because they take one-hot prediction maps as input, and contain binary logical operations. We extend these metrics to be continuous loss surrogates by smoothing the non-differentiable operations within.
74
+
75
+ Extending One-hot Operation. The one-hot prediction map, $\hat{Y}_n = \{\hat{y}_{n,c,h,w}\}_{c,h,w}$ , is derived by picking the highest scoring category at each pixel, which is further turned into one-hot form. Here, we approximate the one-hot predictions with softmax probabilities, as,
76
+
77
+ $$
78
+ \hat{y}_{n,c,h,w} \approx \widetilde{y}_{n,c,h,w} = \operatorname{Softmax}_{c}\left(z_{n,c,h,w}\right), \tag{7}
79
+ $$
80
+
81
+ where $z_{n,c,h,w} \in \mathbb{R}$ is the category score output by the network (without normalization). The approximated one-hot prediction is denoted by $\widetilde{y}_{n,c,h,w}$ .
82
+
83
+ Extending Logical Operations. As shown in Table 1, the non-differentiable logical operations, $f_{\mathrm{AND}}(y_1,y_2)$ , $f_{\mathrm{OR}}(y_1,y_2)$ , and $f_{\mathrm{XOR}}(y_1,y_2)$ , are indispensable components of these metrics. Because the XOR operation can be constructed from AND and OR, $f_{\mathrm{XOR}}(y_1,y_2) = f_{\mathrm{OR}}(y_1,y_2) - f_{\mathrm{AND}}(y_1,y_2)$ , we focus on extending $f_{\mathrm{AND}}(y_1,y_2)$ and $f_{\mathrm{OR}}(y_1,y_2)$ to the continuous domain.
84
+
85
+ Following common practice, the logical operators are substituted with arithmetic operators,
86
+
87
+ $$
88
+ f_{\mathrm{AND}}\left(y_{1}, y_{2}\right) = y_{1} y_{2}, \quad f_{\mathrm{OR}}\left(y_{1}, y_{2}\right) = y_{1} + y_{2} - y_{1} y_{2}, \tag{8}
89
+ $$
90
+
91
+ ![](images/25e19249d6ceb7421ca31dd1c272d96035bda398a7177633af5614070d0acf24.jpg)
92
+ Figure 1: Overview of the proposed Auto Seg-Loss framework. The surfaces of $h_{\mathrm{AND}}$ and $h_{\mathrm{OR}}$ shown in the "Optimal Parameterization" illustrate the searched optimal parameterization for mIoU, where $y_1, y_2 \in \{0,1\}$ .
+
+ Eq. (8) can be directly extended to take continuous $y_1, y_2 \in [0,1]$ as inputs. By such an extension, together with the approximated one-hot operation, a naive version of differentiable surrogate losses can be obtained. The strength of such surrogates is that they are directly derived from the metrics, which significantly reduces the gap between training and evaluation. However, there is no guarantee that the loss surfaces formed by naively extending Eq. (8) provide accurate loss signals. To adjust the loss surfaces, we parameterize the AND and OR functions as
93
+
94
+ $$
95
+ \begin{array}{l} h_{\mathrm{AND}}\left(y_{1}, y_{2}; \theta_{\mathrm{AND}}\right) = g\left(y_{1}; \theta_{\mathrm{AND}}\right) g\left(y_{2}; \theta_{\mathrm{AND}}\right), \\ h_{\mathrm{OR}}\left(y_{1}, y_{2}; \theta_{\mathrm{OR}}\right) = g\left(y_{1}; \theta_{\mathrm{OR}}\right) + g\left(y_{2}; \theta_{\mathrm{OR}}\right) - g\left(y_{1}; \theta_{\mathrm{OR}}\right) g\left(y_{2}; \theta_{\mathrm{OR}}\right), \end{array} \tag{9}
96
+ $$
97
+
98
+ where $g(y;\theta):[0,1]\to \mathbb{R}$ is a scalar function parameterized by $\theta$ .
99
+
100
+ The parameterized function $g(y; \theta)$ can be from arbitrary function families defined on [0, 1], e.g., piecewise linear functions and piecewise Bézier curves. With a chosen function family, the parameters $\theta$ control the shape of loss surfaces. We seek to search for the optimal parameters $\theta$ so as to maximize the given evaluation metric.
101
+
102
+ Meanwhile, optimal parameter search is non-trivial. With the introduced parameters, the loss surfaces are highly flexible: they may well become chaotic, or deviate from the target evaluation metric even at binary inputs. For more effective parameter search, we regularize the loss surfaces by introducing two constraints on $g(y; \theta)$ .
103
+
104
+ Truth-table constraint is introduced to enforce that the surrogate loss surfaces take the same values as the evaluation metric score at binary inputs. This is applied by enforcing
105
+
106
+ $$
107
+ g(0; \theta) = 0, \quad g(1; \theta) = 1. \tag{10}
108
+ $$
109
+
110
+ Thus, the parameterized functions $h(y_1, y_2; \theta)$ preserve the behavior of the corresponding logical operations $f(y_1, y_2)$ on binary inputs $y_1, y_2 \in \{0, 1\}$ .
111
+
112
+ Monotonicity constraint is introduced based on the monotonic tendency observed in the truth tables of AND and OR. It pushes the loss surfaces towards a benign landscape, avoiding dramatic non-smoothness. The monotonicity constraint is enforced on $h_{\mathrm{AND}}(y_1,y_2)$ and $h_{\mathrm{OR}}(y_1,y_2)$ , as
113
+
114
+ $$
115
+ \partial h_{\mathrm{AND}} / \partial y_{i} \geq 0, \quad \partial h_{\mathrm{OR}} / \partial y_{i} \geq 0, \quad \forall y_{i} \in [0, 1], \; i = 1, 2.
116
+ $$
117
+
118
+ Applying the chain rule together with the truth-table constraint, the monotonicity constraint implies
119
+
120
+ $$
121
+ \partial g(y; \theta) / \partial y \geq 0, \quad \forall y \in [0, 1]. \tag{11}
122
+ $$
123
+
124
+ Empirically we find it important to enforce these two constraints in parameterization.
125
+
126
+ Extending Evaluation Metrics. Now we can extend the metrics to surrogate losses by a) replacing the one-hot predictions with softmax probabilities, and b) substituting the logical operations with parameterized functions. Note that if a metric contains several logical operations, their parameters are not shared. The collection of parameters in one metric is denoted as $\Theta$ . For a segmentation network N and evaluation dataset $S$ , the score of the evaluation metric is denoted as $\xi(N; S)$ , and the parameterized surrogate loss as $\widetilde{\xi}_{\Theta}(N; S)$ .
127
+
128
+ # 4.2 SURROGATE PARAMETERIZATION
129
+
130
+ The parameterized function can be from any function family defined on $[0, 1]$ , such as piecewise Bézier curves and piecewise linear functions. Here we choose the piecewise Bézier curve for parameterizing $g(y; \theta)$ : it is widely used in computer graphics, and its control points make the constraints easy to enforce. We also verify the effectiveness of parameterizing $g(y; \theta)$ with piecewise linear functions. See Fig. 2 for a visualization and Appendix B for more details.
131
+
132
+ A piecewise Bézier curve consists of a series of quadratic Bézier curves, where the last control point of one curve segment coincides with the first control point of the next curve segment. If there are $n$ segments in a piecewise Bézier curve, the $k$ -th segment is defined as
133
+
134
+ $$
135
+ B(k, s) = (1-s)^{2} B_{2k} + 2s(1-s) B_{2k+1} + s^{2} B_{2k+2}, \quad 0 \leq s \leq 1, \tag{12}
136
+ $$
137
+
138
+ where $s$ traverses the $k$ -th segment, and $B_{2k + i} = (B_{(2k + i),u}, B_{(2k + i),v})$ $(i = 0,1,2)$ denotes the $i$ -th control point on the $k$ -th segment, with $u, v$ indexing the 2-d plane axes. A piecewise Bézier curve with $n$ segments has $2n + 1$ control points in total. To parameterize $g(y;\theta)$ , we assign
139
+
140
+ $$
141
+ y = (1-s)^{2} B_{2k,u} + 2s(1-s) B_{(2k+1),u} + s^{2} B_{(2k+2),u}, \tag{13a}
142
+ $$
143
+
144
+ $$
145
+ g(y; \theta) = (1-s)^{2} B_{2k,v} + 2s(1-s) B_{(2k+1),v} + s^{2} B_{(2k+2),v}, \tag{13b}
146
+ $$
147
+
148
+ $$
149
+ \text{s.t.} \quad B_{2k,u} \leq y \leq B_{(2k+2),u}, \tag{13c}
150
+ $$
151
+
152
+ where $\theta$ is the control point set, $B_{2k,u} < B_{(2k + 1),u} < B_{(2k + 2),u}$ , $0 \leq k \leq n - 1$ . Given an input $y$ , the segment index $k$ and the transversal parameter $s$ are derived from Eq. (13c) and Eq. (13a), respectively. Then $g(y;\theta)$ is assigned as in Eq. (13b). Because $g(y;\theta)$ is defined on $y \in [0,1]$ , we fix the control points on the $u$ -axis so that $B_{0,u} = 0$ and $B_{2n,u} = 1$ , i.e., the $u$ -coordinates of the first and last control points are 0 and 1, respectively.
153
+
154
+ The strength of the piecewise Bézier curve is that the curve shape is defined explicitly via the control points. Here we enforce the truth-table and the monotonicity constraints on the control points via,
155
+
156
+ $$
+ B_{0,v} = 0, \quad B_{2n,v} = 1; \qquad \text{(truth-table constraint)}
+ $$
+
+ $$
+ B_{2k,v} \leq B_{(2k+1),v} \leq B_{(2k+2),v}, \quad k = 0, 1, \dots, n-1. \qquad \text{(monotonicity constraint)}
+ $$
171
+
172
+ To fulfill the above restrictions in optimization, the specific form of the parameters is given by
173
+
174
+ $$
175
+ \theta = \left\{\left(\frac {B _ {i , u} - B _ {(i - 1) , u}}{B _ {2 n , u} - B _ {(i - 1) , u}}, \frac {B _ {i , v} - B _ {(i - 1) , v}}{B _ {2 n , v} - B _ {(i - 1) , v}}\right) \mid i = 1, 2, \dots , 2 n - 1 \right\},
176
+ $$
177
+
178
+ with $B_0 = (0,0)$ and $B_{2n} = (1,1)$ fixed. Every $\theta_i = (\theta_{i,u},\theta_{i,v})$ thus lies in $[0,1]^2$ , and the actual coordinates of the control points are straightforward to compute from this parameterized form. Such a parameterization makes the $\theta_i$ independent of each other, which simplifies the optimization. By default, we use a piecewise Bézier curve with two segments to parameterize $g(y;\theta)$ .
179
+
180
+ ![](images/3d25b312b95e80952a99bf9722996f38b211fd68712a98a9220c7dcdfc33daa0.jpg)
181
+ Figure 2: Parameterization of $g(y;\theta)$ using a piecewise Bézier curve with four segments. The red points are control points. The purple point lies on the curve, illustrating the relationship among $y$ , $g(y;\theta)$ , and the transversal parameter $s$ .
182
+
183
+ # Algorithm 1: Auto Seg-Loss Parameter Search
184
+
185
+ Input: Initialized network $\mathrm{N}_{\omega_0}$ , initial distribution parameters $\mu_{1}$ and $\sigma^2$ , target metric $\xi$ , training set $\mathcal{S}_{\text{train}}$ and hold-out training set $\mathcal{S}_{\text{hold-out}}$
+
+ Result: Obtained optimal parameters $\Theta^{*}$
+
+ for $t = 1$ to $T$ do
+
+ for $i = 1$ to $M$ do
+
+ Sample parameters $\Theta_{i}^{(t)}\sim \mathcal{N}_{\mathrm{trunc}[0,1]}(\mu_t,\sigma^2 I)$ ;
+
+ Train the network $\omega^{*}(\Theta_{i}^{(t)}) = \arg \max_{\omega}\widetilde{\xi}_{\Theta_{i}^{(t)}}(\mathrm{N}_{\omega};\mathcal{S}_{\mathrm{train}})$ , with $\omega$ initialized from $\omega_{0}$ ;
+
+ Compute the evaluation metric score $\xi (\Theta_{i}^{(t)}) = \xi (\mathrm{N}_{\omega^{*}(\Theta_{i}^{(t)})}; \mathcal{S}_{\text{hold-out}})$ ;
+
+ end
+
+ Update $\mu_{t + 1} = \arg \max_{\mu}\frac{1}{M}\sum_{i = 1}^{M}R(\mu ,\mu_{t},\Theta_{i}^{(t)})$ ;
+
+ end
+
+ return $\Theta^{*} = \mu_{t^{*}}$ , where $t^{*} = \arg \max_{t \in \{1,\ldots,T\}} \frac{1}{M}\sum_{i = 1}^{M}\xi (\Theta_i^{(t)})$
212
+
213
+ # 4.3 SURROGATE PARAMETER OPTIMIZATION
214
+
215
+ Algorithm 1 describes our parameter search algorithm. The training set is split into two subsets, $S_{\text{train}}$ for training and $S_{\text{hold-out}}$ for evaluation during the search. Specifically, given a segmentation network $N_{\omega}$ with weights $\omega$ , we search for the parameters that maximize the evaluation metric on the hold-out training set, $\xi(N_{\omega}; S_{\text{hold-out}})$ :
216
+
217
+ $$
218
+ \max_{\Theta} \xi(\Theta) = \xi\left(\mathrm{N}_{\omega^{*}(\Theta)}; \mathcal{S}_{\text{hold-out}}\right), \quad \text{s.t.} \quad \omega^{*}(\Theta) = \arg\max_{\omega} \widetilde{\xi}_{\Theta}\left(\mathrm{N}_{\omega}; \mathcal{S}_{\text{train}}\right). \tag{14}
219
+ $$
220
+
221
+ To optimize Eq. (14), the segmentation network is trained with SGD as the inner-level problem. At the outer level, we use reinforcement learning as the search algorithm, following common practice in AutoML (Zoph & Le, 2017; Pham et al., 2018). Other search algorithms, such as evolutionary algorithms, may also be employed. Specifically, the surrogate parameters are searched via the PPO2 algorithm (Schulman et al., 2017). The process consists of $T$ sampling steps. In the $t$ -th step, we aim to explore the search space around that of step $t - 1$ . Here, $M$ sets of parameters $\{\Theta_i^{(t)}\}_{i = 1}^M$ are sampled independently from a truncated normal distribution (Burkardt, 2014), as $\Theta \sim \mathcal{N}_{\mathrm{trunc}[0,1]}(\mu_t,\sigma^2 I)$ , with each variable in the range [0,1]. Here $\mu_t$ and $\sigma^2 I$ denote the mean and covariance of the parent normal distribution ( $\sigma$ is fixed to 0.2 in this paper), and $\mu_t$ summarizes the information from the $(t - 1)$ -th step. $M$ surrogate losses are constructed with the sampled parameters, which drive the training of $M$ segmentation networks separately. To optimize the outer-level problem, we evaluate these models with the target metric and take the evaluation scores as rewards for PPO2. Following the PPO2 algorithm, $\mu_{t + 1}$ is computed as $\mu_{t + 1} = \arg \max_{\mu}\frac{1}{M}\sum_{i = 1}^{M}R(\mu ,\mu_{t},\Theta_{i})$ , where the reward $R(\mu ,\mu_t,\Theta_i)$ is given by
222
+
223
+ $$
224
+ R(\mu, \mu_{t}, \Theta_{i}) = \min\left(\frac{p(\Theta_{i}; \mu, \sigma^{2} I)}{p(\Theta_{i}; \mu_{t}, \sigma^{2} I)} \xi(\Theta_{i}),\ \mathrm{CLIP}\left(\frac{p(\Theta_{i}; \mu, \sigma^{2} I)}{p(\Theta_{i}; \mu_{t}, \sigma^{2} I)}, 1-\epsilon, 1+\epsilon\right) \xi(\Theta_{i})\right),
225
+ $$
226
+
227
+ where $\min (\cdot ,\cdot)$ picks the smaller item from its inputs, $\mathrm{CLIP}(x,1 - \epsilon ,1 + \epsilon)$ clips $x$ to be within $1 - \epsilon$ and $1 + \epsilon$ , and $p(\Theta_i;\mu ,\sigma^2 I)$ is the PDF of the truncated normal distribution. Note that the mean reward of the $M$ samples is subtracted when computing $\xi (\Theta_{i})$ for better convergence. After $T$ steps, the mean $\mu_t$ with the highest average evaluation score is output as the final parameters $\Theta^{*}$ .
228
+
229
+ Empirically we find the searched losses have good transferability, i.e., they can be applied for different datasets and networks. Benefiting from this, we use a light proxy task for parameter search. In it, we utilize a smaller image size, a shorter learning schedule and a lightweight network. Thus, the whole search process is quite efficient (8 hours on PASCAL VOC with 8 NVIDIA Tesla V100 GPUs). More details are in Appendix A. In addition, the search process can be conducted only once for a specific metric and the resulting surrogate loss can be directly used for training henceforth.
230
+
231
+ # 5 EXPERIMENTS
232
+
233
+ We evaluate on the PASCAL VOC 2012 (Everingham et al., 2015) and Cityscapes (Cordts et al., 2016) datasets. We use DeepLabv3+ (Chen et al., 2018) with ResNet-50/101 (He et al., 2016) as the network model. During the surrogate parameter search, we randomly sample 1500 training images in PASCAL VOC and 500 training images in Cityscapes to form the hold-out set $S_{\mathrm{hold-out}}$ , respectively. The remaining training images form the training set $S_{\mathrm{train}}$ during search. $\mu_1$ is initialized such that $g(y;\theta) = y$ . The backbone network is ResNet-50. The images are down-sampled to $128\times 128$ resolution. SGD lasts only 1000 iterations with a mini-batch size of 32. After the search procedure, we re-train the segmentation networks with ResNet-101 using the searched losses on the full training set and evaluate them on the actual validation set. The re-training settings are the same as in DeepLabv3+ (Chen et al., 2018), except that the loss function is substituted with the obtained surrogate loss. The search time is measured on 8 NVIDIA Tesla V100 GPUs. More details are in Appendix A.
234
+
235
+ # 5.1 SEARCHING FOR DIFFERENT METRICS
236
+
237
+ In Table 2, we compare our searched surrogate losses against the widely-used cross-entropy loss and its variants, as well as other metric-specific surrogate losses. We also sought to compare with the AutoML-based method of Li et al. (2019), which was originally designed for other tasks, but could not obtain reasonable results due to convergence issues. The results show that our searched losses
238
+
239
+ are on par with or better than the previous losses on their target metrics. Interestingly, the obtained surrogates for boundary metrics (such as BIoU and BF1) focus only on the boundary areas; see Appendix C for further discussion. We also trained segmentation networks driven by the combination of the searched mIoU and BIoU/BF1 surrogate losses. Such combined losses refine the boundaries while keeping reasonable global performance.
240
+
241
+ Table 2: Performance of different losses on PASCAL VOC and Cityscapes segmentation. The results of each loss function's target metrics are underlined. The scores whose difference with the highest is less than 0.3 are marked in bold.
242
+
243
+ <table><tr><td>Dataset</td><td colspan="6">PASCAL VOC</td><td colspan="6">Cityscapes</td></tr><tr><td>Loss Function</td><td>mIoU</td><td>FWIoU</td><td>BIoU</td><td>BF1</td><td>mAcc</td><td>gAcc</td><td>mIoU</td><td>FWIoU</td><td>BIoU</td><td>BF1</td><td>mAcc</td><td>gAcc</td></tr><tr><td>Cross Entropy</td><td>78.69</td><td>91.31</td><td>70.61</td><td>65.30</td><td>87.31</td><td>95.17</td><td>79.97</td><td>93.33</td><td>62.07</td><td>62.24</td><td>87.01</td><td>96.44</td></tr><tr><td>WCE (Ronneberger et al., 2015)</td><td>69.60</td><td>85.64</td><td>61.80</td><td>37.59</td><td>92.61</td><td>91.11</td><td>73.01</td><td>90.51</td><td>53.07</td><td>51.19</td><td>89.22</td><td>94.56</td></tr><tr><td>DPCE (Caliva et al., 2019)</td><td>79.82</td><td>91.76</td><td>71.87</td><td>66.54</td><td>87.76</td><td>95.45</td><td>80.27</td><td>93.38</td><td>62.57</td><td>65.99</td><td>86.99</td><td>96.46</td></tr><tr><td>SSIM (Qin et al., 2019)</td><td>79.26</td><td>91.68</td><td>71.54</td><td>66.35</td><td>87.87</td><td>95.38</td><td>80.65</td><td>93.22</td><td>63.04</td><td>72.20</td><td>86.88</td><td>96.39</td></tr><tr><td>DiceLoss (Milletari et al., 2016)</td><td>77.78</td><td>91.34</td><td>69.85</td><td>64.38</td><td>87.47</td><td>95.11</td><td>79.30</td><td>93.25</td><td>60.93</td><td>59.94</td><td>86.38</td><td>96.39</td></tr><tr><td>Lovasz (Berman et al., 2018)</td><td>79.72</td><td>91.78</td><td>72.47</td><td>66.65</td><td>88.64</td><td>95.42</td><td>77.67</td><td>92.51</td><td>56.71</td><td>53.48</td><td>82.05</td><td>96.03</td></tr><tr><td>Searched mIoU</td><td>80.97</td><td>92.09</td><td>73.44</td><td>68.86</td><td>88.23</td><td>95.68</td><td>80.67</td><td>93.30</td><td>63.05</td><td>67.97</td><td>87.20</td><td>96.44</td></tr><tr><td>Searched FWIoU</td><td>80.00</td><td>91.93</td><td>75.14</td><td>65.67</td><td>89.23</td><td>95.44</td><td>79.42</td><td>93.33</td><td>61.71</td><td>59.68</td><td>87.96</td><td>96.37</td></tr><tr><td>Searched BIoU</td><td>48.97</td><td>69.89</td><td>79.27</td><td>38.99</td><td>81.28</td><td>62.64</td><td>45.89</td><td>39.80</td><td>63.89</td><td>38.29</td><td>62.80</td><td>58.15</td></tr><tr><td>Searched BF1</td><td>1.93</td><td>0.96</td><td>7.39</td><td>74.83</td><td>6.51</td><td>2.66</td><td>6.78</td><td>3.19</td><td>18.37</td><td>77.40</td><td>12.09</td><td>8.19</td></tr><tr><td>Searched mAcc</td><td>69.80</td><td>85.86</td><td>72.85</td><td>35.62</td><td>92.66</td><td>91.28</td><td>74.10</td><td>90.79</td><td>54.62</td><td>53.45</td><td>89.22</td><td>94.75</td></tr><tr><td>Searched gAcc</td><td>79.73</td><td>91.76</td><td>74.09</td><td>64.41</td><td>88.95</td><td>95.47</td><td>79.41</td><td>93.30</td><td>61.65</td><td>62.04</td><td>87.08</td><td>96.51</td></tr><tr><td>Searched mIoU + BIoU</td><td>81.19</td><td>92.19</td><td>76.89</td><td>69.56</td><td>88.36</td><td>95.75</td><td>80.43</td><td>93.34</td><td>63.88</td><td>65.87</td><td>87.03</td><td>96.45</td></tr><tr><td>Searched mIoU + BF1</td><td>78.72</td><td>90.80</td><td>71.81</td><td>73.57</td><td>86.70</td><td>94.88</td><td>78.30</td><td>93.00</td><td>61.62</td><td>71.73</td><td>87.13</td><td>96.23</td></tr></table>
244
+
245
+ # 5.2 GENERALIZATION OF THE LOSS
246
+
247
+ Generalization among datasets. Table 3 evaluates the generalization ability of our searched surrogate losses across datasets. Due to limited computational resources, we train networks only with the searched mIoU, BF1, and mAcc surrogate losses. The results show that our searched surrogate losses generalize well between these two datasets, despite their quite different scenes and categories.
248
+
249
+ Table 3: Generalization of our searched surrogate losses between PASCAL VOC and Cityscapes.
250
+
251
+ <table><tr><td>Datasets</td><td colspan="6">Cityscapes → VOC</td><td colspan="6">VOC → Cityscapes</td></tr><tr><td>Loss Function</td><td>mIoU</td><td>FWIoU</td><td>BIoU</td><td>BF1</td><td>mAcc</td><td>gAcc</td><td>mIoU</td><td>FWIoU</td><td>BIoU</td><td>BF1</td><td>mAcc</td><td>gAcc</td></tr><tr><td>Cross Entropy</td><td>78.69</td><td>91.31</td><td>70.61</td><td>65.30</td><td>87.31</td><td>95.17</td><td>79.97</td><td>93.33</td><td>62.07</td><td>62.24</td><td>87.01</td><td>96.44</td></tr><tr><td>Searched mIoU</td><td>80.05</td><td>91.72</td><td>73.97</td><td>67.61</td><td>88.01</td><td>95.45</td><td>80.67</td><td>93.31</td><td>62.96</td><td>66.48</td><td>87.36</td><td>96.44</td></tr><tr><td>Searched BF1</td><td>1.84</td><td>0.93</td><td>7.42</td><td>75.85</td><td>6.48</td><td>1.47</td><td>6.67</td><td>3.20</td><td>19.00</td><td>77.99</td><td>12.12</td><td>4.09</td></tr><tr><td>Searched mAcc</td><td>70.90</td><td>86.29</td><td>73.43</td><td>37.18</td><td>93.19</td><td>91.43</td><td>73.50</td><td>90.68</td><td>54.34</td><td>54.04</td><td>88.66</td><td>94.68</td></tr></table>
252
+
253
+ Generalization among segmentation networks. The surrogate losses are searched with ResNet-50 + DeepLabv3+ on PASCAL VOC, and then drive the training of ResNet-101 + DeepLabv3+, PSPNet (Zhao et al., 2017), and HRNet (Sun et al., 2019) on PASCAL VOC. Table 4 shows the results, demonstrating that our searched loss functions can be applied to various semantic segmentation networks.
254
+
255
+ # 5.3 ABLATION
256
+
257
+ Parameterization and constraints. Table 5 ablates the parameterization and the search space constraints. In it, a surrogate without parameters refers to Eq. (8), with the domain extended from the discrete points $\{0,1\}$ to the continuous interval [0, 1]. This naive surrogate delivers much lower accuracy, indicating the importance of parameterization. Without the truth-table constraint, the training process diverges at the very beginning, with the loss gradients becoming "NaN"; without the monotonicity constraint, the performance drops. In short, without the constraints the performance degrades or the algorithm fails altogether.
258
+
259
+ Table 4: Generalization of our searched surrogate losses among different network architectures on PASCAL VOC. The losses are searched with ResNet-50 + DeepLabv3+ on PASCAL VOC.
260
+
261
+ <table><tr><td>Network</td><td colspan="3">R50-DeepLabv3+</td><td colspan="3">R101-DeepLabv3+</td><td colspan="3">R101-PSPNet</td><td colspan="3">HRNetV2p-W48</td></tr><tr><td>Loss Function</td><td>mIoU</td><td>BF1</td><td>mAcc</td><td>mIoU</td><td>BF1</td><td>mAcc</td><td>mIoU</td><td>BF1</td><td>mAcc</td><td>mIoU</td><td>BF1</td><td>mAcc</td></tr><tr><td>Cross Entropy</td><td>76.22</td><td>61.75</td><td>85.43</td><td>78.69</td><td>65.30</td><td>87.31</td><td>77.91</td><td>64.70</td><td>85.71</td><td>76.35</td><td>61.19</td><td>85.12</td></tr><tr><td>Searched mIoU</td><td>78.35</td><td>66.93</td><td>85.53</td><td>80.97</td><td>68.86</td><td>88.23</td><td>78.93</td><td>65.65</td><td>87.42</td><td>77.26</td><td>63.52</td><td>86.80</td></tr><tr><td>Searched BF1</td><td>1.35</td><td>70.81</td><td>6.05</td><td>1.93</td><td>74.83</td><td>6.51</td><td>1.62</td><td>71.84</td><td>6.33</td><td>1.34</td><td>68.41</td><td>5.99</td></tr><tr><td>Searched mAcc</td><td>69.82</td><td>36.92</td><td>91.61</td><td>69.80</td><td>35.62</td><td>92.66</td><td>71.66</td><td>39.44</td><td>92.06</td><td>68.22</td><td>35.90</td><td>91.46</td></tr></table>
262
+
263
+ Proxy tasks for parameter search. Table 6 ablates the proxy task settings. The bottom row is our default setting, with a lightweight backbone, down-sampled images, and a shorter learning schedule. The default setting delivers accuracy on par with heavier settings, which is consistent with the generalization ability of our surrogate losses. We can thus improve the search efficiency via light proxy tasks.
264
+
265
+ Parameter search algorithm. Fig. 3 compares the employed PPO2 (Schulman et al., 2017) algorithm with random search. The much better performance of PPO2 suggests that surrogate loss search is non-trivial and reinforcement learning helps to improve the search efficiency.
266
+
267
+ Table 5: Ablation on search space constraints.
268
+
269
+ <table><tr><td>Parameter</td><td>Truth-table</td><td>Monotonicity</td><td>VOC mIoU</td></tr><tr><td>X</td><td>X</td><td>X</td><td>46.99</td></tr><tr><td>✓</td><td>X</td><td>X</td><td>Fail</td></tr><tr><td>✓</td><td>✓</td><td>X</td><td>77.76</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>80.64</td></tr></table>
270
+
271
+ Table 6: Ablation on search proxy tasks.
272
+
273
+ <table><tr><td>Backbone</td><td>Image Size</td><td>Iterations</td><td>Time(hours)</td><td>VOC mIoU</td></tr><tr><td>R50</td><td>256 × 256</td><td>1000</td><td>33.0</td><td>81.15</td></tr><tr><td>R50</td><td>128 × 128</td><td>2000</td><td>17.1</td><td>80.56</td></tr><tr><td>R101</td><td>128 × 128</td><td>1000</td><td>13.3</td><td>80.75</td></tr><tr><td>R50</td><td>128 × 128</td><td>1000</td><td>8.5</td><td>80.97</td></tr></table>
274
+
275
+ ![](images/4338645f8798a7738d15eb89afa95a1054de459edae7d95eb6a535c284577856.jpg)
276
+ (a) search for mIoU
277
+
278
+ ![](images/55b6a81260edb784928469c912377827d9353c723ef4643360b5e4cbc9176e31.jpg)
279
+ (b) search for BF1
280
+
281
+ ![](images/6d94ad2cf19964fdea0c57e53888ed4cee533f02b89db8e5e118974c90d759af.jpg)
282
+ (c) search for mAcc
283
+ Figure 3: Ablation on loss parameter search. Each curve presents the highest average evaluation score up to the $t$ -th step in one search process. The search process is repeated four times.
284
+
285
+ # 6 CONCLUSION
286
+
287
+ The introduced Auto Seg-Loss is a general framework for searching parameterized surrogate losses for mainstream segmentation evaluation metrics. The non-differentiable operators are substituted with their parameterized continuous counterparts, and the parameters are optimized, under essential constraints, to improve the final evaluation metrics. It would be interesting to extend the framework to more tasks, such as object detection, pose estimation, and machine translation.
288
+
289
+ # ACKNOWLEDGMENTS
290
+
291
+ The work is supported by the National Key R&D Program of China (2020AAA0105200), Beijing Academy of Artificial Intelligence and the Institute for Guo Qiang of Tsinghua University.
292
+
293
+ # REFERENCES
294
+
295
+ Maxim Berman, Amal Rannen Triki, and Matthew B Blaschko. The Lovász-softmax loss: A tractable surrogate for the optimization of the intersection-over-union measure in neural networks. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4413-4421, 2018.
298
+ John Burkardt. The truncated normal distribution. Department of Scientific Computing Website, Florida State University, pp. 1-35, 2014.
299
+ Francesco Caliva, Claudia Iriondo, Alejandro Morales Martinez, Sharmila Majumdar, and Valentina Pedoia. Distance map loss penalty term for semantic segmentation. In International Conference on Medical Imaging with Deep Learning-Extended Abstract Track, 2019.
300
+ Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(4): 834-848, 2017.
301
+ Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 801-818, 2018.
302
+ Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3213-3223, 2016.
303
+ Gabriela Csurka, Diane Larlus, Florent Perronnin, and France Meylan. What is a good evaluation measure for semantic segmentation? In Proceedings of the British Machine Vision Conference (BMVC), volume 27, 2013.
304
+ Mark Everingham, SM Ali Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The Pascal visual object classes challenge: A retrospective. International Journal of Computer Vision, 111(1):98-136, 2015.
305
+ Bharath Hariharan, Pablo Arbeláez, Lubomir Bourdev, Subhransu Maji, and Jitendra Malik. Semantic contours from inverse detectors. In Proceedings of IEEE International Conference on Computer Vision (ICCV), pp. 991-998, 2011.
306
+ Tamir Hazan, Joseph Keshet, and David A McAllester. Direct loss minimization for structured prediction. In Advances in Neural Information Processing Systems (NIPS), pp. 1594-1602, 2010.
307
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2016.
308
+ Xin He, Kaiyong Zhao, and Xiaowen Chu. Automl: A survey of the state-of-the-art. arXiv preprint arXiv:1908.00709, 2019.
309
+ Thorsten Joachims. A support vector method for multivariate performance measures. In Proceedings of the 22nd International Conference on Machine Learning (ICML), pp. 377-384. PMLR, 2005.
310
+ Pushmeet Kohli, Philip HS Torr, et al. Robust higher order potentials for enforcing label consistency. International Journal of Computer Vision, 82(3):302-324, 2009.
311
+ Chuming Li, Xin Yuan, Chen Lin, Minghao Guo, Wei Wu, Junjie Yan, and Wanli Ouyang. AM-LFS: AutoML for loss function search. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pp. 8410-8419, 2019.
312
+ Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. In Proceedings of the 6th International Conference on Learning Representations (ICLR), 2018.
313
+ Jun Ma. Segmentation loss odyssey. arXiv preprint arXiv:2005.13449, 2020.
314
+ Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 Fourth International Conference on 3D Vision (3DV), pp. 565-571. IEEE, 2016.
315
+
316
+ Pritish Mohapatra, Michal Rolinek, CV Jawahar, Vladimir Kolmogorov, and M Pawan Kumar. Efficient optimization for rank-based loss functions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3693-3701, 2018.
317
+ Gattigorla Nagendar, Digvijay Singh, Vineeth N Balasubramanian, and CV Jawahar. Neuro-iou: Learning a surrogate loss for semantic segmentation. In Proceedings of the British Machine Vision Conference (BMVC), pp. 278, 2018.
318
+ Yash Patel, Tomas Hodan, and Jiri Matas. Learning surrogates via deep embedding. In Proceedings of the European Conference on Computer Vision (ECCV), 2020.
319
+ Hieu Pham, Melody Guan, Barret Zoph, Quoc Le, and Jeff Dean. Efficient neural architecture search via parameters sharing. In Proceedings of the 35th International Conference on Machine Learning (ICML), pp. 4095-4104. PMLR, 2018.
320
+ Xuebin Qin, Zichen Zhang, Chenyang Huang, Chao Gao, Masood Dehghan, and Martin Jagersand. Basnet: Boundary-aware salient object detection. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7479-7489, 2019.
321
+ Md Atiqur Rahman and Yang Wang. Optimizing intersection-over-union in deep neural networks for image segmentation. In International Symposium on Visual Computing, pp. 234–244. Springer, 2016.
322
+ Mani Ranjbar, Tian Lan, Yang Wang, Steven N Robinovitch, Ze-Nian Li, and Greg Mori. Optimizing nondecomposable loss functions in structured prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(4):911-924, 2012.
323
+ Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 234-241. Springer, 2015.
324
+ John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
325
+ Yang Song, Alexander Schwing, Raquel Urtasun, et al. Training deep neural networks via direct loss minimization. In Proceedings of the 33rd International Conference on Machine Learning (ICML), pp. 2169-2177. PMLR, 2016.
326
+ Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5693-5703, 2019.
327
+ Xiaobo Wang, Shuo Wang, Cheng Chi, Shifeng Zhang, and Tao Mei. Loss function search for face recognition. In Proceedings of the 37th International Conference on Machine Learning (ICML). PMLR, 2020.
328
+ Zifeng Wu, Chunhua Shen, and Anton van den Hengel. Bridging category-level and instance-level semantic image segmentation. arXiv preprint arXiv:1605.06885, 2016.
329
+ Yisong Yue, Thomas Finley, Filip Radlinski, and Thorsten Joachims. A support vector method for optimizing average precision. In Proceedings of the 30th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 271-278, 2007.
330
+ Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia. Pyramid scene parsing network. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2881-2890, 2017.
331
+ Barret Zoph and Quoc V. Le. Neural architecture search with reinforcement learning. In Proceedings of the 5th International Conference on Learning Representations (ICLR), 2017.
autoseglosssearchingmetricsurrogatesforsemanticsegmentation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14bb63f8102209ad6da0b89342f2091471e04e3cf928787963cdb5f2bd90b1e6
3
+ size 624841
autoseglosssearchingmetricsurrogatesforsemanticsegmentation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6637bcb8f9aa87822dc5abe925b34828651d5adc4361f70042560ceb6b96a01
3
+ size 420522
auxiliarylearningbyimplicitdifferentiation/ec102424-6e52-4b06-9cc2-c97f7dd83d6e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c930221afa118f682d0a6926bb172b9f42682db9e8b9b5b83827931500f0aa97
3
+ size 131018
auxiliarylearningbyimplicitdifferentiation/ec102424-6e52-4b06-9cc2-c97f7dd83d6e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ac390d16a51a820b71dfd2fe2cb2483f093b252b39ccd59293c3c17598953a7
3
+ size 157736
auxiliarylearningbyimplicitdifferentiation/ec102424-6e52-4b06-9cc2-c97f7dd83d6e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b73fc4ce83f0bc7e29b3da0d6ccf46cd268b169ec5660e0120b13df9aa35611
3
+ size 4529694
auxiliarylearningbyimplicitdifferentiation/full.md ADDED
@@ -0,0 +1,497 @@
1
+ # AUXILIARY LEARNING BY IMPLICIT DIFFERENTIATION
2
+
3
+ Aviv Navon*
4
+
5
+ Bar-Ilan University, Israel
6
+
7
+ aviv.navon@biu.ac.il
8
+
9
+ Idan Achituve*
10
+
11
+ Bar-Ilan University, Israel
12
+
13
+ idan.achituve@biu.ac.il
14
+
15
+ Haggai Maron
16
+
17
+ NVIDIA, Israel
18
+
19
+ hmaron@nvidia.com
20
+
21
+ Gal Chechik†
22
+
23
+ Bar-Ilan University, Israel
24
+
25
+ NVIDIA, Israel
26
+
27
+ gal.chechik@biu.ac.il
28
+
29
+ Ethan Fetaya†
30
+
31
+ Bar-Ilan University, Israel
32
+
33
+ ethan.fetaya@biu.ac.il
34
+
35
+ # ABSTRACT
36
+
37
+ Training neural networks with auxiliary tasks is a common practice for improving the performance on a main task of interest. Two main challenges arise in this multi-task learning setting: (i) designing useful auxiliary tasks; and (ii) combining auxiliary tasks into a single coherent loss. Here, we propose a novel framework, AuxiLearn, that targets both challenges based on implicit differentiation. First, when useful auxiliaries are known, we propose learning a network that combines all losses into a single coherent objective function. This network can learn nonlinear interactions between tasks. Second, when no useful auxiliary task is known, we describe how to learn a network that generates a meaningful, novel auxiliary task. We evaluate AuxiLearn in a series of tasks and domains, including image segmentation and learning with attributes in the low data regime, and find that it consistently outperforms competing methods.
38
+
39
+ # 1 INTRODUCTION
40
+
41
+ The performance of deep neural networks can significantly improve by training the main task of interest with additional auxiliary tasks (Goyal et al., 2019; Jaderberg et al., 2016; Mirowski, 2019). For example, learning to segment an image into objects can be more accurate when the model is simultaneously trained to predict other properties of the image like pixel depth or 3D structure (Standley et al., 2019). In the low data regime, models trained with the main task only are prone to overfit and generalize poorly to unseen data (Vinyals et al., 2016). In this case, the benefits of learning with multiple tasks are amplified (Zhang and Yang, 2017). Training with auxiliary tasks adds an inductive bias that pushes learned models to capture meaningful representations and avoid overfitting to spurious correlations.
42
+
43
+ In some domains, it may be easy to design beneficial auxiliary tasks and collect supervised data. For example, numerous tasks were proposed for self-supervised learning in image classification, including masking (Doersch et al., 2015), rotation (Gidaris et al., 2018) and patch shuffling (Doersch and Zisserman, 2017; Noroozi and Favaro, 2016). In these cases, it is not clear what would be the best way to combine all auxiliary tasks into a single loss (Doersch and Zisserman, 2017). The common practice is to compute a weighted combination of pretext losses by tuning the weights of individual losses using hyperparameter grid search. This approach, however, limits the potential of learning with auxiliary tasks because the run time of grid search grows exponentially with the number of tasks.
44
+
45
+ In other domains, obtaining good auxiliaries in the first place may be challenging or may require expert knowledge. For example, for point cloud classification, few self-supervised tasks have been proposed; however, their benefits so far are limited (Achituve et al., 2020; Hassani and Haley, 2019;
46
+
47
+ Sauder and Sievers, 2019; Tang et al., 2020). For these cases, it would be beneficial to automate the process of generating auxiliary tasks without domain expertise.
48
+
49
+ Our work takes a step forward in automating the use and design of auxiliary learning tasks. We name our approach AuxiLearn. AuxiLearn leverages recent progress made in implicit differentiation for optimizing hyperparameters (Liao et al., 2018; Lorraine et al., 2020). We demonstrate the effectiveness of AuxiLearn in two types of problems. First, in combining auxiliaries, for cases where auxiliary tasks are predefined. We describe how to train a deep neural network (NN) on top of auxiliary losses and combine them non-linearly into a unified loss. For instance, we combine per-pixel losses in image segmentation tasks using a convolutional NN (CNN). Second, designing auxiliaries, for cases where predefined auxiliary tasks are not available. We present an approach for learning such auxiliary tasks without domain knowledge and from input data alone. This is achieved by training an auxiliary network to generate auxiliary labels while training another, primary network to learn both the original task and the auxiliary task. One important distinction from previous works, such as (Kendall et al., 2018; Liu et al., 2019a), is that we do not optimize the auxiliary parameters using the training loss but rather on a separate (small) auxiliary set, allocated from the training data. This is a key difference since the goal of auxiliary learning is to improve generalization rather than help optimization on the training data.
50
+
51
+ To validate our proposed solution, we extensively evaluate AuxiLearn in several tasks in the low-data regime. In this regime, the models suffer from severe overfitting and auxiliary learning can provide the largest benefits. Our results demonstrate that using AuxiLearn leads to improved loss functions and auxiliary tasks, in terms of the performance of the resulting model on the main task. We complement our experimental section with two interesting theoretical insights regarding our model. The first shows that a relatively simple auxiliary hypothesis class may overfit. The second aims to understand which auxiliaries benefit the main task.
52
+
53
+ To summarize, we propose a novel general approach for learning with auxiliaries using implicit differentiation. We make the following novel contributions: (a) We describe a unified approach for combining multiple loss terms and for learning novel auxiliary tasks from the data alone; (b) We provide a theoretical observation on the capacity of auxiliary learning; (c) We show that the key quantity for determining beneficial auxiliaries is the Newton update; (d) We provide new results on a variety of auxiliary learning tasks with a focus on the low data regime. We conclude that implicit differentiation can play a significant role in automating the design of auxiliary learning setups.
54
+
55
+ # 2 RELATED WORK
56
+
57
+ Learning with multiple tasks. Multitask Learning (MTL) aims at simultaneously solving multiple learning problems while sharing information across tasks. In some cases, MTL benefits the optimization process and improves task-specific generalization performance compared to single-task learning (Standley et al., 2019). In contrast to MTL, auxiliary learning aims at solving a single, main task, and the purpose of all other tasks is to facilitate the learning of the primary task. At test time, only the main task is considered. This approach has been successfully applied in multiple domains, including computer vision (Zhang et al., 2014), natural language processing (Fan et al., 2017; Trinh et al., 2018), and reinforcement learning (Jaderberg et al., 2016; Lin et al., 2019).
58
+
59
+ Dynamic task weighting. When learning a set of tasks, the task-specific losses are combined into an overall loss. The way individual losses are combined is crucial because MTL-based models are sensitive to the relative weightings of the tasks (Kendall et al., 2018). A common approach for combining task losses is in a linear fashion. When the number of tasks is small, task weights are commonly tuned with a simple grid search. However, this approach does not extend to a large number of tasks, or a more complex weighting scheme. Several recent studies proposed scaling task weights using gradient magnitude (Chen et al., 2018), task uncertainty (Kendall et al., 2018), or the rate of loss change (Liu et al., 2019b). Sener and Koltun (2018) proposed casting the multitask learning problem as a multi-objective optimization. These methods assume that all tasks are equally important, and are less suited for auxiliary learning. Du et al. (2018) and Lin et al. (2019) proposed to weight auxiliary losses using gradient similarity. However, these methods do not scale well with the number of auxiliaries and do not take into account interactions between auxiliaries. In contrast, we propose to learn from data how to combine auxiliaries, possibly in a non-linear manner.
60
+
61
+ ![](images/2b903459aa83948262563de8df24c14d6359a52fe887be37981778e98cc22957.jpg)
62
+ (a) Combining losses
63
+
64
+ ![](images/14ecb9fe7e65d31ef39cc2cf9ca1e0910232eddb44c4d8e1ed341ff64263646a.jpg)
65
+ (b) Learning a new auxiliary task
66
+ Figure 1: The AuxiLearn framework. (a) Learning to combine losses into a single coherent loss term. Here, the auxiliary network operates over a vector of losses. (b) Generating a novel auxiliary task. Here the auxiliary network operates over the input space. In both cases, $g(\cdot ;\phi)$ is optimized using IFT based on $\mathcal{L}_A$ .
67
+
68
+ Devising auxiliaries. Designing an auxiliary task for a given main task is challenging because it may require domain expertise and additional labeling effort. For self-supervised learning (SSL), many approaches have been proposed (see Jing and Tian (2020) for a recent survey), but the joint representation learned through SSL may suffer from negative transfer and hurt the main task (Standley et al., 2019). Liu et al. (2019a) proposed learning a helpful auxiliary in a meta-learning fashion, removing the need for handcrafted auxiliaries. However, their system is optimized for the training data, which may lead to degenerate auxiliaries. To address this issue, an entropy term is introduced to force the auxiliary network to spread the probability mass across classes.
69
+
70
+ Implicit differentiation based optimization. Our formulation gives rise to a bi-level optimization problem. Such problems naturally arise in the context of meta-learning (Finn et al., 2017; Rajeswaran et al., 2019) and hyperparameter optimization (Bengio, 2000; Foo et al., 2008; Larsen et al., 1996; Liao et al., 2018; Lorraine et al., 2020; Pedregosa, 2016). The Implicit Function Theorem (IFT) is often used for computing gradients of the upper-level function; this operation requires calculating a vector-inverse Hessian product. However, for modern neural networks it is infeasible to calculate it explicitly, and an approximation must be devised. Luketina et al. (2016) proposed approximating the Hessian with the identity matrix, whereas Foo et al. (2008); Pedregosa (2016); Rajeswaran et al. (2019) used conjugate gradient (CG) to approximate the product. Following Liao et al. (2018); Lorraine et al. (2020), we use a truncated Neumann series and efficient vector-Jacobian products, as this was empirically shown to be more stable than CG.
71
+
72
+ # 3 OUR METHOD
73
+
74
+ We now describe the general AuxiLearn framework for learning with auxiliary tasks. For that purpose, we use two networks, a primary network that is optimized on all tasks and an auxiliary network that is optimized on the main task only. First, we introduce our notations and formulate the general objective. Then, we describe two instances of this framework: combining auxiliaries and learning new auxiliaries. Finally, we present our optimization approach for both instances.
75
+
76
+ # 3.1 PROBLEM DEFINITION
77
+
78
+ Let $\{(\mathbf{x}_i^t,\pmb {y}_i^t)\}_{i}$ be the training set and $\{(\mathbf{x}_i^a,\pmb {y}_i^a)\}_{i}$ be a distinct independent set which we term auxiliary set. Let $f(\cdot ;W)$ denote the primary network, and let $g(\cdot ;\phi)$ denote the auxiliary network. Here, $W$ are the parameters of the model optimized on the training set, and $\phi$ are the auxiliary parameters trained on the auxiliary set. The training loss is defined as:
79
+
80
+ $$
81
+ \mathcal{L}_{T} = \mathcal{L}_{T}(W, \phi) = \sum_{i} \ell_{\text{main}}\left(\mathbf{x}_{i}^{t}, \boldsymbol{y}_{i}^{t}; W\right) + h\left(\mathbf{x}_{i}^{t}, \boldsymbol{y}_{i}^{t}, W; \phi\right), \tag{1}
82
+ $$
83
+
84
+ where $\ell_{main}$ denotes the loss of the main task and $h$ is the overall auxiliary loss, controlled by $\phi$ . In Sections 3.2 & 3.3 we will describe two instances of $h$ . We note that $h$ has access to both $W$ and $\phi$ . The loss on the auxiliary set is defined as $\mathcal{L}_A = \sum_i \ell_{main}(\mathbf{x}_i^a, \mathbf{y}_i^a; W)$ , since we are interested in the generalization performance of the main task.
85
+
86
+ We wish to find auxiliary parameters $(\phi)$ such that the primary parameters $(W)$ , trained with the combined objective, generalize well. More formally, we seek
87
+
88
+ $$
89
+ \phi^{*} = \arg\min_{\phi} \mathcal{L}_A\left(W^{*}(\phi)\right), \quad \text{s.t.} \quad W^{*}(\phi) = \arg\min_{W} \mathcal{L}_T(W, \phi). \tag{2}
90
+ $$
91
+
92
+ # 3.2 LEARNING TO COMBINE AUXILIARY TASKS
93
+
94
+ Suppose we are given $K$ auxiliary tasks, usually designed using expert domain knowledge. We wish to learn how to optimally leverage these auxiliaries by learning to combine their corresponding losses. Let $\ell (\mathbf{x},\mathbf{y};W) = (\ell_{main}(\mathbf{x},y^{main};W),\ell_1(\mathbf{x},y^1;W),\dots,\ell_K(\mathbf{x},y^K;W))$ denote a loss vector. We wish to learn an auxiliary network $g:\mathbb{R}^{K + 1}\to \mathbb{R}$ over the losses that will be added to $\ell_{main}$ in order to output the training loss $\mathcal{L}_T = \ell_{main} + g(\ell ;\phi)$ . Here, $h$ from Eq. (1) is given by $h(\cdot ;\phi) = g(\ell ;\phi)$ .
95
+
96
+ Typically, $g(\ell; \phi)$ is chosen to be a linear combination of the losses, $g(\ell; \phi) = \sum_{j} \phi_{j} \ell_{j}$, with positive weights $\phi_{j} \geq 0$ that are tuned using grid search. However, this approach can only scale to a few auxiliaries, as the run time of grid search is exponential in the number of tasks. Our method can handle a large number of auxiliaries and easily extends to a more flexible formulation in which $g$ is parametrized by a deep NN. This general form allows us to capture complex interactions between tasks and learn non-linear combinations of losses. See Figure 1a for an illustration.
97
+
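+ As a concrete illustration, the following is a minimal PyTorch sketch of a nonlinear auxiliary network over the $K+1$ loss vector. The layer sizes and Softplus activations echo the design reported in Appendix B, but all names and hyperparameters here are illustrative, not the exact implementation:
+
+ ```python
+ import torch.nn as nn
+
+ class NonlinearAuxNet(nn.Module):
+     """Sketch of g(l; phi): maps a (K+1)-dim loss vector to a scalar loss term."""
+     def __init__(self, num_losses, hidden=10, depth=5):
+         super().__init__()
+         layers, dim = [], num_losses
+         for _ in range(depth):
+             layers += [nn.Linear(dim, hidden), nn.Softplus()]
+             dim = hidden
+         layers.append(nn.Linear(dim, 1))
+         self.net = nn.Sequential(*layers)
+
+     def forward(self, losses):  # losses: (batch, K+1) per-sample loss vectors
+         return self.net(losses).mean()
+ ```
+
+ The training loss is then $\mathcal{L}_T = \ell_{main} + g(\ell; \phi)$, where only $\phi$ is updated on the auxiliary set.
+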
98
+ One way to view a non-linear combination of losses is as an adaptive linear weighting, where the losses have a different set of weights for each datum. If the loss at point $\mathbf{x}$ is $\ell_{\text{main}}(\mathbf{x}, y^{\text{main}}) + g(\ell(\mathbf{x}, \mathbf{y}))$, then the gradients are $\nabla_W \ell_{\text{main}}(\mathbf{x}, y^{\text{main}}) + \sum_j \frac{\partial g}{\partial \ell_j} \nabla_W \ell_j(\mathbf{x}, y^j)$. This is equivalent to an adaptive loss in which the loss of datum $\mathbf{x}$ is $\ell_{\text{main}} + \sum_j \alpha_{j,\mathbf{x}} \ell_j$ with $\alpha_{j,\mathbf{x}} = \frac{\partial g}{\partial \ell_j}$. This observation connects our approach to other studies that assign adaptive loss weights (e.g., Du et al. (2018); Liu et al. (2019b)).
99
+
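+ To make the adaptive-weighting view concrete, the per-sample weights $\alpha_{j,\mathbf{x}}$ can be read off with a single autograd call. In this hedged sketch, `g` is an auxiliary network such as the one above and `losses` holds per-sample loss vectors (built with `reduction='none'`); both are assumptions about the surrounding code:
+
+ ```python
+ import torch
+
+ # losses: (batch, K+1); detach so gradients flow only through g's input slot
+ losses = losses.detach().requires_grad_(True)
+ g_out = g(losses)                              # scalar output of g
+ alpha = torch.autograd.grad(g_out, losses)[0]  # alpha[i, j] ~ dg/dl_j at sample i
+ ```
+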
100
+ Convolutional loss network. In certain problems there exists a spatial relation among the losses; for example, semantic segmentation and depth estimation for images. A common approach is to average the losses over all locations. In contrast, AuxiLearn can leverage this spatial relation by creating a loss-image in which each task forms a channel of pixel-losses induced by that task. We then parametrize $g$ as a CNN that acts on this loss-image. This yields a spatially-aware loss function that captures interactions between task losses. See an example of a loss-image in Figure 3.
101
+
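+ A minimal sketch of the loss-image construction for this setting, assuming per-pixel task losses computed with `reduction='none'`; the CNN architecture here is illustrative, not the exact one used:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ conv_aux = nn.Sequential(                       # g: a small CNN over the loss-image
+     nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.Softplus(),
+     nn.Conv2d(8, 1, kernel_size=3, padding=1),
+ )
+
+ def loss_image_term(seg_loss, depth_loss, normal_loss):
+     # each input: (batch, H, W) per-pixel losses; tasks are stacked as channels
+     loss_img = torch.stack([seg_loss, depth_loss, normal_loss], dim=1)
+     return conv_aux(loss_img).mean()
+ ```
+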
102
+ Monotonicity. It is common to parametrize the function $g(\ell; \phi)$ as a linear combination with non-negative weights. Under this parameterization, $g$ is a monotonic non-decreasing function of the losses. A natural question that arises is whether we should generalize this behavior and constrain $g(\ell; \phi)$ to be non-decreasing w.r.t. the input losses as well. Empirically, we found that training with monotonic non-decreasing networks tends to be more stable and achieves better or equivalent performance. We impose monotonicity during training by clipping negative weights. See Appendix C.2 for a detailed discussion and an empirical comparison to non-monotonic networks.
103
+
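+ Clipping negative weights amounts to projecting the parameters of $g$ onto the non-negative orthant after every update of $\phi$; together with monotone activations such as Softplus, this keeps $g$ non-decreasing in each input loss. A sketch of this step (the helper name is ours):
+
+ ```python
+ import torch
+
+ def clip_negative_weights(aux_net):
+     # project weight matrices onto the non-negative orthant after each phi step
+     with torch.no_grad():
+         for name, param in aux_net.named_parameters():
+             if 'weight' in name:
+                 param.clamp_(min=0.0)
+ ```
+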
104
+ # 3.3 LEARNING NEW AUXILIARY TASKS
105
+
106
+ The previous subsection focused on situations where auxiliary tasks are given. In many cases, however, no useful auxiliary tasks are known in advance, and we are only presented with the main task. We now describe how to use AuxiLearn in such cases. The intuition is simple: We wish to learn an auxiliary task that pushes the representation of the primary network to generalize better on the main task, as measured using the auxiliary set. We do so in a student-teacher manner: an auxiliary "teacher" network produces labels for the primary network (the "student") which tries to predict these labels as an auxiliary task. Both networks are trained jointly.
107
+
108
+ More specifically, for auxiliary classification, we learn a soft labeling function $g(\mathbf{x};\phi)$ which produces pseudo labels $y_{aux}$ for input samples $\mathbf{x}$ . These labels are then provided to the main network $f(\mathbf{x};W)$ for training (see Figure 1b). During training, the primary network $f(\mathbf{x};W)$ outputs two predictions, $\hat{y}_{main}$ for the main task and $\hat{y}_{aux}$ for the auxiliary task. We then compute the full training loss $\mathcal{L}_T = \ell_{main}(\hat{y}_{main},y_{main}) + \ell_{aux}(\hat{y}_{aux},y_{aux})$ to update $W$ . Here, the $h$ component of $\mathcal{L}_T$ in Eq. (1) is given by $h(\cdot ;\phi) = \ell_{aux}(f(\mathbf{x}_i^t;W),g(\mathbf{x}_i^t;\phi))$ . As before, we update $\phi$ using the auxiliary set with the loss $\mathcal{L}_A = \ell_{main}$ . Intuitively, the teacher auxiliary network $g$ is rewarded when it provides labels to the student that help it succeed in the main task, as measured using $\mathcal{L}_A$ .
109
+
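+ A sketch of one evaluation of the training loss in this student-teacher setting. The soft cross-entropy helper and the two-headed primary network interface are our assumptions for illustration:
+
+ ```python
+ import torch.nn.functional as F
+
+ def soft_ce(logits, soft_targets):
+     # cross-entropy against the teacher's soft pseudo-labels
+     return -(soft_targets * F.log_softmax(logits, dim=1)).sum(dim=1).mean()
+
+ def training_loss(x, y_main, primary, teacher):
+     y_aux = F.softmax(teacher(x), dim=1)  # g(x; phi): soft auxiliary labels
+     main_logits, aux_logits = primary(x)  # f(x; W): two prediction heads
+     # only W is stepped on this loss; the phi-dependence is kept for Eq. (4)
+     return F.cross_entropy(main_logits, y_main) + soft_ce(aux_logits, y_aux)
+ ```
+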
110
+ # 3.4 OPTIMIZING AUXILIARY PARAMETERS
111
+
112
+ We now return to the bi-level optimization problem in Eq. (2) and present the optimization method for $\phi$. Solving Eq. (2) for $\phi$ is difficult due to the indirect dependence of $\mathcal{L}_A$ on the auxiliary parameters. To compute the gradients of $\phi$, we need to differentiate through the optimization process over $W$, since $\nabla_{\phi}\mathcal{L}_A = \nabla_W\mathcal{L}_A\cdot \nabla_{\phi}W^*$. As in Liao et al. (2018); Lorraine et al. (2020), we use the implicit function theorem (IFT) to evaluate $\nabla_{\phi}W^{*}$:
113
+
114
+ $$
115
+ \nabla_{\phi} W^{*} = -\underbrace{(\nabla_{W}^{2} \mathcal{L}_T)^{-1}}_{|W| \times |W|} \cdot \underbrace{\nabla_{\phi} \nabla_{W} \mathcal{L}_T}_{|W| \times |\phi|}. \tag{3}
116
+ $$
117
+
118
+ We can leverage the IFT to approximate the gradients of the auxiliary parameters $\phi$ :
119
+
120
+ $$
121
+ \nabla_{\phi} \mathcal{L}_A\left(W^{*}(\phi)\right) = -\underbrace{\nabla_{W} \mathcal{L}_A}_{1 \times |W|} \cdot \underbrace{\left(\nabla_{W}^{2} \mathcal{L}_T\right)^{-1}}_{|W| \times |W|} \cdot \underbrace{\nabla_{\phi} \nabla_{W} \mathcal{L}_T}_{|W| \times |\phi|}. \tag{4}
122
+ $$
123
+
124
+ See Appendix A for a detailed derivation. To compute the vector and inverse-Hessian product, we use the algorithm proposed by Lorraine et al. (2020), which uses a Neumann approximation and efficient vector-Jacobian products. We note that accurately computing $\nabla_{\phi}\mathcal{L}_A$ via the IFT requires finding a point at which $\nabla_W\mathcal{L}_T = 0$. In practice, we only approximate $W^{*}$, and simultaneously train both $W$ and $\phi$ by alternating between optimizing $W$ on $\mathcal{L}_T$ and optimizing $\phi$ using $\mathcal{L}_A$. We summarize our method in Alg. 1 and 2. Theoretical considerations regarding our method are given in Appendix D.
125
+
126
+ Algorithm 1: AuxiLearn
127
+ Initialize auxiliary parameters $\phi$ and weights $W$;
+ while not converged do
128
+ for $k = 1, \dots, N$ do
129
+ $\mathcal{L}_T = \ell_{main}(\mathbf{x}, y; W) + h(\mathbf{x}, y, W; \phi)$
+ $W \gets W - \alpha \nabla_W \mathcal{L}_T|_{\phi, W}$
130
+ end
131
+ $\phi \gets \phi - \text{Hypergradient}(\mathcal{L}_A, \mathcal{L}_T, \phi, W)$
132
+ end
133
+ return $W$
134
+
135
+ Algorithm 2: Hypergradient
136
+ Input: training loss $\mathcal{L}_T$, auxiliary loss $\mathcal{L}_A$, a fixed point $(\phi^{\prime}, W^{*})$, number of iterations $J$, learning rate $\alpha$
+ $v = p = \nabla_W \mathcal{L}_A|_{(\phi^{\prime}, W^{*})}$
137
+ for $j = 1,\dots ,J$ do
138
+ $v \gets v - \alpha\, v \cdot \nabla_W \nabla_W \mathcal{L}_T$
+ $p \gets p + v$
139
+ end
140
+ return $-p\nabla_{\phi}\nabla_{W}\mathcal{L}_{T}|_{(\phi^{\prime},W^{*})}$
141
+
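+ For concreteness, here is a compact PyTorch sketch of Alg. 2, following the Neumann-series and vector-Jacobian formulation of Lorraine et al. (2020). Here `W` and `phi` are assumed to be lists of parameter tensors, and `J` and `alpha` are illustrative choices:
+
+ ```python
+ import torch
+
+ def hypergradient(L_A, L_T, phi, W, J=5, alpha=0.1):
+     """Approximate Eq. (4) with a truncated Neumann series."""
+     v = list(torch.autograd.grad(L_A, W, retain_graph=True))
+     p = [vi.clone() for vi in v]
+     dW = torch.autograd.grad(L_T, W, create_graph=True)  # d_W L_T, differentiable
+     for _ in range(J):
+         # Hessian-vector product (d_W^2 L_T) v via a vector-Jacobian product
+         Hv = torch.autograd.grad(dW, W, grad_outputs=v, retain_graph=True)
+         v = [vi - alpha * hi for vi, hi in zip(v, Hv)]
+         p = [pi + vi for pi, vi in zip(p, v)]
+     # mixed vector-Jacobian product with d_phi d_W L_T gives the phi gradient
+     return [-g for g in torch.autograd.grad(dW, phi, grad_outputs=p)]
+ ```
+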
142
+ # 4 ANALYSIS
143
+
144
+ # 4.1 COMPLEXITY OF AUXILIARY HYPOTHESIS SPACE
145
+
146
+ In our learning setup, an additional auxiliary set is used for tuning a large set of auxiliary parameters. A natural question arises: could the auxiliary parameters overfit this auxiliary set, and what is the complexity of the auxiliary hypothesis space $\mathcal{H}_{\phi}$? Analyzing the complexity of this space is difficult because it is coupled with the hypothesis space $\mathcal{H}_W$ of the main model. One can think of this hypothesis space as a subset of the original model hypothesis space, $\mathcal{H}_{\phi} = \{h_W : \exists \phi \text{ s.t. } W = \arg \min_W \mathcal{L}_T(W, \phi)\} \subset \mathcal{H}_W$. Due to the coupling with $\mathcal{H}_W$, the behavior can be unintuitive. We show that even simple auxiliaries can have an infinite VC dimension.
147
+
148
+ Example: Consider the following 1D hypothesis space for binary classification, $\mathcal{H}_W = \{\lceil \cos(Wx)\rceil, W\in \mathbb{R}\}$, which has infinite VC-dimension. Let the main loss be the zero-one loss and the auxiliary loss be $h(\phi, W) = (\phi - W)^2$, namely an $L_{2}$ regularization with a learned center. Since the model hypothesis space $\mathcal{H}_W$ has an infinite VC-dimension, there exist training and auxiliary sets of any size that are shattered by $\mathcal{H}_W$. Therefore, for any labeling of the auxiliary and training sets, we can set $\phi = \hat{\phi}$, the parameter that perfectly classifies both sets. Then $\hat{\phi}$ is the optimum of training with this auxiliary loss, and we get that $\mathcal{H}_{\phi}$ also has an infinite VC-dimension.
149
+
150
+ This example shows that even seemingly simple auxiliary losses can overfit due to the interaction with the model hypothesis space, which motivates our use of a separate auxiliary set.
151
+
152
+ # 4.2 ANALYZING AN AUXILIARY TASK EFFECT
153
+
154
+ When designing or learning auxiliary tasks, one important question is: what makes an auxiliary task useful? Consider the following loss with a single auxiliary task: $\mathcal{L}_T(W,\phi) = \sum_i \ell_{main}(\mathbf{x}_i^t,\pmb{y}_i^t,W) + \phi \cdot \ell_{aux}(\mathbf{x}_i^t,\pmb{y}_i^t,W)$. Here $h = \phi \cdot \ell_{aux}$. Assume $\phi = 0$, so we optimize $W$ only on the standard main-task loss. We can now check whether $\frac{d\mathcal{L}_A}{d\phi}\big|_{\phi = 0} < 0$, namely, whether it would help to add this auxiliary task.
157
+
158
+ Proposition 1. Let $\mathcal{L}_T(W,\phi) = \sum_i\ell_{main}(\mathbf{x}_i^t,\pmb {y}_i^t,W) + \phi \cdot \ell_{aux}(\mathbf{x}_i^t,\pmb {y}_i^t,W)$ . Suppose that $\phi = 0$ and that the main task was trained until convergence. We have
159
+
160
+ $$
161
+ \left. \frac{d \mathcal{L}_A\left(W^{*}(\phi)\right)}{d \phi} \right|_{\phi = 0} = -\left\langle \nabla_{W} \mathcal{L}_A, \; \nabla_{W}^{2} \mathcal{L}_T^{-1} \nabla_{W} \mathcal{L}_T \right\rangle, \tag{5}
162
+ $$
163
+
164
+ i.e., the gradient with respect to the auxiliary weight is the inner product between the Newton-method update and the gradient of the loss on the auxiliary set.
165
+
166
+ Proof. In the general case, the following holds: $\frac{d\mathcal{L}_A}{d\phi} = -\nabla_W\mathcal{L}_A(\nabla_W^2\mathcal{L}_T)^{-1}\nabla_\phi \nabla_W\mathcal{L}_T$. For a linear combination, we have $\nabla_{\phi}\nabla_{W}\mathcal{L}_{T} = \sum_{i}\nabla_{W}\ell_{aux}(\mathbf{x}_{i}^{t},\pmb{y}_{i}^{t})$. Since $W$ is optimized until convergence on the main task, we obtain $\nabla_{\phi}\nabla_{W}\mathcal{L}_{T} = \nabla_{W}\mathcal{L}_{T}$.
167
+
168
+ This simple result shows that the key quantity to observe is the Newton update, rather than the gradient, which is often used (Lin et al., 2019; Du et al., 2018). Intuitively, the Newton update is the important quantity because if $\Delta \phi$ is small, then we are almost at the optimum; due to quadratic convergence, a single Newton step is then sufficient to approximately converge to the new optimum.
169
+
170
+ # 5 EXPERIMENTS
171
+
172
+ We evaluate the AuxiLearn framework on a series of tasks of two types: combining given auxiliary tasks into a unified loss (Sections 5.1 - 5.3), and generating a new auxiliary task (Section 5.4). Further experiments and analysis of both modules are given in Appendix C. Throughout all experiments, we use an extra data split for the auxiliary set; hence, we use four data splits: a training set, a validation set, a test set, and an auxiliary set. The samples for the auxiliary set are pre-allocated from the training set. For a fair comparison, these samples are used as part of the training set by all competing methods. Effectively, this means we have a slightly smaller training set for optimizing the parameters $W$ of the primary network. In all experiments, we report the mean performance (e.g., accuracy) along with the Standard Error of the Mean (SEM). Full implementation details of all experiments are given in Appendix B. Our code is available at https://github.com/AvivNavon/AuxiLearn.
173
+
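+ For concreteness, a sketch of how the auxiliary set can be carved out of the training data; the variable names, the $2.5\%$ fraction, and the use of scikit-learn are illustrative (Appendix B reports the exact allocation per experiment):
+
+ ```python
+ from sklearn.model_selection import train_test_split
+
+ # four splits: train / validation / test / auxiliary; validation and test are
+ # pre-defined, and the auxiliary set is taken from the training examples
+ train_x, aux_x, train_y, aux_y = train_test_split(
+     train_x, train_y, test_size=0.025, stratify=train_y, random_state=0)
+ ```
+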
174
+ Model variants. For learning to combine losses, we evaluated the following variants of auxiliary networks: (1) Linear: a convex linear combination of the loss terms; (2) Linear neural network (Deep linear): a deep fully-connected NN with linear activations; (3) Nonlinear: a standard feed-forward NN over the loss terms; and, for Section 5.3 only, (4) ConvNet: a CNN over the loss-images. The expressive power of the deep linear network is equivalent to that of a 1-layer linear network; however, from an optimization perspective, it was shown that the over-parameterization introduced by the network's depth can stabilize and accelerate convergence (Arora et al., 2018; Saxe et al., 2014). All variants are constrained to represent only monotone non-decreasing functions.
175
+
176
+ # 5.1 AN ILLUSTRATIVE EXAMPLE
177
+
178
+ We first present an illustrative example of how AuxiLearn changes the loss landscape and helps generalization in the presence of label noise and harmful tasks. Consider a regression problem with $y_{main} = \mathbf{w}^{\star T}\mathbf{x} + \epsilon_0$ and two auxiliary tasks. The first auxiliary is helpful, $y_{1} = \mathbf{w}^{\star T}\mathbf{x} + \epsilon_{1}$, whereas the second is harmful, $y_{2} = \tilde{\mathbf{w}}^{T}\mathbf{x} + \epsilon_{2}$ with $\tilde{\mathbf{w}}\neq \mathbf{w}^{\star}$. We let
179
+
180
+ ![](images/7b8f1945ccc3b4263ebb0521c8cbcc1c0ab00c4204cc6a3c1c629d2fc738f96e.jpg)
181
+ Figure 2: Loss landscape generated by the auxiliary network. Darker is higher. See text for details.
182
+
183
+ ![](images/33bf88e8894498229508d6f78806cef414467e83432bc6bb914b7ddaad9bb0f0.jpg)
184
+
185
+ ![](images/1b2e0eb7118d23d759c009b6316144a7e7802bbe5832bb21451bc920b5ac06be.jpg)
186
+
187
+ $\epsilon_0 \sim \mathcal{N}(0, \sigma_{main}^2)$ and $\epsilon_1, \epsilon_2 \sim \mathcal{N}(0, \sigma_{aux}^2)$ , with $\sigma_{main}^2 > \sigma_{aux}^2$ . We optimize a linear model with weights $\mathbf{w} \in \mathbb{R}^2$ that are shared across tasks, i.e., no task-specific parameters. We set $\mathbf{w}^\star = (1, 1)^T$ and $\tilde{\mathbf{w}} = (2, -4)^T$ . We train an auxiliary network to output linear task weights and observe the changes to the loss landscape in Figure 2. The left plot shows the loss landscape for the main task,
188
+
189
+ ![](images/84835989d2b8273bfde4ab52251db815f7ffb632ac5f5333d90c3605fbc24e06.jpg)
190
+ (a) image
191
+ (b) GT labels
192
+ (c) aux. loss
193
+ (d) main loss
194
+ (e) pix. weight
195
+ Figure 3: Loss images on test examples from NYUv2: (a) original image; (b) semantic segmentation ground truth; (c) auxiliaries loss; (d) segmentation (main task) loss; (e) adaptive pixel-wise weight $\sum_{j}\partial \mathcal{L}_{T} / \partial \ell_{j}$ .
196
+
197
+ with a training set optimal solution $\mathbf{w}_{train}$ . Note that $\mathbf{w}_{train} \neq \mathbf{w}^*$ due to the noise in the training data. The loss landscape of the weighted train loss at the beginning ( $t = 0$ ) and the end ( $t = T$ ) of training is shown in the middle and right plots, respectively. Note how AuxiLearn learns to ignore the harmful auxiliary and use the helpful one to find a better solution by changing the loss landscape. In Appendix C.3 we show that the auxiliary task weight is inversely proportional to the label noise.
198
+
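+ A sketch of this synthetic setup (sample size and noise scales are our choices):
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ n, w_star, w_bad = 100, np.array([1.0, 1.0]), np.array([2.0, -4.0])
+ X = rng.normal(size=(n, 2))
+ y_main = X @ w_star + rng.normal(scale=1.0, size=n)  # sigma_main > sigma_aux
+ y_aux1 = X @ w_star + rng.normal(scale=0.1, size=n)  # helpful: same w*
+ y_aux2 = X @ w_bad + rng.normal(scale=0.1, size=n)   # harmful: different weights
+ ```
+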
199
+ # 5.2 FINE-GRAINED CLASSIFICATION WITH MANY AUXILIARY TASKS
200
+
201
+ In fine-grained visual classification tasks, annotators often must have domain expertise, making data labeling challenging and potentially expensive (e.g., in the medical domain). In some cases, however, non-experts can annotate visual attributes that are informative about the main task. As an example, consider recognizing bird species, which would require an ornithologist, yet a layman can describe the head color or bill shape of a bird. These features naturally form auxiliary tasks, which can be leveraged for training jointly with the main task of bird classification.
202
+
203
+ We evaluate AuxiLearn in this fine-grained classification setup using the Caltech-UCSD Birds 200-2011 (CUB) dataset (Wah et al., 2011). CUB contains 11,788 images of 200 bird species, each image associated with a set of 312 binary visual attributes, which we use as auxiliaries. Since we are interested in setups where optimizing the main task alone does not generalize well, we demonstrate our method in a semi-supervised setting: we assume that auxiliary labels are available for all images, but only $K$ labels per class are available for the main task (denoted $K$-shot).
204
+
205
+ Table 1: Test classification accuracy on CUB 200-2011 dataset, averaged over three runs (± SEM).
206
+
207
+ <table><tr><td></td><td colspan="2">5-shot</td><td colspan="2">10-shot</td></tr><tr><td></td><td>Top 1</td><td>Top 3</td><td>Top 1</td><td>Top 3</td></tr><tr><td>STL</td><td>35.50 ± 0.7</td><td>54.79 ± 0.7</td><td>54.79 ± 0.3</td><td>74.00 ± 0.1</td></tr><tr><td>Equal</td><td>41.47 ± 0.4</td><td>62.62 ± 0.4</td><td>55.36 ± 0.3</td><td>75.51 ± 0.4</td></tr><tr><td>Uncertainty</td><td>35.22 ± 0.3</td><td>54.99 ± 0.7</td><td>53.75 ± 0.6</td><td>73.25 ± 0.3</td></tr><tr><td>DWA</td><td>41.82 ± 0.1</td><td>62.91 ± 0.4</td><td>54.90 ± 0.3</td><td>75.74 ± 0.3</td></tr><tr><td>GradNorm</td><td>41.49 ± 0.4</td><td>63.12 ± 0.4</td><td>55.23 ± 0.1</td><td>75.62 ± 0.3</td></tr><tr><td>GCS</td><td>42.57 ± 0.7</td><td>62.60 ± 0.1</td><td>55.65 ± 0.2</td><td>75.71 ± 0.1</td></tr><tr><td colspan="5">AuxiLearn</td></tr><tr><td>Linear</td><td>41.71 ± 0.4</td><td>63.73 ± 0.6</td><td>54.77 ± 0.2</td><td>75.51 ± 0.7</td></tr><tr><td>Deep Linear</td><td>45.84 ± 0.3</td><td>66.21 ± 0.5</td><td>57.08 ± 0.2</td><td>75.3 ± 0.6</td></tr><tr><td>Nonlinear</td><td>47.07 ± 0.1</td><td>68.25 ± 0.3</td><td>59.04 ± 0.2</td><td>78.08 ± 0.2</td></tr></table>
208
+
209
+ We compare AuxiLearn with the following MTL and auxiliary learning baselines: (1) Single-task learning (STL): Training only on the main task. (2) Equal: Standard multitask learning with equal weights for all auxiliary tasks. (3) GradNorm (Chen et al., 2018): An MTL method that scales losses based on gradient magnitude. (4) Uncertainty (Kendall et al., 2018): An MTL approach that uses task uncertainty to adjust task weights. (5) Gradient Cosine Similarity (GCS) (Du et al., 2018): An auxiliary-learning approach that uses gradient similarity between the main and auxiliary tasks. (6) Dynamic weight averaging (DWA) (Liu et al., 2019b): An MTL approach that sets task weights based on the rate of loss change over time. The primary network in all experiments is ResNet-18 (He et al., 2016) pre-trained on ImageNet. We use a 5-layer fully connected NN for the auxiliary network. Sensitivity analysis of the network size and auxiliary set size is presented in Appendix C.4.
210
+
211
+ Table 1 shows the test set classification accuracy. Most methods significantly improve over the STL baseline, highlighting the benefits of using additional (weak) labels. Our Nonlinear and Deep linear auxiliary network variants outperform all previous approaches by a large margin. As expected, a non-linear auxiliary network is better than its linear counterparts, suggesting that there are non-linear interactions between the loss terms that it is able to capture. Also, notice the effect of using a deep linear model compared to a (shallow) linear one: this indicates that at least part of the improvement achieved by our method is attributable to the over-parameterization of the auxiliary network. In the Appendix we further analyze properties of auxiliary networks: Appendix C.5 visualizes the full optimization path of a linear auxiliary network over a polynomial kernel on the losses, and Appendix C.6 shows that the final state of the auxiliary network alone does not account for its benefit.
212
+
213
+ # 5.3 PIXEL-WISE LOSSES
214
+
215
+ We consider the indoor-scene segmentation task from Couprie et al. (2013), which uses the NYUv2 dataset (Silberman et al., 2012). We treat the 13-class semantic segmentation as the main task, with depth and surface-normal prediction (Eigen and Fergus, 2015) as auxiliaries. We use a SegNet-based model (Badrinarayanan et al., 2017) for the primary network, and a 4-layer CNN for the auxiliary network.
216
+
217
+ Since losses in this task are given per-pixel, we can apply the ConvNet variant of the auxiliary network to the loss image. Namely, each task forms a channel with the per-pixel losses as values. Table 2 reports the mean Intersection over Union (mIoU) and pixel accuracy for the main segmentation task. Here, we
218
+
219
+ Table 2: Test results for semantic segmentation on NYUv2, averaged over four runs (± SEM).
220
+
221
+ <table><tr><td></td><td>mIoU</td><td>Pixel acc.</td></tr><tr><td>STL</td><td>18.90 ± 0.21</td><td>54.74 ± 0.94</td></tr><tr><td>Equal</td><td>19.20 ± 0.19</td><td>55.37 ± 1.00</td></tr><tr><td>Uncertainty</td><td>19.34 ± 0.18</td><td>55.70 ± 0.79</td></tr><tr><td>DWA</td><td>19.38 ± 0.14</td><td>55.37 ± 0.35</td></tr><tr><td>GradNorm</td><td>19.52 ± 0.21</td><td>56.70 ± 0.33</td></tr><tr><td>MGDA</td><td>19.53 ± 0.35</td><td>56.28 ± 0.46</td></tr><tr><td>GCS</td><td>19.94 ± 0.13</td><td>56.58 ± 0.81</td></tr><tr><td colspan="3">AuxiLearn (ours)</td></tr><tr><td>Linear</td><td>20.04 ± 0.38</td><td>56.80 ± 0.14</td></tr><tr><td>Deep Linear</td><td>19.94 ± 0.12</td><td>56.45 ± 0.79</td></tr><tr><td>Nonlinear</td><td>20.09 ± 0.34</td><td>56.80 ± 0.53</td></tr><tr><td>ConvNet</td><td>20.54 ± 0.30</td><td>56.69 ± 0.44</td></tr></table>
222
+
223
+ also compare with MGDA (Sener and Koltun, 2018), which had an extremely long training time in the CUB experiments due to the large number of auxiliary tasks and therefore was not evaluated in Section 5.2. All weighting methods achieve a performance gain over the STL model. The ConvNet variant of AuxiLearn outperforms all competitors in terms of test mIoU.
224
+
225
+ Figure 3 shows examples of the loss-images for the auxiliary (c) and main (d) tasks, together with the pixel-wise weights (e). First, note how the loss-images resemble the actual input images, suggesting that a spatial relationship can be leveraged using a CNN auxiliary network. Second, the pixel weights are a non-trivial combination of the main and auxiliary task losses. In the top (bottom) row, the plant (couch) has a low segmentation loss and an intermediate auxiliary loss. As a result, a higher weight is allocated to these pixels, which increases the error signal.
226
+
227
+ # 5.4 LEARNING AUXILIARY LABELS
228
+
229
+ Table 3: Learning auxiliary tasks. Test accuracy averaged over three runs (± SEM), without pre-training.
230
+
231
+ <table><tr><td></td><td>CIFAR10 (5%)</td><td>CIFAR100 (5%)</td><td>SVHN (5%)</td><td>CUB (30-shot)</td><td>Pet (30-shot)</td><td>Cars (30-shot)</td></tr><tr><td>STL</td><td>50.8 ± 0.8</td><td>19.8 ± 0.7</td><td>72.9 ± 0.3</td><td>37.2 ± 0.8</td><td>26.1 ± 0.5</td><td>59.2 ± 0.4</td></tr><tr><td>MAXL-F</td><td>56.1 ± 0.1</td><td>20.4 ± 0.6</td><td>75.4 ± 0.3</td><td>39.6 ± 1.3</td><td>26.2 ± 0.3</td><td>59.6 ± 1.1</td></tr><tr><td>MAXL</td><td>58.2 ± 0.3</td><td>21.0 ± 0.4</td><td>75.5 ± 0.4</td><td>40.7 ± 0.6</td><td>26.3 ± 0.6</td><td>60.4 ± 0.8</td></tr><tr><td>AuxiLearn</td><td>60.7 ± 1.3</td><td>21.5 ± 0.3</td><td>76.4 ± 0.2</td><td>44.5 ± 0.3</td><td>37.0 ± 0.6</td><td>64.4 ± 0.3</td></tr></table>
232
+
233
+ In many cases, designing helpful auxiliaries is challenging. We now evaluate AuxiLearn on learning multi-class classification auxiliary tasks. We use three multi-class classification datasets: CIFAR10, CIFAR100 (Krizhevsky et al., 2009), and SVHN (Netzer et al., 2011), and three fine-grained classification datasets: CUB-200-2011, Oxford-IIIT Pet (Parkhi et al., 2012), and Cars (Krause et al., 2013). Pet contains 7349 images of 37 species of dogs and cats, and Cars contains 16,185 images of 196 car models.
234
+
235
+ Following Liu et al. (2019a), we learn a different auxiliary task for each class of the main task. In all experiments and all learned tasks, we set the number of classes in each learned auxiliary task to 5. To examine the effect of the learned auxiliary losses in the low-data regime, we evaluate performance while training with only $5\%$ of the training set for the CIFAR10, CIFAR100, and SVHN datasets, and $\sim 30$ samples per
236
+
237
+ ![](images/a47abd6f6382da5690e085bb6b4e31161a7e2d81e1c77470907d148f057647ce.jpg)
238
+ Figure 4: t-SNE applied to auxiliary labels learned for Frog and Deer classes, in CIFAR10. Best viewed in color.
239
+
240
+ ![](images/948cfa89490828ebfd59f944ee17f0df154d6d75e3920b0e3edba391003f1a8b.jpg)
241
+
242
+ class in CUB, Oxford-IIIT Pet, and Cars. We use VGG-16 (Simonyan and Zisserman, 2014) as the backbone for both CIFAR datasets, a 4-layer ConvNet for the SVHN experiment, and ResNet-18 for the fine-grained datasets. In all experiments, the architectures of the auxiliary and primary networks were identical, and both were trained from scratch without pre-training.
243
+
244
+ We compared our approach with the following baselines: (1) Single-task learning (STL): training the main task only. (2) MAXL: Meta AuXiliary Learning (Liu et al., 2019a) for learning auxiliary tasks; MAXL optimizes the label generator in a meta-learning fashion. (3) MAXL-F: a frozen, randomly initialized MAXL label generator; it decouples the effect of having a teacher network from the additional effect of training that teacher.
245
+
246
+ Table 3 shows that AuxiLearn outperforms all baselines in all setups, even though it sacrifices some of the training set for the auxiliary set. It is also worth noting that our optimization approach is significantly faster than MAXL, yielding a 3× improvement in run-time. In Appendix C.9 and C.10 we show additional experiments for this setup, including an extension of the method to point-cloud part segmentation and experiments with varying training set sizes.
247
+
248
+ Figure 4 presents a 2D t-SNE projection of the 5D vector of auxiliary (soft) labels learned using AuxiLearn. We use samples of the main classes Frog (left) and Deer (right) from the CIFAR10 dataset; t-SNE was applied to each auxiliary task separately. When considering how images are projected into this space of auxiliary soft labels, several structures emerge. The auxiliary network learns a fine partition of the Frog class that separates real images from illustrations. More interestingly, the soft labels learned for the class Deer have a middle region that contains only deer with antlers (in various poses and with varying backgrounds). By capturing this semantic feature in the learned auxiliary labels, the auxiliary task can help the primary network discriminate between main-task classes.
249
+
250
+ # 6 DISCUSSION
251
+
252
+ In this paper, we presented a novel and unified approach for two tasks: combining predefined auxiliary tasks, and learning auxiliary tasks that are useful for the primary task. We theoretically showed which auxiliaries can be beneficial and why a separate auxiliary set is important. We empirically demonstrated that our method achieves significant improvements over existing methods on various datasets and tasks. This work opens interesting directions for future research. First, when training deep linear auxiliary networks, we observed learning dynamics similar to those of non-linear models, and as a result they achieved better performance than their (shallow) linear counterparts. This effect was previously observed in standard training setups, but the optimization path of an auxiliary network is very different. Second, we found that reallocating labeled data from the training set to an auxiliary set is consistently helpful. A broader question remains: what is the most efficient allocation?
253
+
254
+ # ACKNOWLEDGEMENTS
255
+
256
+ This study was funded by a grant to GC from the Israel Science Foundation (ISF 737/2018), and by an equipment grant to GC and Bar-Ilan University from the Israel Science Foundation (ISF 2332/18). IA and AN were funded by a grant from the Israeli innovation authority, through the AVATAR consortium.
257
+
258
+ # REFERENCES
259
+
260
+ Achituve, I., Maron, H., and Chechik, G. (2020). Self-supervised learning for domain adaptation on point-clouds. arXiv preprint arXiv:2003.12641.
261
+ Alemi, A. A., Fischer, I., Dillon, J. V., and Murphy, K. (2017). Deep variational information bottleneck. In International Conference on Learning Representations.
262
+ Arora, S., Cohen, N., and Hazan, E. (2018). On the optimization of deep networks: Implicit acceleration by overparameterization. In International Conference on Machine Learning.
263
+ Badrinarayanan, V., Kendall, A., and Cipolla, R. (2017). Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE transactions on pattern analysis and machine intelligence, 39(12):2481-2495.
264
+ Bengio, Y. (2000). Gradient-based optimization of hyperparameters. Neural computation, 12(8):1889-1900.
265
+ Chang, A. X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al. (2015). Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012.
266
+ Chen, Z., Badrinarayanan, V., Lee, C.-Y., and Rabinovich, A. (2018). Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In International Conference on Machine Learning, pages 794-803. PMLR.
267
+ Cordts, M., Omran, M., Ramos, S., Rehfeld, T., Enzweiler, M., Benenson, R., Franke, U., Roth, S., and Schiele, B. (2016). The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3213-3223.
268
+ Couprie, C., Farabet, C., Najman, L., and LeCun, Y. (2013). Indoor semantic segmentation using depth information. In International Conference on Learning Representations.
269
+ Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. (2009). Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 248-255.
270
+ Doersch, C., Gupta, A., and Efros, A. A. (2015). Unsupervised visual representation learning by context prediction. In Proceedings of the IEEE International Conference on Computer Vision, pages 1422-1430.
271
+ Doersch, C. and Zisserman, A. (2017). Multi-task self-supervised visual learning. In Proceedings of the IEEE International Conference on Computer Vision, pages 2051-2060.
272
+ Du, Y., Czarnecki, W. M., Jayakumar, S. M., Pascanu, R., and Lakshminarayanan, B. (2018). Adapting auxiliary losses using gradient similarity. arXiv preprint arXiv:1812.02224.
273
+ Eigen, D. and Fergus, R. (2015). Predicting depth, surface normals and semantic labels with a common multi-scale convolutional architecture. In Proceedings of the IEEE international conference on computer vision, pages 2650-2658.
274
+ Fan, X., Monti, E., Mathias, L., and Dreyer, M. (2017). Transfer learning for neural semantic parsing. In Proceedings of the 2nd Workshop on Representation Learning for NLP, pages 48-56.
275
+ Finn, C., Abbeel, P., and Levine, S. (2017). Model-agnostic meta-learning for fast adaptation of deep networks. In International Conference on Machine Learning, pages 1126-1135.
276
+ Foo, C.-s., Do, C. B., and Ng, A. Y. (2008). Efficient multiple hyperparameter learning for log-linear models. In Advances in neural information processing systems, pages 377-384.
277
+ Ganin, Y. and Lempitsky, V. (2015). Unsupervised domain adaptation by backpropagation. In International Conference on Machine Learning.
278
+ Gidaris, S., Singh, P., and Komodakis, N. (2018). Unsupervised representation learning by predicting image rotations. In International Conference on Learning Representations.
279
+
280
+ Goyal, P., Mahajan, D., Gupta, A., and Misra, I. (2019). Scaling and benchmarking self-supervised visual representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV).
281
+ Hassani, K. and Haley, M. (2019). Unsupervised multi-task feature learning on point clouds. In Proceedings of the IEEE International Conference on Computer Vision, pages 8160-8171.
282
+ He, K., Zhang, X., Ren, S., and Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778.
283
+ Jaderberg, M., Mnih, V., Czarnecki, W. M., Schaul, T., Leibo, J. Z., Silver, D., and Kavukcuoglu, K. (2016). Reinforcement learning with unsupervised auxiliary tasks. arXiv preprint arXiv:1611.05397.
284
+ Jing, L. and Tian, Y. (2020). Self-supervised visual feature learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence.
285
+ Kendall, A., Gal, Y., and Cipolla, R. (2018). Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7482-7491.
286
+ Kingma, D. P. and Ba, J. (2014). ADAM: A method for stochastic optimization. In International Conference on Learning Representations.
287
+ Krause, J., Stark, M., Deng, J., and Fei-Fei, L. (2013). 3D object representations for fine-grained categorization. In 4th International IEEE Workshop on 3D Representation and Recognition, Sydney, Australia.
288
+ Krizhevsky, A., Hinton, G., et al. (2009). Learning multiple layers of features from tiny images. Technical report, University of Toronto.
289
+ Larsen, J., Hansen, L. K., Svarer, C., and Ohlsson, M. (1996). Design and regularization of neural networks: the optimal use of a validation set. In Neural Networks for Signal Processing VI. Proceedings of the IEEE Signal Processing Society Workshop, pages 62-71. IEEE.
290
+ Liao, R., Xiong, Y., Fetaya, E., Zhang, L., Yoon, K., Pitkow, X., Urtasun, R., and Zemel, R. (2018). Reviving and improving recurrent back-propagation. In International Conference on Machine Learning.
291
+ Lin, X., Baweja, H., Kantor, G., and Held, D. (2019). Adaptive auxiliary task weighting for reinforcement learning. In Advances in Neural Information Processing Systems, pages 4773-4784.
292
+ Liu, S., Davison, A., and Johns, E. (2019a). Self-supervised generalisation with meta auxiliary learning. In Advances in Neural Information Processing Systems, pages 1677-1687.
293
+ Liu, S., Johns, E., and Davison, A. J. (2019b). End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1871-1880.
294
+ Lorraine, J., Vicol, P., and Duvenaud, D. (2020). Optimizing millions of hyperparameters by implicit differentiation. In International Conference on Artificial Intelligence and Statistics, pages 1540-1552. PMLR.
295
+ Luketina, J., Berglund, M., Greff, K., and Raiko, T. (2016). Scalable gradient-based tuning of continuous regularization hyperparameters. In International conference on machine learning, pages 2952-2960.
296
+ Mirowski, P. (2019). Learning to navigate. In 1st International Workshop on Multimodal Understanding and Learning for Embodied Applications, pages 25-25.
297
+ Netzer, Y., Wang, T., Coates, A., Bissacco, A., Wu, B., and Ng, A. Y. (2011). Reading digits in natural images with unsupervised feature learning. In NIPS Workshop on Deep Learning and Unsupervised Feature Learning.
298
+ Noroozi, M. and Favaro, P. (2016). Unsupervised learning of visual representations by solving jigsaw puzzles. In Proceedings of the European Conference on Computer Vision, pages 69-84. Springer.
299
+
300
+ Parkhi, O. M., Vedaldi, A., Zisserman, A., and Jawahar, C. V. (2012). Cats and dogs. In IEEE Conference on Computer Vision and Pattern Recognition.
301
+ Pedregosa, F. (2016). Hyperparameter optimization with approximate gradient. In International Conference on Machine Learning, pages 737-746.
302
+ Qi, C. R., Su, H., Mo, K., and Guibas, L. J. (2017). Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660.
303
+ Rajeswaran, A., Finn, C., Kakade, S. M., and Levine, S. (2019). Meta-learning with implicit gradients. In Advances in Neural Information Processing Systems, pages 113-124.
304
+ Salimans, T. and Kingma, D. P. (2016). Weight normalization: A simple reparameterization to accelerate training of deep neural networks. In Advances in neural information processing systems, pages 901-909.
305
+ Sauder, J. and Sievers, B. (2019). Self-supervised deep learning on point clouds by reconstructing space. In Advances in Neural Information Processing Systems, pages 12942-12952.
306
+ Saxe, A. M., McClelland, J. L., and Ganguli, S. (2014). Exact solutions to the nonlinear dynamics of learning in deep linear neural networks. In International Conference on Learning Representations.
307
+ Sener, O. and Koltun, V. (2018). Multi-task learning as multi-objective optimization. In Advances in Neural Information Processing Systems, pages 527-538.
308
+ Silberman, N., Hoiem, D., Kohli, P., and Fergus, R. (2012). Indoor segmentation and support inference from RGBD images. In Proceedings of the European conference on computer vision, pages 746-760. Springer.
309
+ Simonyan, K. and Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
310
+ Standley, T., Zamir, A. R., Chen, D., Guibas, L., Malik, J., and Savarese, S. (2019). Which tasks should be learned together in multi-task learning? arXiv preprint arXiv:1905.07553.
311
+ Tang, L., Chen, K., Wu, C., Hong, Y., Jia, K., and Yang, Z. (2020). Improving semantic analysis on point clouds via auxiliary supervision of local geometric priors. arXiv preprint arXiv:2001.04803.
312
+ Trinh, T., Dai, A., Luong, T., and Le, Q. (2018). Learning longer-term dependencies in RNNs with auxiliary losses. In International Conference on Machine Learning, pages 4965-4974.
313
+ Vinyals, O., Blundell, C., Lillicrap, T., Wierstra, D., et al. (2016). Matching networks for one shot learning. In Advances in neural information processing systems, pages 3630-3638.
314
+ Wah, C., Branson, S., Welinder, P., Perona, P., and Belongie, S. (2011). The Caltech-UCSD Birds-200-2011 Dataset. Technical Report CNS-TR-2011-001, California Institute of Technology.
315
+ Wang, Y., Sun, Y., Liu, Z., Sarma, S. E., Bronstein, M. M., and Solomon, J. M. (2019). Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics, 38(5):1-12.
316
+ Yi, L., Kim, V. G., Ceylan, D., Shen, I.-C., Yan, M., Su, H., Lu, C., Huang, Q., Sheffer, A., and Guibas, L. (2016). A scalable active framework for region annotation in 3D shape collections. ACM Transactions on Graphics, 35(6):1-12.
317
+ Zhang, Y. and Yang, Q. (2017). A survey on multi-task learning. arXiv preprint arXiv:1707.08114.
318
+ Zhang, Z., Luo, P., Loy, C. C., and Tang, X. (2014). Facial landmark detection by deep multi-task learning. In European conference on computer vision, pages 94-108. Springer.
319
+
320
+ # Appendix: Auxiliary Learning by Implicit Differentiation
321
+
322
+ # A GRADIENT DERIVATION
323
+
324
+ We provide here the derivation of Eq. (4) in Section 3. One can look at the function $\nabla_W\mathcal{L}_T(W,\phi)$ around a local minimum $(\hat{W},\hat{\phi})$ and assume the Hessian $\nabla_W^2\mathcal{L}_T(\hat{W},\hat{\phi})$ is positive-definite. At that point, we have $\nabla_W\mathcal{L}_T(\hat{W},\hat{\phi}) = 0$. From the IFT, we have that locally around $(\hat{W},\hat{\phi})$, there exists a smooth function $W^{*}(\phi)$ such that $\nabla_W\mathcal{L}_T(W,\phi) = 0$ if $W = W^{*}(\phi)$. Since the function $\nabla_W\mathcal{L}_T(W^*(\phi),\phi)$ is constant and equal to zero, its derivative w.r.t. $\phi$ is also zero. Taking the total derivative, we obtain
325
+
326
+ $$
327
+ 0 = \nabla_{W}^{2} \mathcal{L}_T(W, \phi) \nabla_{\phi} W^{*}(\phi) + \nabla_{\phi} \nabla_{W} \mathcal{L}_T(W, \phi). \tag{6}
328
+ $$
329
+
330
+ Multiplying by $\nabla_W^2\mathcal{L}_T(W,\phi)^{-1}$ and reordering we obtain
331
+
332
+ $$
333
+ \nabla_{\phi} W^{*}(\phi) = -\nabla_{W}^{2} \mathcal{L}_T(W, \phi)^{-1} \nabla_{\phi} \nabla_{W} \mathcal{L}_T(W, \phi). \tag{7}
334
+ $$
335
+
336
+ We can use this result to compute the gradients of the auxiliary set loss w.r.t. $\phi$:
337
+
338
+ $$
339
+ \nabla_{\phi} \mathcal{L}_A\left(W^{*}(\phi)\right) = \nabla_{W} \mathcal{L}_A \cdot \nabla_{\phi} W^{*}(\phi) = -\nabla_{W} \mathcal{L}_A \cdot \left(\nabla_{W}^{2} \mathcal{L}_T\right)^{-1} \cdot \nabla_{\phi} \nabla_{W} \mathcal{L}_T. \tag{8}
340
+ $$
341
+
342
+ As discussed in the main text, fully optimizing $W$ to convergence is too computationally expensive. Instead, we update $\phi$ once every several update steps of $W$, as seen in Alg. 1. To compute the vector-inverse-Hessian product, we use Alg. 2, proposed by Lorraine et al. (2020).
343
+
344
+ # B EXPERIMENTAL DETAILS
345
+
346
+ # B.1 CUB 200-2011
347
+
348
+ Data. To examine the effect of varying training set sizes, we use all 5994 predefined training images according to the official split, and we split the predefined test set into 2897 samples for validation and 2897 for testing. All images were resized to $256 \times 256$ and Z-score normalized. During training, images were randomly cropped to $224 \times 224$ and flipped horizontally. Test images were center-cropped to $224 \times 224$. The same processing was applied in all fine-grained experiments.
349
+
350
+ Training details for baselines. We fine-tuned a ResNet-18 (He et al., 2016) pre-trained on ImageNet (Deng et al., 2009) with a classification layer on top for all tasks. Because the scale of the auxiliary losses differed from that of the main task, we multiplied each auxiliary loss, for all compared methods, by the scaling factor $\tau = 0.1$, chosen based on a grid search over $\{0.1, 0.3, 0.6, 1.0\}$ using the Equal baseline. We applied grid search over the learning rates in $\{1e-3, 1e-4, 1e-5\}$ and the weight decay in $\{5e-3, 5e-4, 5e-5\}$. For DWA (Liu et al., 2019b), we searched over the temperature in $\{0.5, 2, 5\}$, and for GradNorm (Chen et al., 2018), over $\alpha$ in $\{0.3, 0.8, 1.5\}$. The computational complexity of GCS (Du et al., 2018) grows with the number of tasks; as a result, we were able to run this baseline only in a setup with two loss terms: the main loss and the sum of all auxiliary losses. We ran each configuration with 3 different seeds for 100 epochs with the ADAM optimizer (Kingma and Ba, 2014) and used early stopping based on the validation set.
351
+
352
+ The auxiliary set and auxiliary network. In our experiments, we found that allocating as few as 20 samples from the training set to the auxiliary set, and using a NN with 5 layers of 10 units each, yielded good performance for both the deep linear and non-linear models. We found that our method was not sensitive to these design choices. We use a skip connection between the main loss $\ell_{\text{main}}$ and the overall loss term, and Softplus activations.
353
+
354
+ Optimization of the auxiliary network. In all variants of our method, the auxiliary network was optimized using SGD with 0.9 momentum. We applied grid search over the auxiliary network learning rate in $\{1e - 2, 1e - 3\}$ and weight decay in $\{1e - 5, 5e - 5\}$ . The total training time of all methods was 3 hours on a 16GB Nvidia V100 GPU.
355
+
356
+ # B.2 NYUv2
357
+
358
+ The data consists of 1449 RGB-D images, split into 795 train images and 654 test images. We further split the train set to allocate 79 images, $10\%$ of training examples, to construct a validation
359
+
360
+ set. Following Liu et al. (2019b), we resize images to $288 \times 384$ pixels for training and evaluation, and use a SegNet-based architecture (Badrinarayanan et al., 2017) as the backbone.
361
+
362
+ As in Liu et al. (2019b), we train the model for 200 epochs using the Adam optimizer (Kingma and Ba, 2014) with learning rate $1e-4$, and halve the learning rate after 100 epochs. We choose the best model with early stopping on a pre-allocated validation set. For DWA (Liu et al., 2019b) we set the temperature hyperparameter to 2, as in the NYUv2 experiment of Liu et al. (2019b). For GradNorm (Chen et al., 2018) we set $\alpha = 1.5$, the value used by Chen et al. (2018) for their NYUv2 experiments. In all variants of our method, the auxiliary networks are optimized using SGD with 0.9 momentum. We allocate $2.5\%$ of training examples to form an auxiliary set. We use grid search to tune the learning rate $\{1e-3, 5e-4, 1e-4\}$ and weight decay $\{1e-5, 1e-4\}$ of the auxiliary networks. Here as well, we use a skip connection between the main loss $\ell_{main}$ and the overall loss term, and Softplus activations.
363
+
364
+ # B.3 LEARNING AUXILIARIES
365
+
366
+ Multi-class classification datasets. On the CIFAR datasets, we train the model for 200 epochs using SGD with momentum 0.9, weight decay $5e-4$, and initial learning rates $1e-1$ and $1e-2$ for CIFAR10 and CIFAR100, respectively. For the SVHN experiment, we train for 50 epochs using SGD with momentum 0.9, weight decay $5e-4$, and initial learning rate $1e-1$. The learning rate is modified using a cosine annealing scheduler. We use a VGG-16 (Simonyan and Zisserman, 2014) based architecture for the CIFAR experiments, and a 4-layer ConvNet for the SVHN experiment. For the MAXL (Liu et al., 2019a) label-generator network, we tune the following hyperparameters: learning rate $\{1e-3, 5e-4\}$, weight decay $\{5e-4, 1e-4, 5e-5\}$, and entropy term weight $\{.2, .4, .6\}$ (see Liu et al. (2019a) for details). We explore the same learning rate and weight decay for the auxiliary network in our method, and also tune the number of optimization steps between every auxiliary parameter update $\{5, 15, 25\}$ and the size of the auxiliary set $\{1.5\%, 2.5\%\}$ (of training examples). We choose the best model on the validation set and allow for early stopping.
367
+
368
+ Fine-grained classification datasets. In the CUB experiments we use the same data and splits as described in Sections 5.2 and B.1. Oxford-IIIT Pet contains 7349 images of 37 species of dogs and cats. We use the official train-test split and pre-allocate $30\%$ of the training set for validation. As a result, the total number of train/validation/test images is 2576/1104/3669, respectively. Cars (Krause et al., 2013) contains 16,185 images of 196 car classes. We use the official train-test split and pre-allocate $30\%$ of the training set for validation. As a result, the total number of train/validation/test images is 5700/2444/8041, respectively. In all experiments we use ResNet-18 as the backbone network for both the primary and auxiliary networks. Importantly, the networks are not pre-trained. The task-specific (classification) heads in both the primary and auxiliary networks are implemented using a 2-layer NN with sizes 512 and $C$, where $C$ is the number of labels (e.g., 200 for CUB and 37 for Oxford-IIIT Pet). In all experiments we use the same learning rate of $1e-4$ and weight decay of $5e-3$, which were shown to work best based on a grid search applied to the STL baseline. For MAXL and AuxiLearn we applied a grid search over the auxiliary network learning rate and weight decay as described in the multi-class classification subsection above. We tune the number of optimization steps between every auxiliary parameter update in $\{30, 60\}$ for Oxford-IIIT Pet and $\{40, 80\}$ for CUB and Cars. The auxiliary set size was tuned over $\{0.084\%, 1.68\%, 3.33\%\}$ with stratified sampling. For our method, we leverage the AuxiLearn module for combining auxiliaries. We use a Nonlinear network with either two or three hidden layers of size 10 (selected by grid search). The batch size was set to 64 in the CUB and Cars experiments and to 16 in the Oxford-IIIT Pet experiments. We ran each configuration with 3 different seeds for 150 epochs with the ADAM optimizer and used early stopping based on the validation set.
369
+
370
+ # C ADDITIONAL EXPERIMENTS
371
+
372
+ # C.1 IMPORTANCE OF AUXILIARY SET
373
+
374
+ In this section we illustrate the importance of the auxiliary set, complementing our theoretical observation in Section 4. We repeat the experiment of Section 5.1, but this time we optimize the auxiliary parameters $\phi$ on the training data. Figure 5 shows how the task weights change during training. The optimization procedure reduces to single-task learning, which badly hurts
375
+
376
+ ![](images/741af778e6dafe4dc6a9c98190d4981a7bd40289183c667699785fad983c6570.jpg)
377
+ Figure 5: Optimizing task weights on the training set reduces to single-task learning.
378
+
379
+ generalization (see Figure 2). These results are consistent with Liu et al. (2019a), who added an entropy loss term to prevent the auxiliary task from collapsing.
380
+
381
+ # C.2 MONOTONICITY
382
+
383
+ As discussed in the main text, it is common practice to combine auxiliary losses as a convex combination. This is equivalent to parametrizing the function $g(\ell; \phi)$ as a linear combination over losses, $g(\ell; \phi) = \sum_{j=1}^{K} \phi_j \ell_j$, with non-negative weights $\phi_j \geq 0$. Under this parameterization, $g$ is a monotonic non-decreasing function of the losses, since $\partial \mathcal{L}_T / \partial \ell_j \geq 0$. The non-decreasing property means that the overall loss grows (or is left unchanged) with any increase in the auxiliary losses. As a result, an optimization procedure that minimizes the combined loss also operates in the direction of reducing the individual losses (or leaving them unchanged).
384
+
385
+ A natural question that arises is whether the function $g$ should generalize this behavior and be constrained to be non-decreasing w.r.t. the losses as well. Non-decreasing networks can "ignore" an auxiliary task by zeroing its corresponding loss, but cannot reverse the gradient of a task by negating its weight. While monotonicity is a very natural requirement, in some cases negative task weights (i.e., non-monotonicity) seem desirable, for instance if one wishes to "delete" input information not directly related to the task at hand (Alemi et al., 2017; Ganin and Lempitsky, 2015). For example, in domain adaptation, one might want to remove information that allows a discriminator to recognize the domain of a given sample (Ganin and Lempitsky, 2015). Empirically, we found training with monotonic non-decreasing networks to be more stable, with better or equivalent performance; see Table 4 for a comparison.
386
+
387
+ Table 4 compares monotonic and non-monotonic auxiliary networks in both the semi-supervised and the fully-supervised setting. Monotonic networks show a small but consistent improvement over non-monotonic ones. It is also worth mentioning that the non-monotonic networks were harder to stabilize.
388
+
389
+ Table 4: CUB 200-2011: Monotonic vs non-monotonic test classification accuracy (± SEM) over three runs.
390
+
391
+ <table><tr><td></td><td></td><td>Top 1</td><td>Top 3</td></tr><tr><td rowspan="2">5-shot</td><td>Non-Monotonic</td><td>46.3 ± 0.32</td><td>67.46 ± 0.55</td></tr><tr><td>Monotonic</td><td>47.07 ± 0.10</td><td>68.25 ± 0.32</td></tr><tr><td rowspan="2">10-shot</td><td>Non-Monotonic</td><td>58.84 ± 0.04</td><td>77.67 ± 0.08</td></tr><tr><td>Monotonic</td><td>59.04 ± 0.22</td><td>78.08 ± 0.24</td></tr><tr><td rowspan="2">Full Dataset</td><td>Non-Monotonic</td><td>74.74 ± 0.30</td><td>88.3 ± 0.23</td></tr><tr><td>Monotonic</td><td>74.92 ± 0.21</td><td>88.55 ± 0.17</td></tr></table>
392
+
393
+ # C.3 NOISY AUXILIARIES
394
+
395
+ We demonstrate the effectiveness of AuxiLearn in identifying helpful auxiliaries and ignoring harmful ones. Consider a regression problem with main task $y = \mathbf{w}^T\mathbf{x} + \epsilon$, where $\epsilon \sim \mathcal{N}(0,\sigma^2)$. We learn this task jointly with $K = 100$ auxiliaries of the form $y_{j} = \mathbf{w}^{T}\mathbf{x} + |\epsilon_{j}|$, where $\epsilon_{j} \sim \mathcal{N}(0,j\cdot \sigma_{aux}^{2})$ for $j = 1,\dots,100$. We take the absolute value of the noise so that the noisy labels are no longer unbiased, making them even less helpful as the noise increases. We use a linear auxiliary network to weigh the loss terms. Figure 6 shows the learned weight for each task. We can see that the auxiliary network captures the noise pattern and assigns weights based on the noise level.
396
+
397
+ ![](images/db61b71714b113ce073273f17bbd2594b1b9a780b173f4ffd17391112a9df8e9.jpg)
398
+ Figure 6: Learning with noisy labels: task ID is proportional to the label noise.
399
+
400
+ # C.4 CUB SENSITIVITY ANALYSIS
401
+
402
+ In this section, we provide further analysis of the experiments conducted on the CUB 200-2011 dataset in the 5-shot setup. We examine the sensitivity of a non-linear auxiliary network to the size of the auxiliary set and the depth of the auxiliary network. In Figure 7a we test the effect of allocating (labeled) samples from the training set to the auxiliary set. Allocating between 10 and 50 samples results in similar performance, peaking at 20. The figure shows that removing too many samples from the training set can be damaging. Nevertheless, we notice that even when allocating 200 labeled samples (out of 1000), our nonlinear method is still better than the best competitor, GCS (Du et al., 2018), which reached an accuracy of 42.57.
403
+
404
+ Figure 7b shows how accuracy changes with the number of hidden layers. As expected, there is a positive trend: as we increase the number of layers, the network expressivity increases and performance improves. Clearly, making the auxiliary network too large may cause it to overfit the auxiliary set, as shown in Section 4 and empirically in Lorraine et al. (2020).
405
+
406
+ # C.5 LINEARLY WEIGHTED NON-LINEAR TERMS
407
+
408
+ To further motivate the use of non-linear interactions between tasks, we train a linear auxiliary network over a polynomial kernel on the tasks segmentation, depth estimation, and normal prediction from the NYUv2 dataset. Figure 8 shows the learned loss weights. From the figure, we see that two of the three largest weights at the end of training belong to non-linear terms, specifically $\text{Seg}^2$ and $\text{Seg} \cdot \text{Depth}$. We also observe a scheduling effect: at the start of training (the first $\sim 50$ steps) the auxiliary network focuses on the auxiliary tasks, and afterwards it draws most of the attention of the primary network towards the main task.
409
+
410
+ # C.6 FIXED AUXILIARY
411
+
412
+ As a result of alternating between optimizing the primary network parameters and the auxiliary parameters, the weighting of the loss terms is updated during the training process. This means that the loss landscape changes during training. This effect is observed in the illustrative examples
413
+
414
+ ![](images/a8a2bace2b7cece8f9c24cf5045838a11a27b0d63cde56a100c07a8b2453fd4c.jpg)
415
+ (a) Effect of auxiliary set size
416
+
417
+ ![](images/5016743c851747e3b7232a357110797698d6bfa82022e5c23b01f56f67a43dac.jpg)
418
+ (b) Effect of depth
419
+ Figure 7: Mean test accuracy (± SEM) averaged over 3 runs as a function of the number of samples in the auxiliary set (left) and the number of hidden layers (right). Results are on the 5-shot CUB 200-2011 dataset.
420
+
421
+ ![](images/047745223b62ef0dc26dc7eafc1709823fbfd3ac4af5acb3298ee411ebe0582d.jpg)
422
+ Polynomial kernel - linear weights
423
+ Figure 8: Learned linear weights for a polynomial kernel on the loss terms of the tasks segmentation, depth estimation and normal prediction from the NYUv2 dataset.
424
+
425
+ In those examples, the auxiliary network focuses on different tasks during different learning stages. Since the optimization is non-convex, the end result may depend not only on the final parameters, but also on the loss landscape throughout the entire process.
426
+
427
+ We examined this effect in the following setup, using the 5-shot setting on the CUB 200-2011 dataset: we trained a non-linear auxiliary network and saved the best model. We then retrained with the same configuration, except that the auxiliary network was initialized with the saved best model and kept fixed. We repeated this with ten different random seeds, affecting the primary network initialization and the data shuffling. As a result, we observed an average drop of $6.7\%$ in model performance, with a standard deviation of $1.2\%$ ( $40\%$ compared to $46.7\%$ ).
428
+
429
+ # C.7 FULL CUB DATASET
430
+
431
+ In Section 5.2 we evaluated the performance of AuxiLearn and the baseline models under a semi-supervised scenario with 5 or 10 labeled samples per class. For completeness, Table 5 shows the test accuracy in the standard fully-supervised scenario. In this case the STL baseline achieves the highest top-1 test accuracy, while our nonlinear method is second on top-1 and first on top-3. Most baselines suffer from severe negative transfer due to the large number of auxiliary tasks (which are not needed in this case), whereas our method causes minimal performance degradation.
432
+
433
+ Table 5: CUB 200-2011: Fully supervised test classification accuracy (± SEM) averaged over three runs.
434
+
435
+ <table><tr><td></td><td>Top 1</td><td>Top 3</td></tr><tr><td>STL</td><td>75.2 ± 0.52</td><td>88.4 ± 0.36</td></tr><tr><td>Equal</td><td>70.16 ± 0.10</td><td>86.87 ± 0.22</td></tr><tr><td>Uncertainty</td><td>74.70 ± 0.56</td><td>88.21 ± 0.14</td></tr><tr><td>DWA</td><td>69.88 ± 0.10</td><td>86.62 ± 0.20</td></tr><tr><td>GradNorm</td><td>70.04 ± 0.21</td><td>86.63 ± 0.13</td></tr><tr><td>GSC</td><td>71.30 ± 0.01</td><td>86.91 ± 0.28</td></tr><tr><td colspan="3">AuxiLearn (ours)</td></tr><tr><td>Linear</td><td>70.97± 0.31</td><td>86.92 ± 0.08</td></tr><tr><td>Deep Linear</td><td>73.6 ± 0.72</td><td>88.37 ± 0.21</td></tr><tr><td>Nonlinear</td><td>74.92 ± 0.21</td><td>88.55 ± 0.17</td></tr></table>
436
+
437
+ # C.8 CITYSCAPES
438
+
439
+ Cityscapes (Cordts et al., 2016) is a high-quality urban-scene dataset. We use the data provided in (Liu et al., 2019b), with 2975 training and 500 test images. The data comprises four learning tasks: 19-class, 7-class, and 2-class semantic segmentation, and depth estimation. We use the 19-class semantic segmentation as the main task and all other tasks as auxiliaries. We allocate $10\%$ of the training data as a validation set, to allow for hyperparameter tuning and early stopping. We further allocate $2.5\%$ of the remaining training examples to construct the auxiliary set. All images are resized to $128 \times 256$ to speed up computation.
440
+
441
+ We train a SegNet-based model (Badrinarayanan et al., 2017) for 150 epochs using the Adam optimizer (Kingma and Ba, 2014) with learning rate $1e - 4$ , and halve the learning rate after 100 epochs. We search over weight decay in $\{1e - 4,1e - 5\}$ . We compare AuxiLearn to the same baselines used in Section 5.2 and search over the same hyperparameters as in the NYUv2 experiment. We set the DWA temperature to 2, following (Liu et al., 2019b), and the GradNorm hyperparameter $\alpha$ to 1.5, as used in (Chen et al., 2018) for the NYUv2 experiments. We present the results in Table 6. The ConvNet variant of the auxiliary network achieves the best performance in terms of both mIoU and pixel accuracy.
442
+
443
+ Table 6: 19-class semantic segmentation test set results on Cityscapes, averaged over three runs (± SEM).
444
+
445
+ <table><tr><td></td><td>mIoU</td><td>Pixel acc.</td></tr><tr><td>STL</td><td>30.18 ± 0.04</td><td>87.08 ± 0.18</td></tr><tr><td>Equal</td><td>30.45 ± 0.14</td><td>87.14 ± 0.08</td></tr><tr><td>Uncertainty</td><td>30.49 ± 0.21</td><td>86.89 ± 0.07</td></tr><tr><td>DWA</td><td>30.79 ± 0.32</td><td>86.97 ± 0.26</td></tr><tr><td>GradNorm</td><td>30.62 ± 0.03</td><td>87.15 ± 0.04</td></tr><tr><td>GSC</td><td>30.32 ± 0.23</td><td>87.02 ± 0.12</td></tr><tr><td colspan="3">AuxiLearn (ours)</td></tr><tr><td>Linear</td><td>30.63 ± 0.19</td><td>86.88 ± 0.03</td></tr><tr><td>Nonlinear</td><td>30.85 ± 0.19</td><td>87.19 ± 0.20</td></tr><tr><td>ConvNet</td><td>30.99 ± 0.05</td><td>87.21 ± 0.11</td></tr></table>
446
+
447
+ # C.9 LEARNING SEGMENTATION AUXILIARY FOR 3D POINT CLOUDS
448
+
449
+ Recently, several methods have been proposed for learning with auxiliary tasks on point clouds (Achituve et al., 2020; Hassani and Haley, 2019; Sauder and Sievers, 2019); however, this domain is still largely unexplored, and it is not yet clear in advance which auxiliary tasks will be beneficial. It is therefore desirable to automate this process, even at the cost of some performance degradation compared to human-designed methods.
450
+
451
+ We further evaluate our method on the task of generating helpful auxiliary tasks for 3D point-cloud data, extending AuxiLearn to segmentation tasks. In Section 5.4 we trained an auxiliary network to output soft auxiliary labels for a classification task. Here, we use a similar approach.
452
+
453
+ Table 7: Learning an auxiliary segmentation task. Test mean IoU on the ShapeNet part dataset, averaged over three runs (± SEM), in the 30-shot setting.
454
+
455
+ <table><tr><td></td><td>Mean</td><td>Airplane</td><td>Bag</td><td>Cap</td><td>Car</td><td>Chair</td><td>Earphone</td><td>Guitar</td><td>Knife</td><td>Lamp</td><td>Laptop</td><td>Motorbike</td><td>Mug</td><td>Pistol</td><td>Rocket</td><td>Skateboard</td><td>Table</td></tr><tr><td>Num. samples</td><td>2874</td><td>341</td><td>14</td><td>11</td><td>158</td><td>704</td><td>14</td><td>159</td><td>80</td><td>286</td><td>83</td><td>51</td><td>38</td><td>44</td><td>12</td><td>31</td><td>848</td></tr><tr><td>STL</td><td>75.6</td><td>68.7</td><td>82.9</td><td>85.2</td><td>65.6</td><td>82.3</td><td>70.2</td><td>86.1</td><td>75.1</td><td>68.4</td><td>94.3</td><td>55.1</td><td>91.0</td><td>72.6</td><td>60.2</td><td>72.3</td><td>74.2</td></tr><tr><td>DAE</td><td>74.0</td><td>66.6</td><td>77.6</td><td>79.1</td><td>60.5</td><td>81.2</td><td>73.8</td><td>87.1</td><td>77.0</td><td>65.4</td><td>93.6</td><td>51.8</td><td>88.4</td><td>74.0</td><td>55.4</td><td>68.4</td><td>72.7</td></tr><tr><td>DefRec</td><td>74.6</td><td>68.6</td><td>81.2</td><td>83.8</td><td>63.6</td><td>82.1</td><td>72.9</td><td>86.9</td><td>72.7</td><td>69.4</td><td>93.4</td><td>51.8</td><td>89.7</td><td>72.0</td><td>57.2</td><td>70.5</td><td>71.7</td></tr><tr><td>RS</td><td>76.5</td><td>69.7</td><td>79.1</td><td>85.9</td><td>64.9</td><td>83.8</td><td>68.4</td><td>82.8</td><td>79.4</td><td>70.7</td><td>94.5</td><td>58.9</td><td>91.8</td><td>72.0</td><td>53.4</td><td>70.3</td><td>75.0</td></tr><tr><td>AuxiLearn</td><td>76.2</td><td>68.9</td><td>78.3</td><td>83.6</td><td>64.9</td><td>83.4</td><td>69.7</td><td>87.4</td><td>80.7</td><td>68.3</td><td>94.6</td><td>53.2</td><td>92.1</td><td>73.7</td><td>61.6</td><td>72.4</td><td>74.6</td></tr></table>
456
+
457
+ We assign a soft label vector to each point, and then train the primary network on the main task together with an auxiliary task of segmenting each point according to the learned labels.
458
+
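+ A sketch of the resulting per-point soft-label auxiliary loss (our PyTorch illustration; the shapes and the soft cross-entropy form are assumptions, not the released code):
+
+ ```python
+ # The auxiliary network emits a soft label distribution for every point, and
+ # the primary network's auxiliary head is trained to match it.
+ import torch
+ import torch.nn.functional as F
+
+ B, N, K_aux = 8, 1024, 6                       # batch, points per shape, aux classes
+ aux_logits = torch.randn(B, N, K_aux, requires_grad=True)      # auxiliary network
+ primary_logits = torch.randn(B, N, K_aux, requires_grad=True)  # primary aux head
+
+ soft_labels = F.softmax(aux_logits, dim=-1)    # learned per-point soft labels
+ log_probs = F.log_softmax(primary_logits, dim=-1)
+ aux_loss = -(soft_labels * log_probs).sum(dim=-1).mean()
+ aux_loss.backward()                            # gradients reach both networks
+ print(aux_loss.item())
+ ```
+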
459
+ We evaluated the above approach on a part-segmentation task using the ShapeNet part dataset (Yi et al., 2016). This dataset contains 16,881 3D shapes from 16 object categories (including Airplane, Bag, and Lamp), annotated with a total of 50 parts (at most 6 parts per object). The main task is to predict a part label for each point. We follow the official train/val/test split of (Chang et al., 2015). We also follow the standard experimental setup in the literature, which assumes known object category labels during segmentation of a shape (see, e.g., (Qi et al., 2017; Wang et al., 2019)). During training we uniformly sample 1024 points from each shape and ignore point normals. During evaluation we use all points of a shape. For all methods (ours and the baselines) we used the DGCNN architecture (Wang et al., 2019) as the backbone feature extractor and for part segmentation. We evaluated performance using point Intersection-over-Union (IoU), following (Qi et al., 2017).
460
+
461
+ We compared AuxiLearn with the following baselines: (1) Single Task Learning (STL): training with the main task only; (2) DefRec: an auxiliary task of reconstructing a shape with a deformed region (Achituve et al., 2020); (3) Reconstructing Spaces (RS): an auxiliary task of reconstructing a shape from a shuffled version of it (Sauder and Sievers, 2019); and (4) Denoising Auto-encoder (DAE): an auxiliary task of reconstructing a point cloud perturbed with i.i.d. noise from $\mathcal{N}(0,0.01)$ .
462
+
463
+ We performed a hyperparameter search over the primary network learning rate in $\{1e - 3,1e - 4\}$ , weight decay in $\{5e - 5,1e - 5\}$ , and the weight ratio between the main and auxiliary task in $\{1:1,1:0.5,1:0.25\}$ . We trained each method for 150 epochs using the Adam optimizer with a cosine learning-rate scheduler. We applied early stopping based on the mean IoU on the validation set. We ran each configuration with 3 different seeds and report the average mean IoU along with the SEM. We used the segmentation network proposed in (Wang et al., 2019), except that the network was not supplied the object label as input.
464
+
465
+ For AuxiLearn, we used a smaller version of PointNet (Qi et al., 2017), without the input and feature transform layers, as the auxiliary network. We selected PointNet because it is lightweight and therefore a good fit in our case. We learned a different auxiliary task for each object category (with 6 classes per category), since this showed better results. We performed a hyperparameter search over the auxiliary network learning rate in $\{1e - 2,1e - 3\}$ and weight decay in $\{5e - 3,5e - 4\}$ . Two training samples from each class were allocated to the auxiliary set.
466
+
467
+ Table 7 shows the mean IoU per category when training with only 30 segmented point clouds per object category (480 in total). As can be seen, AuxiLearn's performance is close to that of RS (Sauder and Sievers, 2019) and improves upon the other baselines. This shows that, in this case, our method generates useful auxiliary tasks whose gains are similar to or better than those of tasks designed by humans.
468
+
469
+ # C.10 LEARNING AN AUXILIARY CLASSIFIER
470
+
471
+ In Section 5.4 we show how AuxiLearn learns a novel auxiliary task to improve upon baseline methods. For the fine-grained classification experiments, we use only 30 samples per class. Here we also compare AuxiLearn with the baseline methods when there are only 15 images per class. Table 8 shows that AuxiLearn is superior to the baseline methods in this setup as well, even though it requires allocating some samples from the training data to the auxiliary set.
472
+
473
+ To further examine the effect of learning a novel auxiliary task with varying training set size, we provide additional experiments on the CIFAR10 dataset. We evaluate the methods with $10\%$ , $15\%$ , and $100\%$ of the training examples. The results are presented in Table 9.
474
+
475
+ Table 8: Learning an auxiliary task. Test accuracy averaged over three runs (± SEM), in the 15-shot setting.
476
+
477
+ <table><tr><td></td><td>CUB</td><td>Pet</td></tr><tr><td>STL</td><td>22.6 ± 0.2</td><td>13.6 ± 0.7</td></tr><tr><td>MAXL-F</td><td>24.2 ± 0.7</td><td>14.1 ± 0.1</td></tr><tr><td>MAXL</td><td>24.2 ± 0.8</td><td>14.2 ± 0.2</td></tr><tr><td>AuxiLearn</td><td>26.1 ± 0.7</td><td>18.0 ± 0.9</td></tr></table>
478
+
479
+ Table 9: CIFAR10 test accuracy averaged over three runs (± SEM).
480
+
481
+ <table><tr><td></td><td colspan="3">CIFAR10</td></tr><tr><td></td><td>10%</td><td>15%</td><td>100%</td></tr><tr><td>STL</td><td>72.63 ± 2.14</td><td>80.30 ± 0.09</td><td>93.36 ± 0.05</td></tr><tr><td>MAXL</td><td>75.85 ± 0.32</td><td>81.37 ± 0.26</td><td>93.49 ± 0.02</td></tr><tr><td>AuxiLearn</td><td>76.75 ± 0.08</td><td>81.42 ± 0.30</td><td>93.54 ± 0.05</td></tr></table>
482
+
483
+ As expected, learning with auxiliaries is mostly helpful in the low-data regime. Nonetheless, AuxiLearn improves over single-task learning and MAXL for all training set sizes.
484
+
485
+ # D THEORETICAL CONSIDERATIONS
486
+
487
+ In this section, we discuss the theoretical limitations of AuxiLearn. First, we discuss the smoothness of the learned loss criterion when learning to combine losses using DNNs. Next, we present limitations that may arise from utilizing the IFT, and how to resolve them. Finally, we discuss the approximations made to achieve an efficient optimization procedure.
488
+
489
+ Smoothness of the loss criterion. When learning to combine losses as described in Section 3.2, one must take into consideration the smoothness of the learned loss criterion as a function of $W$ . This limits, at least in theory, the design choices for the auxiliary network. In our experiments we use a smooth activation function, namely Softplus, to ensure the existence of $\partial \mathcal{L}_T / \partial W$ . Nonetheless, using non-smooth activations (e.g., ReLU) results in a piecewise-smooth loss function, and hence might still work well in practice.
490
+
491
+ Assumptions for IFT. One assumption for applying the IFT as described in Section 3.4 is that $\mathcal{L}_T$ is continuously differentiable w.r.t. the auxiliary and primary parameters. This assumption limits the design choices for both the auxiliary and the primary networks. For instance, one must use only smooth activation functions. However, many non-smooth components can be replaced with smooth counterparts. For example, ReLU can be replaced with Softplus, since $ReLU(x) = \lim_{\alpha \to \infty}\ln (1 + \exp (\alpha x)) / \alpha$ , and the beneficial effects of Batch-Normalization can be captured with Weight-Normalization, as argued in (Salimans and Kingma, 2016).
492
+
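+ To illustrate the Softplus limit above, here is a small numerical check (ours, not from the paper):
+
+ ```python
+ # Softplus ln(1 + exp(alpha * x)) / alpha approaches ReLU as alpha grows;
+ # the maximum gap is ln(2) / alpha, attained at x = 0.
+ import numpy as np
+
+ def softplus(x, alpha=1.0):
+     return np.logaddexp(0.0, alpha * x) / alpha   # numerically stable form
+
+ x = np.linspace(-2.0, 2.0, 401)
+ for alpha in (1.0, 10.0, 100.0):
+     gap = np.max(np.abs(softplus(x, alpha) - np.maximum(x, 0.0)))
+     print(f"alpha={alpha:6.1f}  max gap to ReLU = {gap:.5f}")
+ ```
+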
493
+ For the setup of learning to combine losses, we use the above substitutes, namely Softplus and Weight Normalization; however, for the setup of learning a novel auxiliary task, we share the architecture between the primary and auxiliary networks (e.g., ResNet18). While using non-smooth components may, in theory, cause issues, we show empirically through extensive experiments that AuxiLearn performs well in practice and that its optimization is stable. Furthermore, we note that while ReLUs are non-smooth, they are piecewise smooth, hence the set of non-smoothness points has measure zero.
494
+
495
+ Approximations. Our optimization procedure relies on several approximations to efficiently solve a complex bi-level optimization problem. The trade-off between computational efficiency and approximation accuracy can be controlled by (i) the number of terms in the Neumann series, and (ii) the number of optimization steps between auxiliary parameter updates. While we cannot guarantee that the bi-level optimization process converges, empirically we observe a stable optimization process.
496
+
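+ For concreteness, here is a toy sketch of the truncated Neumann-series approximation to an inverse-Hessian-vector product (our numpy illustration with an explicit matrix; in practice only Hessian-vector products are used, as in Lorraine et al. (2020)):
+
+ ```python
+ # H^{-1} v  ~  alpha * sum_{i=0}^{k} (I - alpha * H)^i v, valid when the
+ # eigenvalues of alpha * H lie in (0, 2); k controls the truncation error.
+ import numpy as np
+
+ def neumann_inv_hvp(hvp, v, alpha, k):
+     term, acc = v.copy(), v.copy()
+     for _ in range(k):
+         term = term - alpha * hvp(term)   # apply (I - alpha * H) repeatedly
+         acc = acc + term
+     return alpha * acc
+
+ rng = np.random.default_rng(0)
+ Q = rng.normal(size=(5, 5))
+ H = Q @ Q.T + np.eye(5)                   # symmetric positive definite
+ v = rng.normal(size=5)
+ approx = neumann_inv_hvp(lambda u: H @ u, v, alpha=1.0 / np.linalg.norm(H, 2), k=500)
+ print(np.allclose(approx, np.linalg.solve(H, v)))   # True
+ ```
+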
497
+ Our work builds on previous studies in the field of hyperparameter optimization (Lorraine et al., 2020; Pedregosa, 2016). Lorraine et al. (2020) provide an error analysis for both approximations, in a setup for which the exact Hessian can be evaluated in closed form. We refer the reader to Pedregosa (2016) for theoretical analysis and results regarding the second approximation (i.e., the sub-optimality of the inner optimization problem in Eq. 2).
auxiliarylearningbyimplicitdifferentiation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4788182c79fd99b7cdfe419a3ec4ccb809dcca8585bf41d15e99599f68d7e3f
3
+ size 542032
auxiliarylearningbyimplicitdifferentiation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d8ac08e247db4e08c9cab67edafd221aa273c3206d0244cbb362e1d9bb572e6
3
+ size 685069
averagecaseaccelerationforbilineargamesandnormalmatrices/a2131a7c-1207-4a46-87c5-3883e655b7ea_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab5b26bc84db76dfc3f77cdca5b2478a35fbfbf9b050067c7d59ced04bccb6f3
3
+ size 171403
averagecaseaccelerationforbilineargamesandnormalmatrices/a2131a7c-1207-4a46-87c5-3883e655b7ea_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc33f12e807d4e7a948be6d22075d16afa899115406ab55495da781b4fbbd197
3
+ size 193352
averagecaseaccelerationforbilineargamesandnormalmatrices/a2131a7c-1207-4a46-87c5-3883e655b7ea_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:236d4d567420c1fb4f688eea4d7ab48979b03467fbe4445661dff825126d1acf
3
+ size 533883
averagecaseaccelerationforbilineargamesandnormalmatrices/full.md ADDED
@@ -0,0 +1,999 @@
 
 
 
 
1
+ # AVERAGE-CASE ACCELERATION FOR BILINEAR GAMES AND NORMAL MATRICES
2
+
3
+ Carles Domingo-Enrich
4
+
5
+ Computer Science Department
6
+
7
+ Courant Institute of Mathematical Sciences
8
+
9
+ New York University
10
+
11
+ New York, NY 10012, USA
12
+
13
+ cd2754@nyu.edu
14
+
15
+ Fabian Pedregosa
16
+
17
+ Google Research
18
+
19
+ pedregosa@google.com
20
+
21
+ Damien Scieur
22
+
23
+ Samsung SAIT AI Lab & Mila
24
+
25
+ Montreal, Canada
26
+
27
+ damien.scieur@gmail.com
28
+
29
+ # ABSTRACT
30
+
31
+ Advances in generative modeling and adversarial learning have given rise to renewed interest in smooth games. However, the absence of symmetry in the matrix of second derivatives poses challenges that are not present in the classical minimization framework. While a rich theory of average-case analysis has been developed for minimization problems, little is known in the context of smooth games. In this work we take a first step towards closing this gap by developing average-case optimal first-order methods for a subset of smooth games. We make the following three main contributions. First, we show that for zero-sum bilinear games the average-case optimal method is the optimal method for the minimization of the Hamiltonian. Second, we provide an explicit expression for the optimal method corresponding to normal matrices, potentially non-symmetric. Finally, we specialize it to matrices with eigenvalues located in a disk and show a provable speed-up compared to worst-case optimal algorithms. We illustrate our findings through numerical simulations with a varying degree of mismatch with our assumptions.
32
+
33
+ # 1 INTRODUCTION
34
+
35
+ The traditional analysis of optimization algorithms is a worst-case analysis (Nemirovski, 1995; Nesterov, 2004). This type of analysis provides a complexity bound for any input from a function class, no matter how unlikely. However, since hard-to-solve inputs might rarely occur in practice, the worst-case complexity bounds might not be representative of the observed running time.
36
+
37
+ A more representative analysis is given by the average-case complexity, averaging the algorithm's complexity over all possible inputs. This analysis is standard for analyzing, e.g., sorting (Knuth, 1997) and cryptography algorithms (Katz & Lindell, 2014). Recently, a line of work (Berthier et al., 2020; Pedregosa & Scieur, 2020; Lacotte & Pilanci, 2020; Paquette et al., 2020) focused on optimal methods for the optimization of quadratics, specified by a symmetric matrix. While worst-case analysis uses bounds on the matrix eigenvalues to yield upper and lower bounds on convergence, average-case analysis relies on the expected distribution of eigenvalues and provides algorithms with sharp optimal convergence rates. While the algorithms developed in this context have been shown to be efficient for minimization problems, these have not been extended to smooth games.
38
+
39
+ A different line of work considers algorithms for smooth games but studies worst-case optimal methods (Azizian et al., 2020). In this work, we combine average-case analysis with smooth games, and develop novel average-case optimal algorithms for finding the root of a linear system determined by a (potentially non-symmetric) normal matrix. We make the following main contributions:
40
+
41
+ 1. Inspired by the problem of finding equilibria in smooth games, we develop average-case optimal algorithms for finding the root of a non-symmetric affine operator, both under a normality assumption (Thm. 4.1), and under the extra assumption that eigenvalues of the operator are supported in a disk (Thm. 4.2). The proposed method shows a polynomial speedup compared to the worst-case optimal method, verified by numerical simulations.
42
+ 2. We make a novel connection between average-case optimal methods for optimization, and average-case optimal methods for bilinear games. In particular, we show that solving the Hamiltonian using an average-case optimal method is optimal (Theorem 3.1) for bilinear games. This result complements (Azizian et al., 2020), who proved that Polyak Heavy Ball algorithm on the Hamiltonian is asymptotically worst-case optimal for bilinear games.
43
+
44
+ # 2 AVERAGE-CASE ANALYSIS FOR NORMAL MATRICES
45
+
46
+ In this paper we consider the following class of problems.
47
+
48
+ Definition 1. Let $\mathbf{A} \in \mathbb{R}^{d \times d}$ be a real matrix and $\mathbf{x}^{\star} \in \mathbb{R}^{d}$ a vector. The non-symmetric (affine) operator (NSO) problem is defined as:
49
+
50
+ $$
51
+ \text{Find } \boldsymbol{x}: \quad F(\boldsymbol{x}) \stackrel{\text{def}}{=} \boldsymbol{A}(\boldsymbol{x} - \boldsymbol{x}^{\star}) = \mathbf{0}. \tag{NSO}
52
+ $$
53
+
54
+ This problem generalizes that of the minimization of a convex quadratic function $f$ , since we can cast the latter in this framework by setting the operator $F = \nabla f$ . The set of solutions is an affine subspace that we will denote $\mathcal{X}^{\star}$ . We will find it convenient to consider the distance to this set, defined as
55
+
56
+ $$
57
+ \operatorname{dist}(\boldsymbol{x}, \mathcal{X}^{\star}) \stackrel{\text{def}}{=} \min_{\boldsymbol{v} \in \mathcal{X}^{\star}} \|\boldsymbol{x} - \boldsymbol{v}\|^{2}, \quad \text{with } \mathcal{X}^{\star} = \left\{\boldsymbol{x} \in \mathbb{R}^{d} \mid \boldsymbol{A}\left(\boldsymbol{x} - \boldsymbol{x}^{\star}\right) = \boldsymbol{0}\right\}. \tag{1}
58
+ $$
59
+
60
+ In this paper we will develop average-case optimal methods. For this, we consider $\mathbf{A}$ and $\boldsymbol{x}^{\star}$ to be random, together with a random initialization $\boldsymbol{x}_0$ . This induces a probability distribution over NSO problems, and we seek methods with optimal expected suboptimality w.r.t. this distribution. Denoting by $\mathbb{E}_{(\mathbf{A},\boldsymbol{x}^{\star},\boldsymbol{x}_0)}$ the expectation over these random problems, average-case optimal methods verify the following property at each iteration $t$ :
61
+
62
+ $$
63
+ \min_{\boldsymbol{x}_{t}} \mathbb{E}_{\left(\boldsymbol{A}, \boldsymbol{x}^{\star}, \boldsymbol{x}_{0}\right)} \operatorname{dist}\left(\boldsymbol{x}_{t}, \mathcal{X}^{\star}\right) \quad \text{s.t. } \boldsymbol{x}_{i} \in \boldsymbol{x}_{0} + \operatorname{span}\left(\left\{F\left(\boldsymbol{x}_{j}\right)\right\}_{j = 0}^{i - 1}\right), \ \forall i \in [1:t]. \tag{2}
64
+ $$
65
+
66
+ The last condition on $\boldsymbol{x}_t$ stems from restricting the class of algorithms to first-order methods. The class of first-order methods encompasses many known schemes such as gradient descent with momentum, or full-matrix AdaGrad. However, methods such as Adam (Kingma & Ba, 2015) or diagonal AdaGrad (Duchi et al., 2011) are not in this class, as the diagonal re-scaling creates iterates $\boldsymbol{x}_t$ outside the span of previous gradients. Although we will focus on the distance to the solution, the results can be extended to other convergence criteria such as $\| F(\boldsymbol{x}_t)\|^2$ .
67
+
68
+ Finally, note that the expectations in this paper are on the problem instance and not on the randomness of the algorithm.
69
+
70
+ # 2.1 ORTHOGONAL RESIDUAL POLYNOMIALS AND FIRST-ORDER METHODS
71
+
72
+ The analysis of first-order methods simplifies through the use of polynomials. This section provides the tools required to leverage this connection.
73
+
74
+ Definition 2. A residual polynomial is a polynomial $P$ that satisfies $P(0) = 1$ .
75
+
76
+ Proposition 2.1. (Hestenes et al., 1952) If the sequence $(\pmb{x}_t)_{t\in \mathbb{Z}_+}$ is generated by a first-order method, then there exist residual polynomials $P_{t}$ , each one of degree at most $t$ , verifying
77
+
78
+ $$
79
+ \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = P _ {t} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right). \tag {3}
80
+ $$
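+
+ For instance (our illustration), plain gradient iterations $\boldsymbol{x}_{t+1} = \boldsymbol{x}_t - h F(\boldsymbol{x}_t)$ correspond to $P_t(\lambda) = (1 - h\lambda)^t$ , which is easy to verify numerically:
+
+ ```python
+ # Gradient steps on F(x) = A(x - x*) realize x_t - x* = (I - h*A)^t (x_0 - x*),
+ # i.e. the residual polynomial P_t(l) = (1 - h*l)^t evaluated at A.
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ d, h, T = 4, 0.05, 7
+ A = rng.normal(size=(d, d))
+ x_star = rng.normal(size=d)
+ x0 = rng.normal(size=d)
+
+ x = x0
+ for _ in range(T):
+     x = x - h * (A @ (x - x_star))
+
+ P_T_of_A = np.linalg.matrix_power(np.eye(d) - h * A, T)
+ print(np.allclose(x - x_star, P_T_of_A @ (x0 - x_star)))   # True
+ ```
+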
81
+
82
+ As we will see, optimal average-case method are strongly related to orthogonal polynomials. We first define the inner product between polynomials, where we use $z^*$ for the complex conjugate of $z \in \mathbb{C}$ .
83
+
84
+ Definition 3. For $P, Q \in \mathbb{R}[X]$ , we define the inner product $\langle \cdot, \cdot \rangle_{\mu}$ for a measure $\mu$ over $\mathbb{C}$ as
85
+
86
+ $$
87
+ \langle P, Q \rangle_{\mu} \stackrel{\text{def}}{=} \int_{\mathbb{C}} P(\lambda) Q(\lambda)^{*} \mathrm{d}\mu(\lambda). \tag{4}
88
+ $$
89
+
90
+ Definition 4. A sequence of polynomials $\{P_i\}$ is orthogonal (resp. orthonormal) w.r.t. $\langle \cdot, \cdot \rangle_{\mu}$ if
91
+
92
+ $$
93
+ \langle P_{i}, P_{i} \rangle_{\mu} > 0 \ (\text{resp.} = 1); \quad \langle P_{i}, P_{j} \rangle_{\mu} = 0 \ \text{if } i \neq j.
94
+ $$
95
+
96
+ # 2.2 EXPECTED SPECTRAL DISTRIBUTION
97
+
98
+ Following (Pedregosa & Scieur, 2020), we make the following assumption on the problem family.
99
+
100
+ Assumption 1. $\pmb{x}_0 - \pmb{x}^\star$ is independent of $\pmb{A}$ , and $\mathbb{E}_{(\pmb{x}_0, \pmb{x}^\star)}[(\pmb{x}_0 - \pmb{x}^\star)(\pmb{x}_0 - \pmb{x}^\star)^\top] = \frac{R^2}{d}\pmb{I}_d$ .
101
+
102
+ We will also require the following definitions to characterize the difficulty of a problem class. Let $\{\lambda_1,\dots ,\lambda_d\}$ be the eigenvalues of a matrix $A\in \mathbb{R}^{d\times d}$ . We define the empirical spectral distribution of $A$ as the probability measure
103
+
104
+ $$
105
+ \hat{\mu}_{\boldsymbol{A}}(\lambda) \stackrel{\text{def}}{=} \frac{1}{d} \sum_{i = 1}^{d} \delta_{\lambda_{i}}(\lambda), \tag{5}
106
+ $$
107
+
108
+ where $\delta_{\lambda_i}$ is the Dirac delta, a distribution equal to zero everywhere except at $\lambda_i$ and whose integral over the entire real line is equal to one. Note that with this definition, $\int_{\mathcal{D}}\mathrm{d}\hat{\mu}_A(\lambda)$ corresponds to the proportion of eigenvalues in $\mathcal{D}$ .
109
+
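+ As a numerical aside (our illustration, not from the paper), the empirical spectral distribution is easy to approximate by sampling matrices and pooling their eigenvalues:
+
+ ```python
+ # Pool the eigenvalues of several i.i.d. Gaussian matrices scaled by 1/sqrt(d);
+ # the pooled histogram approximates the expected spectral distribution, which
+ # here fills the unit disk (the circular law, cf. Section 4.1).
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ d, n_draws = 200, 20
+ eigs = np.concatenate([np.linalg.eigvals(rng.normal(size=(d, d)) / np.sqrt(d))
+                        for _ in range(n_draws)])
+ print(np.quantile(np.abs(eigs), [0.5, 0.95, 1.0]))   # moduli concentrate below ~1
+ ```
+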
110
+ When $\mathbf{A}$ is a matrix-valued random variable, $\hat{\mu}_{\mathbf{A}}$ is a measure-valued random variable. As such, we can define its expected spectral distribution
111
+
112
+ $$
113
+ \mu_{\mathbf{A}} \stackrel{\text{def}}{=} \mathbb{E}_{\mathbf{A}}[\hat{\mu}_{\mathbf{A}}], \tag{6}
114
+ $$
115
+
116
+ which by the Riesz representation theorem is the measure that verifies $\int f\mathrm{d}\mu_{\mathbf{A}} = \mathbb{E}_{\mathbf{A}}[\int f\mathrm{d}\hat{\mu}_{\mathbf{A}}]$ for all measurable $f$ . Surprisingly, the expected spectral distribution is the only characteristic required to design optimal algorithms in the average case.
117
+
118
+ # 2.3 EXPECTED ERROR OF FIRST-ORDER METHODS
119
+
120
+ In this section we provide an expression for the expected convergence in terms of the residual polynomial and the expected spectral distribution introduced in the previous section. To go further in the analysis, we have to assume that $A$ is a normal matrix.
121
+
122
+ Assumption 2. The (real) random matrix $\mathbf{A}$ is normal, that is, it verifies $\mathbf{A}\mathbf{A}^{\top} = \mathbf{A}^{\top}\mathbf{A}$ .
123
+
124
+ Normality is equivalent to $\mathbf{A}$ having the spectral decomposition $\mathbf{A} = \mathbf{U}\boldsymbol{\Lambda}\mathbf{U}^{*}$ , where $\mathbf{U}$ is unitary, i.e., $\mathbf{U}^{*}\mathbf{U} = \mathbf{U}\mathbf{U}^{*} = \mathbf{I}$ . We now have everything we need to write the expected error of a first-order algorithm applied to (NSO).
125
+
126
+ Theorem 2.1. Consider the application of a first-order method associated to the sequence of polynomials $\{P_t\}$ (Proposition 2.1) on the problem (NSO). Let $\mu$ be the expected spectral distribution of $A$ . Under Assumptions 1 and 2, we have
127
+
128
+ $$
129
+ \mathbb{E}\left[\operatorname{dist}\left(\boldsymbol{x}_{t}, \mathcal{X}^{\star}\right)\right] = R^{2} \int_{\mathbb{C} \backslash \{0\}} \left|P_{t}\right|^{2} \mathrm{d}\mu. \tag{7}
130
+ $$
131
+
132
+ Before designing optimal algorithms for specific distributions, we compare our setting with the average-case acceleration framework for minimization problems of Pedregosa & Scieur (2020), who proposed average-case optimal optimization algorithms.
133
+
134
+ # 2.4 DIFFICULTIES OF FIRST-ORDER METHODS ON GAMES AND RELATED WORK
135
+
136
+ This section compares our contribution with the existing framework of average-case optimal methods for quadratic minimization problems.
137
+
138
+ Definition 5. Let $\pmb{H} \in \mathbb{R}^{d \times d}$ be a random symmetric positive-definite matrix and $\pmb{x}^{\star} \in \mathbb{R}^{d}$ a random vector. These elements determine the following random quadratic minimization problem
139
+
140
+ $$
141
+ \min_{\boldsymbol{x} \in \mathbb{R}^{d}} \left\{f(\boldsymbol{x}) \stackrel{\text{def}}{=} \frac{1}{2}\left(\boldsymbol{x} - \boldsymbol{x}^{\star}\right)^{\top} \boldsymbol{H}\left(\boldsymbol{x} - \boldsymbol{x}^{\star}\right)\right\}. \tag{OPT}
142
+ $$
143
+
144
+ As in our paper, Pedregosa & Scieur (2020) find deterministic optimal first-order algorithms in expectation w.r.t. the matrix $\pmb{H}$ , the solution $\pmb{x}^{\star}$ , and the initialization $\pmb{x}_0$ . Since they work with problem (OPT), their problem is equivalent to (NSO) with the matrix $A = H$ . However, they make the stronger assumption that the matrix is symmetric, which implies normality. The normality assumption is restrictive in the context of games, as game operators are not always normal. However, this class is expressive enough to cover interesting cases, such as bilinear games, and our experiments show that our findings are also consistent with non-normal matrices.
145
+
146
+ Using orthogonal residual polynomials and spectral distributions, they derive an explicit formula for the expected error. Their result is similar to Theorem 2.1, but the major difference is the domain of the integral: a segment of the positive real line in convex optimization, but a region of the complex plane in our case. This region plays a crucial role in the convergence rate of first-order algorithms, as depicted in the work of Azizian et al. (2020); Bollapragada et al. (2018).
147
+
148
+ In the case of optimization methods, they show that average-case optimal schemes follow a simple three-term recurrence, arising from the three-term recurrence for residual orthogonal polynomials w.r.t. the measure $\lambda \mu (\lambda)$ . Indeed, by Theorem 2.1 the optimal method corresponds to the residual polynomial minimizing $\langle P,P\rangle_{\mu}$ , and the following result holds:
149
+
150
+ Theorem 2.2. (Fischer, 1996, §2.4) When $\mu$ is supported in the real line, the residual polynomial of degree $t$ minimizing $\langle P, P \rangle_{\mu}$ is given by the degree $t$ residual orthogonal polynomial w.r.t. $\lambda \mu(\lambda)$ .
151
+
152
+ However, the analogous result does not hold for general measures in $\mathbb{C}$ , and hence our arguments will make use of the following Theorem 2.3 instead, which links the residual polynomial of degree at most $t$ that minimizes $\langle P, P \rangle_{\mu}$ to the sequence of orthonormal polynomials for $\mu$ .
153
+
154
+ Theorem 2.3. [Theorem 1.4 of Assche (1997)] Let $\mu$ be a positive Borel measure in the complex plane. The minimum of the integral $\int_{\mathbb{C}}|P(\lambda)|^2\mathrm{d}\mu (\lambda)$ over residual polynomials $P$ of degree at most $t$ is uniquely attained by the polynomial
155
+
156
+ $$
157
+ P^{\star}(\lambda) = \frac{\sum_{k = 0}^{t} \phi_{k}(\lambda) \phi_{k}(0)^{*}}{\sum_{k = 0}^{t} |\phi_{k}(0)|^{2}}, \quad \text{with optimal value} \quad \int_{\mathbb{C}} |P^{\star}(\lambda)|^{2} \mathrm{d}\mu(\lambda) = \frac{1}{\sum_{k = 0}^{t} |\phi_{k}(0)|^{2}}, \tag{8}
158
+ $$
159
+
160
+ where $(\phi_k)_k$ is the orthonormal sequence of polynomials with respect to the inner product $\langle \cdot, \cdot \rangle_{\mu}$ .
161
+
162
+ In the next sections we consider cases where the optimal scheme is identifiable.
163
+
164
+ # 3 AVERAGE-CASE OPTIMAL METHODS FOR BILINEAR GAMES
165
+
166
+ We consider the problem of finding a Nash equilibrium of the zero-sum minimax game given by
167
+
168
+ $$
169
+ \min_{\boldsymbol{\theta}_{1}} \max_{\boldsymbol{\theta}_{2}} \ell\left(\boldsymbol{\theta}_{1}, \boldsymbol{\theta}_{2}\right) \stackrel{\text{def}}{=} \left(\boldsymbol{\theta}_{1} - \boldsymbol{\theta}_{1}^{\star}\right)^{\top} M \left(\boldsymbol{\theta}_{2} - \boldsymbol{\theta}_{2}^{\star}\right). \tag{9}
170
+ $$
171
+
172
+ Let $\pmb{\theta}_1, \pmb{\theta}_1^* \in \mathbb{R}^{d_1}, \pmb{\theta}_2, \pmb{\theta}_2^* \in \mathbb{R}^{d_2}, \pmb{M} \in \mathbb{R}^{d_1 \times d_2}$ and $d \stackrel{\mathrm{def}}{=} d_1 + d_2$ . The vector field of the game (Balduzzi et al., 2018) is defined as $F(\pmb{x}) = \pmb{A}(\pmb{x} - \pmb{x}^*)$ , where
173
+
174
+ $$
175
+ F \left(\boldsymbol {\theta} _ {1}, \boldsymbol {\theta} _ {2}\right) = \left[ \begin{array}{c} \nabla_ {\boldsymbol {\theta} _ {1}} \ell \left(\boldsymbol {\theta} _ {1}, \boldsymbol {\theta} _ {2}\right) \\ - \nabla_ {\boldsymbol {\theta} _ {2}} \ell \left(\boldsymbol {\theta} _ {1}, \boldsymbol {\theta} _ {2}\right) \end{array} \right] = \underbrace {\left[ \begin{array}{c c} 0 & M \\ - M ^ {\top} & 0 \end{array} \right]} _ {= A} \left(\underbrace {\left[ \begin{array}{l} \boldsymbol {\theta} _ {1} \\ \boldsymbol {\theta} _ {2} \end{array} \right]} _ {= x} - \underbrace {\left[ \begin{array}{l} \boldsymbol {\theta} _ {1} ^ {\star} \\ \boldsymbol {\theta} _ {2} ^ {\star} \end{array} \right]} _ {= x ^ {*}}\right) = A \left(x - x ^ {\star}\right). \tag {10}
176
+ $$
177
+
178
+ As before, $\mathcal{X}^{\star}$ denotes the set of points $\pmb{x}$ such that $F(\pmb{x}) = 0$ , which is exactly the set of Nash equilibria. If $\pmb{M}$ is sampled independently of $\pmb{x}_0, \pmb{x}^\star$ and $\pmb{x}_0 - \pmb{x}^\star$ has covariance $\frac{R^2}{d}\pmb{I}_d$ , Assumption 1 is fulfilled. Since $\pmb{A}$ is skew-symmetric, it is in particular normal, and Assumption 2 is also satisfied.
179
+
180
+ We now show that the optimal average-case algorithm to solve bilinear problems is Hamiltonian gradient descent with momentum, described below in its general form. Contrary to the methods in Azizian et al. (2020), the method we propose is anytime (and not only asymptotically) average-case optimal.
181
+
182
+ # Optimal average-case algorithm for bilinear games.
183
+
184
+ Initialization. $\pmb{x}_{-1} = \pmb{x}_0 = (\pmb{\theta}_{1,0},\pmb{\theta}_{2,0})$ , sequence $\{h_t,m_t\}$ given by Theorem 3.1.
185
+
186
+ Main loop. For $t \geq 0$ ,
187
+
188
+ $$
189
+ \boldsymbol{g}_{t} = F\left(\boldsymbol{x}_{t} - F\left(\boldsymbol{x}_{t}\right)\right) - F\left(\boldsymbol{x}_{t}\right) \quad \left(= \frac{1}{2} \nabla \|F\left(\boldsymbol{x}_{t}\right)\|^{2} \ \text{by (12)}\right) \tag{11}
190
+ $$
191
+
192
+ $$
193
+ \boldsymbol {x} _ {t + 1} = \boldsymbol {x} _ {t} - h _ {t + 1} \boldsymbol {g} _ {t} + m _ {t + 1} \left(\boldsymbol {x} _ {t - 1} - \boldsymbol {x} _ {t}\right)
194
+ $$
195
+
196
+ The quantity $\frac{1}{2}\| F(\pmb{x})\|^2$ is commonly known as the Hamiltonian of the game (Balduzzi et al., 2018), hence the name Hamiltonian gradient descent. Indeed, $\pmb{g}_t = \nabla \left(\frac{1}{2}\| F(\pmb{x}_t)\|^2\right)$ when $F$ is affine:
197
+
198
+ $$
199
+ \begin{array}{l} F (\boldsymbol {x} - F (\boldsymbol {x})) - F (\boldsymbol {x}) = \boldsymbol {A} (\boldsymbol {x} - \boldsymbol {A} (\boldsymbol {x} - \boldsymbol {x} ^ {\star}) - \boldsymbol {x} ^ {\star}) - \boldsymbol {A} (\boldsymbol {x} - \boldsymbol {x} ^ {\star}) = - \boldsymbol {A} (\boldsymbol {A} (\boldsymbol {x} - \boldsymbol {x} ^ {\star})) \\ = \boldsymbol {A} ^ {\top} (\boldsymbol {A} (\boldsymbol {x} - \boldsymbol {x} ^ {\star})) = \nabla \left(\frac {1}{2} \| \boldsymbol {A} (\boldsymbol {x} - \boldsymbol {x} ^ {\star}) \| ^ {2}\right) = \nabla \left(\frac {1}{2} \| F (\boldsymbol {x}) \| ^ {2}\right). \tag {12} \\ \end{array}
200
+ $$
201
+
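+ A quick numerical check of (12) on a random bilinear game (our illustration):
+
+ ```python
+ # For skew-symmetric A, F(x - F(x)) - F(x) = -A^2 (x - x*) = A^T A (x - x*),
+ # i.e. the gradient of the Hamiltonian ||F(x)||^2 / 2.
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ d1, d2 = 3, 5
+ M = rng.normal(size=(d1, d2))
+ A = np.block([[np.zeros((d1, d1)), M], [-M.T, np.zeros((d2, d2))]])
+ x_star = rng.normal(size=d1 + d2)
+ F = lambda z: A @ (z - x_star)
+
+ x = rng.normal(size=d1 + d2)
+ g = F(x - F(x)) - F(x)
+ print(np.allclose(g, A.T @ A @ (x - x_star)))   # True
+ ```
+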
202
+ Theorem 3.1 below shows that (11) is indeed the optimal average-case method associated with the minimization problem $\min_{\pmb{x}}\left(\frac{1}{2}\| F(\pmb{x})\| ^2\right)$ .
203
+
204
+ Theorem 3.1. Suppose that Assumption 1 holds and that the expected spectral distribution of $MM^{\top}$ is absolutely continuous with respect to the Lebesgue measure. Then, the method (11) is average-case optimal for bilinear games when $h_t$ , $m_t$ are chosen as the coefficients of the average-case optimal method for the minimization of $\frac{1}{2}\| F(\pmb{x})\|^2$ .
205
+
206
+ How to find the optimal coefficients? Since $\frac{1}{2}\| F(\pmb{x})\|^2$ is a quadratic problem, the coefficients $\{h_t, m_t\}$ can be found using the average-case framework for quadratic minimization problems of (Pedregosa & Scieur, 2020, Theorem 3.1).
207
+
208
+ Proof sketch. When computing the optimal iterates $\pmb{x}_t - \pmb{x}^\star = P_t(\pmb{A})(\pmb{x}_0 - \pmb{x}^\star)$ , we find that the residual orthogonal polynomial $P_t$ behaves differently depending on whether $t$ is even or odd.
209
+
210
+ - Case 1: $t$ is even. In this case, we observe that the polynomial $P_{t}(\mathbf{A})$ can be expressed as $Q_{t/2}(-\mathbf{A}^{2})$ , where $(Q_{t})_{t \geq 0}$ is the sequence of orthogonal polynomials w.r.t. the expected spectral density of $-\mathbf{A}^{2}$ , whose eigenvalues are real and positive. This gives the recursion in (11).
211
+ - Case 2: $t$ is odd. There is no residual orthogonal polynomial of degree $t$ for $t$ odd. Instead, odd iterations do correspond to the intermediate computation of $g_t$ in (11), but not to an actual iterate.
212
+
213
+ # 3.1 PARTICULAR CASE: M WITH I.I.D. COMPONENTS
214
+
215
+ We now derive the optimal method when the entries of $M$ are sampled i.i.d. For simplicity, we order the players such that $d_{1} \leq d_{2}$ .
216
+
217
+ Assumption 3. Assume that each component of $M$ is sampled i.i.d. from a distribution with mean 0 and variance $\sigma^2$ , and that we take $d_1, d_2 \to \infty$ with $\frac{d_1}{d_2} \to r < 1$ .
218
+
219
+ In this case, the spectral distribution of $\frac{1}{d_2} MM^\top$ tends to the Marchenko-Pastur law, supported on $[\ell, L]$ and with density:
220
+
221
+ $$
222
+ \rho_{MP}(\lambda) \stackrel{\text{def}}{=} \frac{\sqrt{(L - \lambda)(\lambda - \ell)}}{2\pi \sigma^{2} r \lambda}, \quad \text{where } L \stackrel{\text{def}}{=} \sigma^{2}(1 + \sqrt{r})^{2}, \ \ell \stackrel{\text{def}}{=} \sigma^{2}(1 - \sqrt{r})^{2}. \tag{13}
223
+ $$
224
+
225
+ Proposition 3.1. When $M$ satisfies Assumption 3, the optimal parameters of scheme (11) are
226
+
227
+ $$
228
+ h_{t} = -\frac{\delta_{t}}{\sigma^{2}\sqrt{r}}, \quad m_{t} = 1 + \rho \delta_{t}, \quad \text{where } \rho = \frac{1 + r}{\sqrt{r}}, \ \delta_{t} = (-\rho - \delta_{t - 1})^{-1}, \ \delta_{0} = 0. \tag{14}
229
+ $$
230
+
231
+ Proof. By Theorem 3.1, the problem reduces to finding the optimal average-case algorithm for the problem $\min_{\boldsymbol{x}} \frac{1}{2} \| F(\boldsymbol{x}) \|^2$ . Since the expected spectral distribution of $\frac{1}{d_2} MM^\top$ is the Marchenko-Pastur law, we can use the optimal algorithm from (Pedregosa & Scieur, 2020, Section 5).
232
+
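+ The recursion (14) is straightforward to implement; below is a short sketch (ours; $\sigma = 1$ is a placeholder choice):
+
+ ```python
+ # Coefficients h_t, m_t of scheme (11) under the Marchenko-Pastur limit, eq. (14).
+ import numpy as np
+
+ def mp_coefficients(T, r, sigma=1.0):
+     rho = (1 + r) / np.sqrt(r)
+     delta, hs, ms = 0.0, [], []
+     for _ in range(T):                    # delta_0 = 0, then the recursion in (14)
+         delta = 1.0 / (-rho - delta)
+         hs.append(-delta / (sigma ** 2 * np.sqrt(r)))
+         ms.append(1.0 + rho * delta)
+     return hs, ms
+
+ hs, ms = mp_coefficients(T=6, r=0.5)
+ print([round(h, 3) for h in hs])
+ print([round(m, 3) for m in ms])
+ ```
+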
233
+ # 4 GENERAL AVERAGE-CASE OPTIMAL METHOD FOR NORMAL OPERATORS
234
+
235
+ In this section we derive general average-case optimal first-order methods for normal operators. First, we need to assume the existence of a three-term recurrence for residual orthogonal polynomials (Assumption 4). As mentioned in subsection 2.4, for general measures in the complex plane, the existence of a three-term recurrence of orthogonal polynomials is not ensured. In Proposition B.3 in Appendix B we give a sufficient condition for its existence, and in the next subsection we will show specific examples where the residual orthogonal polynomials satisfy the three-term recurrence.
236
+
237
+ Assumption 4 (Simplifying assumption). The sequence of residual polynomials $\{\psi_t\}_{t\geq 0}$ orthogonal w.r.t. the measure $\mu$ , defined on the complex plane, admits the three-term recurrence
238
+
239
+ $$
240
+ \psi_ {- 1} = 0, \quad \psi_ {0} = 1, \quad \psi_ {t} (\lambda) = \left(a _ {t} + b _ {t} \lambda\right) \psi_ {t - 1} (\lambda) + (1 - a _ {t}) \psi_ {t - 2} (\lambda). \tag {15}
241
+ $$
242
+
243
+ Under Assumption 4, Theorem 4.1 shows that the optimal algorithm can also be written as an average of iterates following a simple three-term recurrence.
244
+
245
+ Theorem 4.1. Under Assumption 4 and the assumptions of Theorem 2.1, the following algorithm is optimal in the average case, with $\mathbf{y}_{-1} = \mathbf{y}_0 = \mathbf{x}_0$ :
246
+
247
+ $$
248
+ \boldsymbol {y} _ {t} = a _ {t} \boldsymbol {y} _ {t - 1} + (1 - a _ {t}) \boldsymbol {y} _ {t - 2} + b _ {t} F (\boldsymbol {y} _ {t - 1})
249
+ $$
250
+
251
+ $$
252
+ \boldsymbol {x} _ {t} = \frac {B _ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {x} _ {t - 1} + \frac {\beta_ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {y} _ {t}, \quad \beta_ {t} = \phi_ {t} ^ {2} (0), \quad B _ {t} = B _ {t - 1} + \beta_ {t - 1}, \quad B _ {0} = 0. \tag {16}
253
+ $$
254
+
255
+ where $(\phi_k(0))_{k\geq 0}$ can be computed using the three-term recurrence (upon normalization). Moreover, $\mathbb{E}_{(\pmb {A},\pmb{x}^{\star},\pmb {x}_0)}\mathrm{dist}(\pmb {x}_t,\mathcal{X}^\star)$ converges to zero at rate $1 / B_{t}$ .
256
+
257
+ Remark. Notice that it is not immediate that (16) fulfills the definition of first-order algorithms stated in (2), as $\pmb{y}_t$ is clearly a first-order method but $\pmb{x}_t$ is an average of the iterates $\pmb{y}_t$ . Using that $F$ is an affine function we see that $\pmb{x}_t$ indeed fulfills (2).
258
+
259
+ Remark. Assumption 4 is needed for the sequence $(\pmb{y}_t)_{t\geq 0}$ to be computable using a three-term recurrence. However, for some distributions, the associated sequence of orthogonal polynomials may admit another recurrence that does not satisfy Assumption 4.
260
+
261
+ # 4.1 CIRCULAR SPECTRAL DISTRIBUTIONS
262
+
263
+ In random matrix theory, the circular law states that if $\mathbf{A}$ is an $n \times n$ matrix with i.i.d. entries of mean $C$ and variance $R^2 / n$ , then as $n \to \infty$ the spectral distribution of $\mathbf{A}$ tends to the uniform distribution on $D_{C,R}$ . In this subsection we apply Theorem 4.1 to a class of spectral distributions specified by Assumption 5, which includes the uniform distribution on $D_{C,R}$ . Even though random matrices with i.i.d. entries are not normal, we will see in Section 6 that the empirical results for such matrices are consistent with our theoretical results under the normality assumption.
264
+
265
+ Assumption 5. Assume that the expected spectral distribution $\mu_A$ is supported in the complex plane on the disk $D_{C,R}$ with center $C\in \mathbb{R}, C > 0$ and radius $R < C$ . Moreover, assume that the spectral density is circularly symmetric, i.e., there exists a probability measure $\mu_R$ supported on $[0,R]$ such that for all measurable $f$ and $r\in [0,R]$ , $\mathrm{d}\mu_A(C + r e^{i\theta}) = \frac{1}{2\pi}\mathrm{d}\theta \mathrm{d}\mu_R(r)$ .
266
+
267
+ Proposition 4.1. If $\mu$ satisfies Assumption 5, the sequence of orthonormal polynomials $(\phi_t)_{t\geq 0}$ is given by
268
+
269
+ $$
270
+ \phi_{t}(\lambda) = \frac{(\lambda - C)^{t}}{K_{t,R}}, \quad \text{where } K_{t,R} = \sqrt{\int_{0}^{R} r^{2t} \mathrm{d}\mu_{R}(r)}. \tag{17}
271
+ $$
272
+
273
+ Example. The uniform distribution on $D_{C,R}$ corresponds to $\mathrm{d}\mu_R = \frac{2r}{R^2}\mathrm{d}r$ , for which $K_{t,R} = R^{t} / \sqrt{t + 1}$ .
274
+
275
+ From Proposition 4.1, the sequence of residual polynomials is given by $\phi_t(\lambda) / \phi_t(0) = \left(1 - \frac{\lambda}{C}\right)^t$ , which implies that Assumption 4 is fulfilled with $a_{t} = 1, b_{t} = -\frac{1}{C}$ . Thus, by Theorem 4.1 we have
276
+
277
+ Theorem 4.2. Given an initialization $\pmb{x}_0$ (with $\pmb{y}_0 = \pmb{x}_0$ ), if Assumption 5 is fulfilled with $R < C$ and the assumptions of Theorem 2.1 hold, then the average-case optimal first-order method is
278
+
279
+ $$
280
+ \boldsymbol {y} _ {t} = \boldsymbol {y} _ {t - 1} - \frac {1}{C} F (\boldsymbol {y} _ {t - 1}), \quad \beta_ {t} = C ^ {2 t} / K _ {t, R} ^ {2}, \quad B _ {t} = B _ {t - 1} + \beta_ {t - 1},
281
+ $$
282
+
283
+ $$
284
+ \boldsymbol {x} _ {t} = \frac {B _ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {x} _ {t - 1} + \frac {\beta_ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {y} _ {t}. \tag {18}
285
+ $$
286
+
287
+ Moreover, $\mathbb{E}_{(A,\pmb{x}^{\star},\pmb{x}_0)}\mathrm{dist}(\pmb{x}_t,\mathcal{X}^\star)$ converges to zero at rate $1 / B_{t}$ .
288
+
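+ As a sanity check, a short sketch of (18) for the uniform law on $D_{C,R}$ (ours; the matrix below is a shifted i.i.d. Gaussian matrix and hence non-normal, matching the setting of the experiments in Section 6, while $C$ , $R$ , $T$ are placeholder choices):
+
+ ```python
+ # Disk method (18) with the uniform spectral law on D_{C,R}, for which
+ # K_{t,R}^2 = R^(2t) / (t + 1) and hence beta_t = (t + 1) * (C / R)^(2t).
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ d, C, R, T = 500, 3.0, 1.0, 25
+ A = C * np.eye(d) + (R / np.sqrt(d)) * rng.normal(size=(d, d))  # eigenvalues ~ D_{C,R}
+ x_star = rng.normal(size=d)
+ F = lambda z: A @ (z - x_star)
+
+ y = x = rng.normal(size=d)
+ B = 0.0
+ for t in range(1, T + 1):
+     y = y - F(y) / C                          # plain gradient step with stepsize 1/C
+     B = B + t * (C / R) ** (2 * (t - 1))      # B_t = B_{t-1} + beta_{t-1}
+     beta = (t + 1) * (C / R) ** (2 * t)       # beta_t
+     x = (B / (B + beta)) * x + (beta / (B + beta)) * y
+ print(np.linalg.norm(x - x_star), np.linalg.norm(y - x_star))  # x_t typically ahead
+ ```
+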
289
+ We now compare Theorem 4.2 with worst-case methods studied in Azizian et al. (2020). They give a worst-case convergence lower bound of $(R / C)^{2t}$ on the quantity $\mathrm{dist}(\pmb {z}_t,\mathcal{X}^\star)$ for first-order methods $(\pmb {z}_t)_{t\geq 0}$ on matrices with eigenvalues in the disk $D_{C,R}$ . By the classical analysis of first-order methods, this rate is achievable by gradient descent with stepsize $1 / C$ , i.e. the iterates $\pmb{y}_{t}$ defined in (18). However, by equation (79) in Proposition D.3 we have that under slight additional assumptions (those of Proposition 5.2), $\lim_{t\to \infty}\mathbb{E}\left[\mathrm{dist}(\pmb{x}_t,\mathcal{X}^\star)\right] / \mathbb{E}\left[\mathrm{dist}(\pmb{y}_t,\mathcal{X}^\star)\right] = 1 - \frac{R^2}{C^2}$ holds. That is, the average-case optimal algorithm outperforms gradient descent by a constant factor depending on the conditioning $R / C$ .
290
+
291
+ # 5 ASYMPTOTIC BEHAVIOR
292
+
293
+ The recurrence coefficients of the average-case optimal method typically converge to limiting values as $t \to \infty$ , which gives an "average-case asymptotically optimal first-order method" with constant coefficients. For the case of symmetric operators with spectrum in $[\ell, L]$ , Scieur & Pedregosa (2020) show that under mild conditions, the asymptotically optimal algorithm is the Polyak momentum method with coefficients depending only on $\ell$ and $L$ . For bilinear games, since the average-case optimal algorithm is the average-case optimal algorithm of an associated minimization problem, we can use their framework to obtain the asymptotic algorithm (see Theorem 3 of Scieur & Pedregosa (2020)).
294
+
295
+ Proposition 5.1. Assume that the expected spectral density $\mu_{MM^{\top}}$ of $MM^{\top}$ is supported in $[\ell, L]$ for $0 < \ell < L$ , and strictly positive in this interval. Then, the asymptotically optimal algorithm for bilinear games is the following version of Polyak momentum:
296
+
297
+ $$
298
+ \boldsymbol {g} _ {t} = F \left(\boldsymbol {x} _ {t} - F \left(\boldsymbol {x} _ {t}\right)\right) - F \left(\boldsymbol {x} _ {t}\right)
299
+ $$
300
+
301
+ $$
302
+ \boldsymbol {x} _ {t + 1} = \boldsymbol {x} _ {t} + \left(\frac {\sqrt {L} - \sqrt {\ell}}{\sqrt {L} + \sqrt {\ell}}\right) ^ {2} \left(\boldsymbol {x} _ {t - 1} - \boldsymbol {x} _ {t}\right) - \left(\frac {2}{\sqrt {L} + \sqrt {\ell}}\right) ^ {2} \boldsymbol {g} _ {t} \tag {19}
303
+ $$
304
+
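+ A short sketch of (19) (ours; for illustration $\ell$ and $L$ are taken as the extreme eigenvalues of a sampled $MM^{\top}$ rather than the edges of the expected spectral density):
+
+ ```python
+ # Polyak momentum on the Hamiltonian of a bilinear game, as in (19).
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+ d1, d2 = 40, 80
+ M = rng.normal(size=(d1, d2)) / np.sqrt(d2)
+ A = np.block([[np.zeros((d1, d1)), M], [-M.T, np.zeros((d2, d2))]])
+ x_star = rng.normal(size=d1 + d2)
+ F = lambda z: A @ (z - x_star)
+
+ evs = np.linalg.eigvalsh(M @ M.T)              # ascending eigenvalues of M M^T
+ ell, L = evs[0], evs[-1]
+ m = ((np.sqrt(L) - np.sqrt(ell)) / (np.sqrt(L) + np.sqrt(ell))) ** 2
+ h = (2.0 / (np.sqrt(L) + np.sqrt(ell))) ** 2
+
+ x_prev = x = rng.normal(size=d1 + d2)
+ for _ in range(200):
+     g = F(x - F(x)) - F(x)                     # Hamiltonian gradient, cf. (11)-(12)
+     x, x_prev = x + m * (x_prev - x) - h * g, x
+ print(np.linalg.norm(F(x)))                    # should be tiny
+ ```
+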
305
+ Notice that the algorithm in (19) is the worst-case optimal algorithm from Proposition 4 of Azizian et al. (2020). For the case of circularly symmetric spectral densities with support on disks, we can also compute the asymptotically optimal algorithm.
306
+
307
+ Proposition 5.2. Suppose that the assumptions of Theorem 4.2 hold with $\mu_R \in \mathcal{P}([0, R])$ fulfilling $\mu_R([r, R]) = \Omega((R - r)^\kappa)$ for $r$ in $[r_0, R]$ for some $r_0 \in [0, R)$ and for some $\kappa \in \mathbb{Z}$ . Then, the average-case asymptotically optimal algorithm is, with $\pmb{y}_0 = \pmb{x}_0$ :
308
+
309
+ $$
310
+ \boldsymbol {y} _ {t} = \boldsymbol {y} _ {t - 1} - \frac {1}{C} F (\boldsymbol {y} _ {t - 1}),
311
+ $$
312
+
313
+ $$
314
+ \boldsymbol {x} _ {t} = \left(\frac {R}{C}\right) ^ {2} \boldsymbol {x} _ {t - 1} + \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \boldsymbol {y} _ {t}. \tag {20}
315
+ $$
316
+
317
+ Moreover, the convergence rate of this algorithm is asymptotically the same as that of the optimal algorithm in Theorem 4.2. Namely, $\lim_{t\to \infty}\mathbb{E}\left[\mathrm{dist}(\pmb{x}_t,\mathcal{X}^\star)\right]B_t = 1$ .
318
+
319
+ The condition on $\mu_R$ simply rules out cases in which the spectral density has exponentially small mass near $R$ . It is remarkable that in algorithm (20) the averaging coefficients can be expressed so simply in terms of the quantity $R / C$ . Notice also that while, by definition, the convergence rate of this algorithm is slower than that of the optimal algorithm in Theorem 4.2, both rates match in the limit, meaning that the asymptotically optimal algorithm also outperforms gradient descent by a constant factor $1 - \frac{R^2}{C^2}$ as $t\to \infty$ .
322
+
323
+ # 6 EXPERIMENTS
324
+
325
+ We compare some of the proposed methods on settings with varying degrees of mismatch with our assumptions.
326
+
327
+ Bilinear Games. We consider min-max bilinear problems of the form (10), where the entries of $M$ are generated i.i.d. from a standard Gaussian distribution. We vary the ratio parameter $r = d / n$ for $d = 1000$ and compare the average-case optimal method of Theorem 3.1 and Proposition 5.1, the asymptotic worst-case optimal method of (Azizian et al., 2020), and extragradient (Korpelevich, 1976). In all cases, we use the convergence-rate-optimal step-size, assuming knowledge of the edges of the spectral distribution.
328
+
329
+ The spectral density for these problems is displayed in the first row of Figure 1 and the benchmark results on the second row. Average-case optimal methods always outperform other methods, and the largest gain is in the ill-conditioned regime ( $r \approx 1$ ).
330
+
331
+ Circular Distribution. For our second experiment we choose $A$ as a matrix with i.i.d. Gaussian random entries, so that the support of its eigenvalue distribution is a disk. Note that $A$ does not satisfy the normality assumption of Assumption 2. Figure 1 (third row) compares the average-case optimal methods from Theorem 4.2 and Proposition 5.2 on two datasets with different levels of conditioning. Note that the methods converge despite the violation of Assumption 2, suggesting a broader applicability than the one proven in this paper. We leave this investigation for future work.
332
+
333
+ # 7 DISCUSSION AND FUTURE RESEARCH DIRECTIONS
334
+
335
+ In this paper, we presented a general framework for the design of average-case optimal algorithms for affine operators $F$ whose underlying matrix is possibly non-symmetric. However, our approach presents some limitations, the major one being the restriction to normal matrices. Fortunately, the numerical experiments above suggest that this assumption can be relaxed; developing a theory without it is left for future work. Another avenue for future work is to analyze the case in which the operator $F$ is non-linear, as well as the case in which it is accessed through a stochastic estimator (as done by (Loizou et al., 2020) for the worst-case analysis).
336
+
337
+ # ACKNOWLEDGEMENTS
338
+
339
+ C. Domingo-Enrich has been partially funded by "la Caixa" Foundation (ID 100010434), under agreement LCF/BQ/AA18/11680094, and partially funded by the NYU Computer Science Department.
340
+
341
+ ![](images/92b054738669f3580bf73b96ead85508300536f2f82fc2c72c0f7155749651b8.jpg)
342
+ Bilinear Problems
343
+ Figure 1: Benchmarks and spectral density for different games. Top row: spectral density associated with bilinear games for varying values of the ratio parameter $r = n / d$ (the x-axis represents the imaginary line). Second row: Benchmarks. Average-case optimal methods always outperform other methods, and the largest gain is in the ill-conditioned regime ( $r \approx 1$ ). Third row. Benchmarks (columns 1 and 3) and eigenvalue distribution of a design matrix generated with iid entries for two different degrees of conditioning. Despite the normality assumption not being satisfied, we still observe an improvement of average-case optimal methods vs worst-case optimal ones.
344
+
345
+ # REFERENCES
346
+
347
+ Walter Van Assche. Orthogonal polynomials in the complex plane and on the real line. In Fields Institute Communications, volume 14, pp. 211-245, 1997.
348
+ Waiss Azizian, Damien Scieur, Ioannis Mitliagkas, Simon Lacoste-Julien, and Gauthier Gidel. Accelerating smooth games by manipulating spectral shapes. In Proceedings of Machine Learning Research, 2020.
349
+ David Balduzzi, Sébastien Racanière, James Martens, Jakob Foerster, Karl Tuyls, and Thore Graepel. The mechanics of $n$ -player differentiable games. In Proceedings of the International Conference on Machine Learning, 2018.
350
+ Raphael Berthier, Francis Bach, and Pierre Gaillard. Accelerated gossip in networks of given dimension using Jacobi polynomial iterations. SIAM Journal on Mathematics of Data Science, 2 (1):24-47, 2020.
351
+ Raghu Bollapragada, Damien Scieur, and Alexandre d'Aspremont. Nonlinear acceleration of momentum and primal-dual algorithms. arXiv preprint arXiv:1810.04539, 2018.
352
+ John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12:2121-2159, 2011.
353
+ Bernd Fischer. Polynomial Based Iteration Methods for Symmetric Linear Systems. Vieweg+Teubner Verlag, 1996.
354
+ Magnus R. Hestenes and Eduard Stiefel. Methods of conjugate gradients for solving linear systems. Journal of Research of the National Bureau of Standards, 1952.
355
+ Jonathan Katz and Yehuda Lindell. Introduction to modern cryptography. CRC press, 2014.
356
+ Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015.
357
+ Donald Knuth. The art of computer programming, volume 3. Pearson Education, 1997.
358
+ G. M. Korpelevich. The extragradient method for finding saddle points and other problems. Matecon, 12, 1976.
359
+ Jonathan Lacotte and Mert Pilanci. Optimal randomized first-order methods for least-squares problems. In Proceedings of the 37th International Conference on Machine Learning, 2020.
360
+ Nicolas Loizou, Hugo Berard, Alexia Jolicoeur-Martineau, Pascal Vincent, Simon Lacoste-Julien, and Ioannis Mitliagkas. Stochastic Hamiltonian gradient methods for smooth games. arXiv preprint arXiv:2007.04202, 2020.
361
+ Arkadi Nemirovski. Information-based complexity of convex programming. Lecture Notes, 1995.
362
+ Yurii Nesterov. Introductory Lectures on Convex Optimization. Springer, 2004.
363
+ Courtney Paquette, Bart van Merrienboer, and Fabian Pedregosa. Halting time is predictable for large models: A universality property and average-case analysis. arXiv preprint arXiv:2006.04299, 2020.
364
+ Fabian Pedregosa and Damien Scieur. Average-case acceleration through spectral density estimation. In Proceedings of the 37th International Conference on Machine Learning, 2020.
365
+ Damien Scieur and Fabian Pedregosa. Universal average-case optimality of Polyak momentum. In Proceedings of the 37th International Conference on Machine Learning, 2020.
366
+
367
+ # A PROOF OF THEOREM 2.1
368
+
369
+ # A.1 PRELIMINARIES
370
+
371
+ Before proving Theorem 2.1, we quickly analyze the distance function (1), recalled below,
372
+
373
+ $$
374
+ \operatorname {d i s t} (\boldsymbol {x}, \mathcal {X} ^ {\star}) \stackrel {{\mathrm {d e f}}} {{=}} \min _ {\boldsymbol {v} \in \mathcal {X} ^ {\star}} \| \boldsymbol {x} - \boldsymbol {v} \| ^ {2}.
375
+ $$
376
+
377
+ The definition of the distance function is not practical for the theoretical analysis. Fortunately, it is possible to find a simpler expression that uses the orthogonal projection matrix $\Pi$ onto the kernel $\mathrm{Ker}(\boldsymbol{A})$ . Since $\Pi$ is an orthogonal projection matrix onto the kernel of a linear transformation, it satisfies
378
+
379
+ $$
380
+ \Pi = \Pi^ {T}, \quad \Pi^ {2} = \Pi , \quad \text {and} \quad A \Pi = 0. \tag {21}
381
+ $$
382
+
383
+ The normality assumption on $\mathbf{A}$ also implies that
384
+
385
+ $$
386
+ \Pi \boldsymbol {A} = 0. \tag {22}
387
+ $$
388
+
389
+ Indeed, the spectral decomposition of $\mathbf{A}$ is
390
+
391
+ $$
392
+ \boldsymbol {A} = [ \boldsymbol {U} _ {1} | \boldsymbol {U} _ {2} ] \left[ \begin{array}{c c} \boldsymbol {\Lambda} & 0 \\ 0 & 0 \end{array} \right] [ \boldsymbol {U} _ {1} | \boldsymbol {U} _ {2} ] ^ {*},
393
+ $$
394
+
395
+ and then $\Pi = U_2U_2^*$ . The next proposition uses $\Pi$ to derive an explicit expression for (1).
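+ 
+ Before the proposition, here is a quick numerical sanity check of (21) and (22) (a sketch we add for illustration; the construction of a normal matrix with a nontrivial kernel is our own, not taken from the paper):
+ 
+ ```python
+ import numpy as np
+ 
+ # Normal matrix with a k-dimensional kernel: A = U diag(Lambda, 0) U*,
+ # with U unitary; Pi = U2 U2* projects orthogonally onto Ker(A).
+ rng = np.random.default_rng(2)
+ d, k = 8, 3
+ U, _ = np.linalg.qr(rng.standard_normal((d, d))
+                     + 1j * rng.standard_normal((d, d)))
+ lam = np.concatenate([rng.standard_normal(d - k)
+                       + 1j * rng.standard_normal(d - k), np.zeros(k)])
+ A = U @ np.diag(lam) @ U.conj().T          # normal: A A* = A* A
+ Pi = U[:, d - k:] @ U[:, d - k:].conj().T
+ 
+ print(np.allclose(Pi.conj().T, Pi))        # Pi is self-adjoint     (21)
+ print(np.allclose(Pi @ Pi, Pi))            # Pi^2 = Pi              (21)
+ print(np.allclose(A @ Pi, 0))              # A Pi = 0               (21)
+ print(np.allclose(Pi @ A, 0))              # Pi A = 0               (22)
+ ```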
396
+
397
+ Proposition A.1. We have that
398
+
399
+ $$
400
+ \operatorname {d i s t} (\boldsymbol {y}, \mathcal {X} ^ {\star}) = \| (\boldsymbol {I} - \Pi) (\boldsymbol {y} - \boldsymbol {x} ^ {\star}) \| ^ {2} \quad \forall \boldsymbol {x} ^ {\star} \in \mathcal {X} ^ {\star}.
401
+ $$
402
+
403
+ Proof. We first parametrize the solution set $\mathcal{X}^{\star}$ . By definition, we have
404
+
405
+ $$
406
+ \mathcal {X} ^ {\star} = \left\{\boldsymbol {x}: \boldsymbol {A} \left(\boldsymbol {x} - \boldsymbol {x} ^ {\star}\right) = 0 \right\}.
407
+ $$
408
+
409
+ which can be written in terms of the kernel of $A$ as
410
+
411
+ $$
412
+ \mathcal {X} ^ {\star} = \left\{\boldsymbol {x} ^ {\star} + \Pi \boldsymbol {w}: \boldsymbol {w} \in \mathbb {R} ^ {d} \right\}.
413
+ $$
414
+
415
+ From this, we can rewrite the distance function (1) as
416
+
417
+ $$
418
+ \operatorname {d i s t} (\boldsymbol {y}, \mathcal {X} ^ {\star}) = \min _ {\boldsymbol {w} \in \mathbb {R} ^ {d}} \| \boldsymbol {y} - (\boldsymbol {x} ^ {\star} + \Pi \boldsymbol {w}) \| ^ {2}.
419
+ $$
420
+
421
+ The minimum can be attained at different points, in particular at $\pmb{w} = \pmb{y} - \pmb{x}^{\star}$ , which yields $\boldsymbol{y} - (\boldsymbol{x}^{\star} + \Pi\boldsymbol{w}) = (\boldsymbol{I} - \Pi)(\boldsymbol{y} - \boldsymbol{x}^{\star})$ and proves the statement.
422
+
423
+ We now simplify further the result of the previous proposition in the case where $\boldsymbol{x}_t$ is generated by a first-order method.
424
+
425
+ Proposition A.2. For every iterate $\pmb{x}_t$ of a first-order method, i.e., $\pmb{x}_t$ satisfies
426
+
427
+ $$
428
+ \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = P _ {t} (\boldsymbol {A}) (\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}), \quad \deg (P _ {t}) \leq t, \quad P _ {t} (0) = 1,
429
+ $$
430
+
431
+ we have that
432
+
433
+ $$
434
+ \operatorname {d i s t} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) = \left\| \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} \right\| ^ {2} - \left\| \Pi \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \right\| ^ {2}.
435
+ $$
436
+
437
+ Proof. We start with the result of Proposition A.1,
438
+
439
+ $$
440
+ \operatorname {d i s t} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) = \left\| (\boldsymbol {I} - \Pi) \left(\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star}\right) \right\| ^ {2}.
441
+ $$
442
+
443
+ The norm can be split into
444
+
445
+ $$
446
+ \begin{array}{l} \| (\boldsymbol {I} - \Pi) (\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star}) \| ^ {2} = \| \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} \| ^ {2} + \| \underbrace {\Pi^ {2}} _ {= \Pi \text { by (21)}} (\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star}) \| ^ {2} - 2 \| \Pi (\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star}) \| ^ {2} \\ = \left\| \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} \right\| ^ {2} - \left\| \Pi \left(\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star}\right) \right\| ^ {2}. \\ \end{array}
447
+ $$
448
+
449
+ Since $x_{t}$ is generated by a first order method, we have
450
+
451
+ $$
452
+ \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = P _ {t} (\boldsymbol {A}) (\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}), \quad P _ {t} (0) = 1.
453
+ $$
454
+
455
+ Since $P_t(0) = 1$ , the polynomial can be factorized as $P_t(\boldsymbol{A}) = \boldsymbol{I} + \boldsymbol{A} Q_{t - 1}(\boldsymbol{A})$ , with $Q_{t - 1}$ a polynomial of degree $t - 1$ . Therefore, $\| \Pi (\pmb{x}_t - \pmb{x}^\star)\|^2$ reads
456
+
457
+ $$
458
+ \begin{array}{l} \left\| \Pi (\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star}) \right\| ^ {2} = \left\| \Pi \left(\boldsymbol {I} + \boldsymbol {A} Q _ {t - 1} (\boldsymbol {A})\right) (\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}) \right\| ^ {2} \\ = \| \Pi (\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}) + \underbrace {\Pi \boldsymbol {A}} _ {= 0 \text { by (22)}} Q _ {t - 1} (\boldsymbol {A}) (\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}) \| ^ {2} \\ = \left\| \Pi \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \right\| ^ {2}, \\ \end{array}
459
+ $$
460
+
461
+ which proves the statement.
462
+
463
+ ![](images/08c544e5dba4f818848d85726015039694724f159fe7e44d78bbc5eedf180ce4.jpg)
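+ 
+ The identity of Proposition A.2 can be verified numerically; below is a small sketch (our own illustration, with an assumed fixed-stepsize iteration) on a singular symmetric, hence normal, matrix:
+ 
+ ```python
+ import numpy as np
+ 
+ # Check dist(x_t, X*) = ||x_t - x*||^2 - ||Pi (x_0 - x*)||^2 for a
+ # first-order method (here: x_{t+1} = x_t - 0.5 F(x_t), F(x) = A x + b).
+ rng = np.random.default_rng(3)
+ d, k = 10, 4
+ Q, _ = np.linalg.qr(rng.standard_normal((d, d)))
+ lam = np.concatenate([rng.uniform(0.5, 1.5, d - k), np.zeros(k)])
+ A = Q @ np.diag(lam) @ Q.T                 # symmetric, singular
+ Pi = Q[:, d - k:] @ Q[:, d - k:].T         # projector onto Ker(A)
+ x_star = rng.standard_normal(d)
+ b = -A @ x_star                            # so that F(x_star) = 0
+ x0 = rng.standard_normal(d)
+ 
+ x = x0.copy()
+ for _ in range(20):
+     x -= 0.5 * (A @ x + b)                 # residual polynomial (1 - 0.5 l)^t
+ dist = np.linalg.norm((np.eye(d) - Pi) @ (x - x_star)) ** 2
+ print(np.isclose(dist, np.linalg.norm(x - x_star) ** 2
+                        - np.linalg.norm(Pi @ (x0 - x_star)) ** 2))
+ ```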
464
+
465
+ # A.2 PROOF OF THE THEOREM
466
+
467
+ We are now ready to prove the main result.
468
+
469
+ Theorem 2.1. Consider the application of a first-order method associated to the sequence of polynomials $\{P_t\}$ (Proposition 2.1) on the problem (NSO). Let $\mu$ be the expected spectral distribution of $\mathbf{A}$ . Under Assumptions 1 and 2, we have
470
+
471
+ $$
472
+ \mathbb {E} \left[ \operatorname {dist} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) \right] = R ^ {2} \int_ {\mathbb {C} \backslash \{0 \}} \left| P _ {t} \right| ^ {2} \mathrm {d} \mu . \tag {7}
473
+ $$
474
+
475
+ Proof. We start with the result of Proposition A.2,
476
+
477
+ $$
478
+ \operatorname {d i s t} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) = \left\| \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} \right\| ^ {2} - \left\| \Pi \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \right\| ^ {2}.
479
+ $$
480
+
481
+ We now write the expectation of the distance function, using that Assumption 1 gives $\mathbb{E}\left[(\boldsymbol{x}_0 - \boldsymbol{x}^{\star})(\boldsymbol{x}_0 - \boldsymbol{x}^{\star})^{T} \mid \boldsymbol{A}\right] = \frac{R^2}{d} \boldsymbol{I}$ ,
482
+
483
+ $$
484
+ \begin{array}{l} \mathbb {E} \left[ \operatorname {dist} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) \right] = \mathbb {E} \left[ \left\| \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} \right\| ^ {2} - \left\| \Pi \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \right\| ^ {2} \right] \\ = \mathbb {E} \left[ \| P _ {t} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \| ^ {2} - \| \Pi \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \| ^ {2} \right] \\ = \mathbb {E} \left[ \operatorname {tr} P _ {t} (\boldsymbol {A}) P _ {t} (\boldsymbol {A}) ^ {T} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) ^ {T} - \operatorname {tr} \Pi^ {2} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) ^ {T} \right] \\ = \mathbb {E} _ {\boldsymbol {A}} \left[ \operatorname {tr} P _ {t} (\boldsymbol {A}) P _ {t} (\boldsymbol {A}) ^ {T} \, \mathbb {E} \left[ \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) ^ {T} \mid \boldsymbol {A} \right] - \operatorname {tr} \Pi \, \mathbb {E} \left[ \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) ^ {T} \mid \boldsymbol {A} \right] \right] \\ = \frac {R ^ {2}}{d} \, \mathbb {E} _ {\boldsymbol {A}} \left[ \operatorname {tr} P _ {t} (\boldsymbol {A}) P _ {t} (\boldsymbol {A}) ^ {T} - \operatorname {tr} \Pi \right] \\ = \frac {R ^ {2}}{d} \, \mathbb {E} \left[ \sum_ {i = 1} ^ {d} | P _ {t} (\lambda_ {i}) | ^ {2} - \operatorname {tr} \Pi \right] \\ = \frac {R ^ {2}}{d} \, \mathbb {E} \left[ \sum_ {\lambda_ {i} \neq 0} | P _ {t} (\lambda_ {i}) | ^ {2} + | P _ {t} (0) | ^ {2} \cdot \# \{ \lambda_ {i} = 0 \} - \operatorname {tr} \Pi \right] \\ \end{array}
485
+ $$
486
+
487
+ However, $|P_t(0)|^2 = 1$ and $\operatorname{tr} \Pi$ equals the number of zero eigenvalues of $\boldsymbol{A}$ , so the last two terms cancel; since $\mu$ is the expectation of the empirical spectral measure $\frac{1}{d}\sum_{i=1}^{d}\delta_{\lambda_{i}}$ , we therefore obtain
488
+
489
+ $$
490
+ \mathbb {E} \left[ \operatorname {dist} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) \right] = \frac {R ^ {2}}{d} \, \mathbb {E} \left[ \sum_ {\lambda_ {i} \neq 0} | P _ {t} (\lambda_ {i}) | ^ {2} \right] = R ^ {2} \int_ {\mathbb {C} \backslash \{0 \}} | P _ {t} (\lambda) | ^ {2} \mathrm {d} \mu (\lambda).
491
+ $$
492
+
493
+ ![](images/e448e52c9f88f956b7bac2caf110d04168507fec8574ead5cb0fb4c4dd0cde5a.jpg)
494
+
495
+ # B PROOFS OF THEOREM 3.1 AND PROPOSITION 3.1
496
+
497
+ Proposition B.1 (Block determinant formula). If $A, B, C, D$ are (not necessarily square) matrices,
498
+
499
+ $$
500
+ \det \left[ \begin{array}{l l} \boldsymbol {A} & \boldsymbol {B} \\ \boldsymbol {C} & \boldsymbol {D} \end{array} \right] = \det (\boldsymbol {D}) \det (\boldsymbol {A} - \boldsymbol {B} \boldsymbol {D} ^ {- 1} \boldsymbol {C}), \tag {23}
501
+ $$
502
+
503
+ if $D$ is invertible.
504
+
505
+ Definition 6 (Pushforward of a measure). Recall that the pushforward $f_*\mu$ of a measure $\mu$ by a function $f$ is defined as the measure such that for all measurable $g$ ,
506
+
507
+ $$
508
+ \int g (\lambda) \mathrm {d} (f _ {*} \mu) (\lambda) = \int g (f (\lambda)) \mathrm {d} \mu (\lambda). \tag {24}
509
+ $$
510
+
511
+ Equivalently, if $X$ is a random variable with distribution $\mu$ , then $f(X)$ has distribution $f_{*}\mu$ .
512
+
513
+ Proposition B.2. Assume that the dimensions of $M \in \mathbb{R}^{d_x \times d_y}$ fulfill $d_x \leq d_y$ and let $r = d_x / d_y$ . Let $\mu_{MM^\top}$ be the expected spectral distribution of the random matrix $MM^\top \in \mathbb{R}^{d_x \times d_x}$ , and assume that it is absolutely continuous with respect to the Lebesgue measure. The expected spectral distribution of $A$ is contained in the imaginary line and is given by
514
+
515
+ $$
516
+ \mu_ {\mathbf {A}} (i \lambda) = \left(1 - \frac {2}{1 + \frac {1}{r}}\right) \delta_ {0} (\lambda) + \frac {2 | \lambda |}{1 + \frac {1}{r}} \mu_ {M M ^ {\top}} \left(\lambda^ {2}\right). \tag {25}
517
+ $$
518
+
519
+ for $\lambda \in \mathbb{R}$ . If $d_{x} \geq d_{y}$ , then (25) holds with $\mu_{M^{\top}M}$ in place of $\mu_{MM^{\top}}$ and $1 / r$ in place of $r$ .
520
+
521
+ Proof. By the block determinant formula, writing $d_1 = d_x$ and $d_2 = d_y$ , we have that for $s \neq 0$
522
+
523
+ $$
524
+ \begin{array}{l} \det \left(s I _ {d _ {1} + d _ {2}} - A\right) = \left| \begin{array}{l l} s I _ {d _ {1}} & - M \\ M ^ {\top} & s I _ {d _ {2}} \end{array} \right| = \det \left(s I _ {d _ {2}}\right) \det \left(s I _ {d _ {1}} + M s ^ {- 1} I _ {d _ {2}} M ^ {\top}\right) \tag {26} \\ = s ^ {d _ {2} - d _ {1}} \det \left(s ^ {2} \boldsymbol {I} _ {d _ {1}} + \boldsymbol {M} \boldsymbol {M} ^ {\top}\right) \\ \end{array}
525
+ $$
526
+
527
+ Thus, for every eigenvalue $\lambda \geq 0$ of $MM^{\top}$ , both $i\sqrt{\lambda}$ and $-i\sqrt{\lambda}$ are eigenvalues of $\mathbf{A}$ . Since $\mathrm{rank}(MM^{\top}) = \mathrm{rank}(M)$ , we have $\mathrm{rank}(\mathbf{A}) = 2\,\mathrm{rank}(M)$ . Thus, the rest of the eigenvalues of $\mathbf{A}$ are 0 and there is a total of $d - 2d_{1} = d_{2} - d_{1}$ of them. Notice that
528
+
529
+ $$
530
+ \frac {d _ {1}}{d _ {1} + d _ {2}} = \frac {1}{\frac {d _ {1} + d _ {2}}{d _ {1}}} = \frac {1}{1 + \frac {1}{r}} \tag {27}
531
+ $$
532
+
533
+ Let $f_{+}(\lambda) = i\sqrt{\lambda}, f_{-}(\lambda) = -i\sqrt{\lambda}$ , and let $(f_{+})_{*}\mu_{MM^{\top}}$ (resp., $(f_{-})_{*}\mu_{MM^{\top}}$ ) be the pushforward measure of $\mu_{MM^{\top}}$ by the function $f_{+}$ (resp., $f_{-}$ ). Thus, by the definition of the pushforward measure (Definition 6),
534
+
535
+ $$
536
+ \mu_ {\mathbf {A}} (i \lambda) = \left(1 - \frac {2}{1 + \frac {1}{r}}\right) \delta_ {0} (\lambda) + \frac {1}{1 + \frac {1}{r}} \left(f _ {+}\right) _ {*} \mu_ {M M ^ {\top}} (\lambda) + \frac {1}{1 + \frac {1}{r}} \left(f _ {-}\right) _ {*} \mu_ {M M ^ {\top}} (\lambda) \tag {28}
537
+ $$
538
+
539
+ We compute the pushforwards $(f_{+})_{*}\mu_{MM^{\top}}$ , $(f_{-})_{*}\mu_{MM^{\top}}$ by performing the change of variables $y = \pm i\sqrt{\lambda}$ under the assumption that $\mu_{MM^{\top}}(\lambda) = \rho_{MM^{\top}}(\lambda)d\lambda$ :
540
+
541
+ $$
542
+ \int_ {\mathbb {R} _ {\geq 0}} g (\pm i \sqrt {\lambda}) \mathrm {d} \mu_ {M M ^ {\top}} (\lambda) = \int_ {\mathbb {R} _ {\geq 0}} g (\pm i \sqrt {\lambda}) \rho_ {M M ^ {\top}} (\lambda) d \lambda = \int_ {\pm i \mathbb {R} _ {\geq 0}} g (y) \rho_ {M M ^ {\top}} (| y | ^ {2}) 2 | y | \mathrm {d} | y |, \tag {29}
543
+ $$
544
+
545
+ which means that the density of $(f_{+})_{*}\mu_{MM^{\top}}$ at $y\in i\mathbb{R}_{\geq 0}$ is $2|y|\rho_{MM^{\top}}(|y|^{2})$ and the density of $(f_{-})_{*}\mu_{MM^{\top}}$ at $y\in -i\mathbb{R}_{\geq 0}$ is also $2|y|\rho_{MM^{\top}}(|y|^{2})$ .
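+ 
+ A numerical illustration of Proposition B.2 (a sketch under our own naming; small sizes chosen for readability):
+ 
+ ```python
+ import numpy as np
+ 
+ # The nonzero eigenvalues of A = [[0, M], [-M^T, 0]] are +- i sqrt(lambda)
+ # for each eigenvalue lambda of M M^T; the remaining d_y - d_x ones are 0.
+ rng = np.random.default_rng(4)
+ dx, dy = 5, 8
+ M = rng.standard_normal((dx, dy))
+ A = np.block([[np.zeros((dx, dx)), M],
+               [-M.T, np.zeros((dy, dy))]])
+ 
+ eigs_A = np.linalg.eigvals(A)
+ sq = np.sqrt(np.linalg.eigvalsh(M @ M.T))
+ expected = np.concatenate([1j * sq, -1j * sq, np.zeros(dy - dx)])
+ print(np.allclose(np.sort(eigs_A.imag), np.sort(expected.imag)))
+ print(np.allclose(eigs_A.real, 0))        # spectrum on the imaginary axis
+ ```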
546
+
547
+ Proposition B.3. The condition
548
+
549
+ $$
550
+ \forall P, Q \ \text {polynomials:} \quad \langle P (\lambda), \lambda Q (\lambda) \rangle = 0 \Longrightarrow \langle \lambda P (\lambda), Q (\lambda) \rangle = 0 \tag {30}
551
+ $$
552
+
553
+ is sufficient for any sequence $(P_k)_{k\geq 0}$ of orthogonal polynomials of increasing degrees to satisfy a three-term recurrence of the form
554
+
555
+ $$
556
+ \gamma_ {k} P _ {k} (\lambda) = (\lambda - \alpha_ {k}) P _ {k - 1} (\lambda) - \beta_ {k} P _ {k - 2} (\lambda), \tag {31}
557
+ $$
558
+
559
+ where
560
+
561
+ $$
562
+ \gamma_ {k} = \frac {\left\langle \lambda P _ {k - 1} (\lambda) , P _ {k} (\lambda) \right\rangle}{\left\langle P _ {k} (\lambda) , P _ {k} (\lambda) \right\rangle}, \quad \alpha_ {k} = \frac {\left\langle \lambda P _ {k - 1} (\lambda) , P _ {k - 1} (\lambda) \right\rangle}{\left\langle P _ {k - 1} (\lambda) , P _ {k - 1} (\lambda) \right\rangle}, \quad \beta_ {k} = \frac {\left\langle \lambda P _ {k - 1} (\lambda) , P _ {k - 2} (\lambda) \right\rangle}{\left\langle P _ {k - 2} (\lambda) , P _ {k - 2} (\lambda) \right\rangle} \tag {32}
563
+ $$
564
+
565
+ Proof. Since $\lambda P_{k-1}(\lambda)$ is a polynomial of degree $k$ , and $(P_j)_{0 \leq j \leq k}$ is a basis of the polynomials of degree up to $k$ , we can write
566
+
567
+ $$
568
+ \lambda P _ {k - 1} (\lambda) = \sum_ {j = 0} ^ {k} \frac {\left\langle \lambda P _ {k - 1} , P _ {j} \right\rangle}{\left\langle P _ {j} , P _ {j} \right\rangle} P _ {j} (\lambda) \tag {33}
569
+ $$
570
+
571
+ Now, remark that for all $j < k - 2$ , $\langle P_{k-1}, \lambda P_j \rangle = 0$ , because $\lambda P_j$ is a polynomial of degree at most $k - 2$ and $P_{k-1}$ is orthogonal to all polynomials of degree lower than $k - 1$ . By condition (30), this implies that $\langle \lambda P_{k-1}, P_j \rangle = 0$ for all $j < k - 2$ . Plugging this into (33), we obtain (31).
572
+
573
+ Proposition B.4. Let $\Pi_t^\mathbb{R}$ be the set of polynomials with real coefficients and degree at most $t$ . For $t \geq 0$ even, the minimum of the problem
574
+
575
+ $$
576
+ \min _ {P _ {t} \in \Pi_ {t} ^ {\mathbb {R}}, P _ {t} (0) = 1} \int_ {i \mathbb {R} \backslash \{0 \}} | P _ {t} (\lambda) | ^ {2} | \lambda | \rho_ {\boldsymbol {M M} ^ {\top}} \left(| \lambda | ^ {2}\right) d | \lambda | \tag {34}
577
+ $$
578
+
579
+ is attained by an even polynomial with real coefficients.
580
+
581
+ Proof. Since $\mathrm{d}\mu(i\lambda) \stackrel{\mathrm{def}}{=} |\lambda| \rho_{MM^{\top}}(|\lambda|^2) \mathrm{d}|\lambda|$ is supported on the imaginary axis and is symmetric with respect to 0, for all polynomials $P, Q$ ,
582
+
583
+ $$
584
+ \langle \lambda P (\lambda), Q (\lambda) \rangle = \int_ {i \mathbb {R}} \lambda P (\lambda) Q (\lambda) ^ {*} d \mu (\lambda) = - \int_ {i \mathbb {R}} P (\lambda) \lambda^ {*} Q (\lambda) ^ {*} d \mu (\lambda) = - \langle P (\lambda), \lambda Q (\lambda) \rangle . \tag {35}
585
+ $$
586
+
587
+ Hence, $\langle P(\lambda),\lambda Q(\lambda)\rangle = 0$ implies $\langle \lambda P(\lambda),Q(\lambda)\rangle = 0$ . By Proposition B.3, the orthonormal sequence of polynomials $(\phi_t)_{t\geq 0}$ satisfies a three-term recurrence of the form (31) with coefficients (32).
588
+
589
+ By Proposition B.5, the orthonormal polynomials $(\phi_t)_{t\geq 0}$ of even (resp. odd) degree are even (resp. odd) and have real coefficients. Hence, for all $t\geq 0$ even
590
+
591
+ $$
592
+ \frac {\sum_ {k = 0} ^ {t} \phi_ {k} (\lambda) \phi_ {k} (0) ^ {*}}{\sum_ {k = 0} ^ {t} | \phi_ {k} (0) | ^ {2}} = \frac {\sum_ {k = 0} ^ {t / 2} \phi_ {2 k} (\lambda) \phi_ {2 k} (0) ^ {*}}{\sum_ {k = 0} ^ {t / 2} | \phi_ {2 k} (0) | ^ {2}} \tag {36}
593
+ $$
594
+
595
+ is an even polynomial with real coefficients. By Theorem 2.3, this polynomial attains the minimum of the problem
596
+
597
+ $$
598
+ \min _ {P _ {t} \in \Pi_ {t} ^ {\mathbb {C}}, P _ {t} (0) = 1} \int_ {i \mathbb {R} \backslash \{0 \}} | P _ {t} (\lambda) | ^ {2} | \lambda | \rho_ {M M ^ {\top}} \left(| \lambda | ^ {2}\right) d | \lambda | \tag {37}
599
+ $$
600
+
601
+ and, a fortiori, the minimum of the problem in (34), in which the minimization is restricted to polynomials with real coefficients instead of complex coefficients.
602
+
603
+ Proposition B.5. The polynomials $(\phi_t)_{t\geq 0}$ of the orthonormal sequence corresponding to the measure $\mu (i\lambda) = |\lambda |\rho_{MM^{\top}}(|\lambda |^{2})d|\lambda |$ have real coefficients and are even (resp. odd) for even (resp. odd) $t$ .
604
+
605
+ Proof. The proof is by induction. The base case follows from the choice $\phi_0 = 1$ . Assuming that $\phi_{k - 1}\in \mathbb{R}[X]$ by the induction hypothesis, we show that $\alpha_{k} = 0$ (where $\alpha_{k}$ is the coefficient from (31) and (32)):
606
+
607
+ $$
608
+ \begin{array}{l} \langle \lambda \phi_ {k - 1} (\lambda), \phi_ {k - 1} (\lambda) \rangle = \int_ {i \mathbb {R}} \lambda | \phi_ {k - 1} (\lambda) | ^ {2} | \lambda | \rho_ {M M ^ {\top}} (| \lambda | ^ {2}) d | \lambda | \\ = \int_ {\mathbb {R} _ {\geq 0}} i \lambda \left(\left| \phi_ {k - 1} (i \lambda) \right| ^ {2} - \left| \phi_ {k - 1} (- i \lambda) \right| ^ {2}\right) \lambda \rho_ {M M ^ {\top}} \left(\lambda^ {2}\right) d \lambda = 0 \tag {38} \\ \end{array}
609
+ $$
610
+
611
+ The last equality follows from $|\phi_{k-1}(i\lambda)|^2 = |\phi_{k-1}(-i\lambda)|^2$ , which holds because $\phi_{k-1}(i\lambda)^* = \phi_{k-1}(-i\lambda)$ , and in turn this is true because $\phi_{k-1} \in \mathbb{R}[X]$ by the induction hypothesis.
612
+
613
+ Once we have seen that $\alpha_{k} = 0$ , it is straightforward to apply the induction hypothesis once again to show that $\phi_{k}$ also satisfies the even/odd property. Namely, for $k$ even (resp. odd), $\gamma_{k}P_{k} = \lambda P_{k - 1} - \beta_{k}P_{k - 2}$ , and the two polynomials on the right-hand side are even (resp. odd).
614
+
615
+ Finally, $\phi_{k}$ must have real coefficients because $\phi_{k - 1}$ and $\phi_{k - 2}$ have real coefficients by the induction hypothesis, and the recurrence coefficient $\beta_{k}$ is real, as
616
+
617
+ $$
618
+ \begin{array}{l} \langle \lambda P _ {k - 1} (\lambda), P _ {k - 2} (\lambda) \rangle = \int_ {i \mathbb {R}} \lambda \phi_ {k - 1} (\lambda) \phi_ {k - 2} (\lambda) ^ {*} | \lambda | \rho_ {M M ^ {\top}} (| \lambda | ^ {2}) d | \lambda | \\ = \int_ {\mathbb {R} _ {\geq 0}} i \lambda \left(\phi_ {k - 1} (i \lambda) \phi_ {k - 2} (i \lambda) ^ {*} - \phi_ {k - 1} (i \lambda) ^ {*} \phi_ {k - 2} (i \lambda)\right) \lambda \rho_ {M M ^ {\top}} \left(\lambda^ {2}\right) d \lambda \\ = - \int_ {\mathbb {R} _ {\geq 0}} 2 \lambda \operatorname {I m} \left(\phi_ {k - 1} (i \lambda) \phi_ {k - 2} (i \lambda) ^ {*}\right) \lambda \rho_ {M M ^ {\top}} \left(\lambda^ {2}\right) d \lambda \in \mathbb {R}. \tag {39} \\ \end{array}
619
+ $$
620
+
621
+ ![](images/26b1ca0715cc2c88def55f9d15d7e5925c08a7e650c2d013b14d96c2531a4296.jpg)
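+ 
+ The parity and real-coefficient properties of Propositions B.4 and B.5 can be observed numerically; here is a Gram-Schmidt sketch (our own construction, on an assumed discrete symmetric measure supported on the imaginary axis):
+ 
+ ```python
+ import numpy as np
+ 
+ # Orthonormal polynomials for a measure on i*R, symmetric about 0:
+ # discrete measure on points +- i r_j with equal weights.
+ rng = np.random.default_rng(5)
+ r = rng.uniform(0.5, 2.0, 50)
+ pts = np.concatenate([1j * r, -1j * r])
+ w = np.full(pts.size, 1.0 / pts.size)
+ 
+ def inner(p, q):                       # <P, Q> = sum_l P(l) conj(Q(l)) w(l)
+     return np.sum(np.polyval(p, pts) * np.conj(np.polyval(q, pts)) * w)
+ 
+ phis = []
+ for k in range(5):
+     p = np.zeros(k + 1, dtype=complex)
+     p[0] = 1.0                         # monomial lambda^k (highest-first)
+     for q in phis:                     # Gram-Schmidt orthogonalization
+         q_pad = np.concatenate([np.zeros(len(p) - len(q), dtype=complex), q])
+         p = p - inner(p, q) * q_pad
+     phis.append(p / np.sqrt(inner(p, p).real))
+ 
+ # Alternate coefficients vanish (parity), and all coefficients are real:
+ print([np.allclose(phi[1::2], 0) for phi in phis])
+ print(max(np.abs(phi.imag).max() for phi in phis))   # ~0
+ ```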
622
+
623
+ Proposition B.6. Let $t \geq 0$ be even. Assume that on $\mathbb{R}_{>0}$ , the expected spectral distribution $\mu_{MM^{\top}}$ has Radon-Nikodym derivative $\rho_{MM^{\top}}$ with respect to the Lebesgue measure. If
624
+
625
+ $$
626
+ Q _ {t / 2} ^ {\star} \stackrel {\mathrm {def}} {=} \underset { \begin{array}{c} P _ {t / 2} \in \Pi_ {t / 2} ^ {\mathbb {R}}, \\ P _ {t / 2} (0) = 1 \end{array} } {\arg \min } \int_ {\mathbb {R} _ {> 0}} P _ {t / 2} (\lambda) ^ {2} \mathrm {d} \mu_ {- \boldsymbol {A} ^ {2}} (\lambda), \tag {40}
627
+ $$
628
+
629
+ and
630
+
631
+ $$
632
+ P _ {t} ^ {\star} \stackrel {\mathrm {def}} {=} \underset { \begin{array}{c} P _ {t} \in \Pi_ {t} ^ {\mathbb {R}}, \\ P _ {t} (0) = 1 \end{array} } {\arg \min } \int_ {i \mathbb {R} \backslash \{0 \}} | P _ {t} (\lambda) | ^ {2} | \lambda | \rho_ {M M ^ {\top}} (| \lambda | ^ {2}) d | \lambda |, \tag {41}
633
+ $$
634
+
635
+ then $P_{t}^{\star}(\lambda) = Q_{t / 2}^{\star}(-\lambda^{2})$
636
+
637
+ Proof. First, remark that the definitions in (40) and (41) are well posed because the minimizers are unique by Theorem 2.3. Without loss of generality, assume that $d_x \leq d_y$ (otherwise switch the players), and let $r \stackrel{\mathrm{def}}{=} d_x / d_y \leq 1$ . Since,
638
+
639
+ $$
640
+ - \boldsymbol {A} ^ {2} = \left[ \begin{array}{c c} M M ^ {\top} & 0 \\ 0 & M ^ {\top} M \end{array} \right], \tag {42}
641
+ $$
642
+
643
+ each eigenvalue of $MM^{\top} \in \mathbb{R}^{d_x \times d_x}$ is an eigenvalue of $-A^2$ with doubled multiplicity, and the rest of the eigenvalues are zero. Hence, we have $\mu_{-A^2} = \left(1 - 2 / (1 + \frac{1}{r})\right)\delta_0 + 2\mu_{MM^{\top}} / (1 + \frac{1}{r})$ . Thus, for all $t \geq 0$ ,
644
+
645
+ $$
646
+ Q _ {t} ^ {\star} = \arg \min _ { \begin{array}{c} P _ {t} \in \Pi_ {t} ^ {\mathbb {R}}, \\ P _ {t} (0) = 1 \end{array} } \int_ {\mathbb {R} _ {> 0}} P _ {t} (\lambda) ^ {2} \mathrm {d} \mu_ {- \boldsymbol {A} ^ {2}} (\lambda) = \underset { \begin{array}{c} P _ {t} \in \Pi_ {t} ^ {\mathbb {R}}, \\ P _ {t} (0) = 1 \end{array} } {\arg \min } \int_ {\mathbb {R} _ {> 0}} P _ {t} (\lambda) ^ {2} \rho_ {\boldsymbol {M M} ^ {\top}} (\lambda) \mathrm {d} \lambda \tag {43}
647
+ $$
648
+
649
+ By Proposition B.4, for an even $t \geq 0$ the minimum in (41) is attained by an even polynomial with real coefficients. Hence,
650
+
651
+ $$
652
+ \begin{array}{l} \min_{\substack{P_{t}\in \Pi_{t}^{\mathbb{R}},\\ P_{t}(0) = 1}}\int_{i\mathbb{R}\setminus \{0\}}|P_{t}(\lambda)|^{2}|\lambda |\rho_{\boldsymbol{M}\boldsymbol{M}^{\top}}(|\lambda |^{2}) \mathrm{d}|\lambda | = \min_{\substack{P_{t / 2}\in \Pi_{t / 2}^{\mathbb{R}},\\ P_{t / 2}(0) = 1}}\int_{i\mathbb{R}\setminus \{0\}}|P_{t / 2}(\lambda^{2})|^{2}|\lambda |\rho_{\boldsymbol{M}\boldsymbol{M}^{\top}}(|\lambda |^{2}) \mathrm{d}|\lambda | \\ = 2\min_{\substack{P_{t / 2}\in \Pi^{\mathbb{R}}_{t / 2},\\ P_{t / 2}(0) = 1}}\int_{\mathbb{R}_{>0}}|P_{t / 2}((i\lambda)^{2})|^{2}\lambda \rho_{MM^{\top}}(\lambda^{2}) \mathrm{d}\lambda \\ = 2\min_{\substack{P_{t / 2}\in \Pi^{\mathbb{R}}_{t / 2},\\ P_{t / 2}(0) = 1}}\int_{\mathbb{R}_{>0}}P_{t / 2}(\lambda^{2})^{2}\lambda \rho_{MM^{\top}}(\lambda^{2}) \mathrm{d}\lambda \\ = \min _ {\substack {P _ {t / 2} \in \Pi_ {t / 2} ^ {\mathbb {R}}, \\ P _ {t / 2} (0) = 1}} \int_ {\mathbb {R} > 0} P _ {t / 2} (\lambda) ^ {2} \rho_ {\boldsymbol {M M} ^ {\top}} (\lambda) \mathrm {d} \lambda \tag{44} \\ \end{array}
653
+ $$
654
+
655
+ Moreover, for any polynomial $Q_{t/2}$ that attains the minimum on the right-most term, the polynomial $P_t(\lambda) = Q_{t/2}(-\lambda^2)$ attains the minimum on the left-most term. In particular, using (43), $P_t^\star(\lambda) \stackrel{\mathrm{def}}{=} Q_{t/2}^\star(-\lambda^2)$ attains the minimum on the left-most term.
656
+
657
+ Theorem 3.1. Suppose that Assumption 1 holds and that the expected spectral distribution of $MM^{\top}$ is absolutely continuous with respect to the Lebesgue measure. Then, the method (11) is average-case optimal for bilinear games when $h_t$ , $m_t$ are chosen to be the coefficients of the average-case optimal minimization of $\frac{1}{2}\| F(\pmb{x})\|^2$ .
658
+
659
+ Proof. Making use of Theorem 2.1 and Proposition B.2, we obtain that for any first-order method using the vector field $F$ ,
660
+
661
+ $$
662
+ \mathbb {E} \left[ \operatorname {d i s t} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) \right] = R ^ {2} \int_ {\mathbb {C} \backslash \{0 \}} | P _ {t} (\lambda) | ^ {2} \mathrm {d} \mu_ {\boldsymbol {A}} (\lambda) = \frac {2 R ^ {2}}{1 + \frac {1}{r}} \int_ {i \mathbb {R} \backslash \{0 \}} | P _ {t} (\lambda) | ^ {2} | \lambda | \rho_ {M M ^ {\top}} (| \lambda | ^ {2}) \mathrm {d} | \lambda | \tag {45}
663
+ $$
664
+
665
+ Let $Q_{t/2}^{\star}, P_t^{\star}$ be as defined in (40) and (41), respectively. For $t \geq 0$ even, iteration $t$ of the average-case optimal method for the bilinear game must satisfy
666
+
667
+ $$
668
+ \boldsymbol {x} _ {t} - P _ {\mathcal {X} ^ {*}} \left(\boldsymbol {x} _ {0}\right) = P _ {t} ^ {*} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - P _ {\mathcal {X} ^ {*}} \left(\boldsymbol {x} _ {0}\right)\right) = Q _ {t / 2} ^ {*} (- \boldsymbol {A} ^ {2}) \left(\boldsymbol {x} _ {0} - P _ {\mathcal {X} ^ {*}} \left(\boldsymbol {x} _ {0}\right)\right) \tag {46}
669
+ $$
670
+
671
+ On the other hand, first-order methods for the minimization of the function $\frac{1}{2}\| F(\pmb{x})\|^2$ make use of the vector field $\nabla \left(\frac{1}{2}\| F(\pmb{x})\|^2\right) = \pmb{A}^\top (\pmb{A}\pmb{x} + \pmb{b}) = -\pmb{A}^2 (\pmb{x} - \pmb{x}^\star)$ . Let $\mu_{-\pmb{A}^2}$ be the spectral density of $-\pmb{A}^2$ . By Theorem 2.1, the average-case optimal first-order method for the minimization problem is the one for which the residual polynomial $P_t$ (Proposition 2.1) minimizes the functional $\int_{\mathbb{R}} P_t^2 \mathrm{d}\mu_{-\pmb{A}^2}$ ; that is, the residual polynomial is $Q_t^\star$ . From (46), we see that the $t$ -th iterate of the average-case optimal method for $F$ is equal to the $t/2$ -th iterate of the average-case optimal method for $\nabla \left(\frac{1}{2}\| F(\pmb{x})\|^2\right)$ .
672
+
673
+ # C PROOFS OF THEOREM 4.1 AND THEOREM 4.2
674
+
675
+ Theorem 4.1. Under Assumption 4 and the assumptions of Theorem 2.1, the following algorithm is optimal in the average case, with $\mathbf{y}_{-1} = \mathbf{y}_0 = \mathbf{x}_0$ :
676
+
677
+ $$
678
+ \boldsymbol {y} _ {t} = a _ {t} \boldsymbol {y} _ {t - 1} + (1 - a _ {t}) \boldsymbol {y} _ {t - 2} + b _ {t} F (\boldsymbol {y} _ {t - 1})
679
+ $$
680
+
681
+ $$
682
+ \boldsymbol {x} _ {t} = \frac {B _ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {x} _ {t - 1} + \frac {\beta_ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {y} _ {t}, \quad \beta_ {t} = \phi_ {t} ^ {2} (0), \quad B _ {t} = B _ {t - 1} + \beta_ {t - 1}, \quad B _ {0} = 0. \tag {16}
683
+ $$
684
+
685
+ where $(\phi_k(0))_{k\geq 0}$ can be computed using the three-term recurrence (upon normalization). Moreover, $\mathbb{E}_{(\pmb {A},\pmb{x}^{\star},\pmb{x}_0)}\mathrm{dist}(\pmb {x}_t,\mathcal{X}^\star)$ converges to zero at rate $1 / B_{t}$ .
686
+
687
+ Proof. We prove by induction that
688
+
689
+ $$
690
+ \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = \frac {\sum_ {k = 0} ^ {t} \phi_ {k} (\boldsymbol {A}) \phi_ {k} (0) ^ {*}}{\sum_ {k = 0} ^ {t} \phi_ {k} (0) ^ {2}} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \tag {47}
691
+ $$
692
+
693
+ The base step $t = 0$ holds trivially because $\phi_0 = 1$ . Assume that (47) holds for $t - 1$ . Subtracting $x^{\star}$ from (16), we have
694
+
695
+ $$
696
+ \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = \frac {\sum_ {k = 0} ^ {t - 1} \phi_ {k} (0) ^ {2}}{\sum_ {k = 0} ^ {t} \phi_ {k} (0) ^ {2}} \left(\boldsymbol {x} _ {t - 1} - \boldsymbol {x} ^ {\star}\right) + \frac {\phi_ {t} (0) ^ {2}}{\sum_ {k = 0} ^ {t} \phi_ {k} (0) ^ {2}} \left(\boldsymbol {y} _ {t} - \boldsymbol {x} ^ {\star}\right) \tag {48}
697
+ $$
698
+
699
+ If
700
+
701
+ $$
702
+ \phi_ {t} (0) ^ {2} \left(\boldsymbol {y} _ {t} - \boldsymbol {x} ^ {\star}\right) = \phi_ {t} (0) \phi_ {t} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right), \tag {49}
703
+ $$
704
+
705
+ by the induction hypothesis for $t - 1$ and (48), we have
706
+
707
+ $$
708
+ \begin{array}{l} \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = \frac {\sum_ {k = 0} ^ {t - 1} \phi_ {k} (\boldsymbol {A}) \phi_ {k} (0) ^ {*}}{\sum_ {k = 0} ^ {t} \phi_ {k} (0) ^ {2}} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) + \frac {\phi_ {t} (\boldsymbol {A}) \phi_ {t} (0) ^ {*}}{\sum_ {k = 0} ^ {t} \phi_ {k} (0) ^ {2}} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \tag {50} \\ = \frac {\sum_ {k = 0} ^ {t} \phi_ {k} (\boldsymbol {A}) \phi_ {k} (0) ^ {*}}{\sum_ {k = 0} ^ {t} \phi_ {k} (0) ^ {2}} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right), \\ \end{array}
709
+ $$
710
+
711
+ which concludes the proof of (47). The only thing left is to show (49), again by induction. The base case follows readily from $\pmb{y}_0 = \pmb{x}_0$ in (16). Dividing by $\phi_t(0)^2$ , we rewrite (49) as
712
+
713
+ $$
714
+ \boldsymbol {y} _ {t} - \boldsymbol {x} ^ {\star} = \frac {\phi_ {t} (\boldsymbol {A})}{\phi_ {t} (0)} \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) = \psi_ {t} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right), \tag {51}
715
+ $$
716
+
717
+ where $\psi_t$ is the $t$ -th orthogonal residual polynomial of the sequence. By Assumption 4, $\psi_t$ must satisfy the recurrence in (15). If we subtract $\boldsymbol{x}^{\star}$ from the first line of (16) and apply the induction hypothesis and then the recurrence in (15), we obtain
718
+
719
+ $$
720
+ \begin{array}{l} \boldsymbol {y} _ {t} - \boldsymbol {x} ^ {\star} = a _ {t} \left(\boldsymbol {y} _ {t - 1} - \boldsymbol {x} ^ {\star}\right) + \left(1 - a _ {t}\right) \left(\boldsymbol {y} _ {t - 2} - \boldsymbol {x} ^ {\star}\right) + b _ {t} F \left(\boldsymbol {y} _ {t - 1}\right) \\ = a _ {t} \left(\boldsymbol {y} _ {t - 1} - \boldsymbol {x} ^ {\star}\right) + \left(1 - a _ {t}\right) \left(\boldsymbol {y} _ {t - 2} - \boldsymbol {x} ^ {\star}\right) + b _ {t} \boldsymbol {A} \left(\boldsymbol {y} _ {t - 1} - \boldsymbol {x} ^ {\star}\right) \\ = a _ {t} \psi_ {t - 1} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) + (1 - a _ {t}) \psi_ {t - 2} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) + b _ {t} \boldsymbol {A} \psi_ {t - 1} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \tag {52} \\ = \psi_ {t} (\boldsymbol {A}) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right), \\ \end{array}
721
+ $$
722
+
723
+ thus concluding the proof of (49).
724
+
725
+ ![](images/ddbdf6b720a26c59661c0cb92463857343c989b33c8e74a3ffa20a551ab723ed.jpg)
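+ 
+ A generic implementation sketch of scheme (16) (our own function and argument names; the coefficient sequences must be supplied from the spectral distribution at hand, which is an assumption of this sketch):
+ 
+ ```python
+ import numpy as np
+ 
+ def averaged_optimal_method(F, x0, a, b, phi0_sq, T):
+     """Run (16): a two-step recurrence on y_t plus a weighted running average.
+     a[t], b[t] are the recurrence coefficients of Assumption 4 and
+     phi0_sq[t] = phi_t(0)^2 gives the averaging weights beta_t."""
+     y_prev2 = y_prev = x0.copy()       # y_{-1} = y_0 = x_0
+     x = x0.copy()
+     B, beta = 0.0, phi0_sq[0]          # B_0 = 0, beta_0 = phi_0(0)^2
+     for t in range(1, T + 1):
+         y = a[t] * y_prev + (1 - a[t]) * y_prev2 + b[t] * F(y_prev)
+         B, beta = B + beta, phi0_sq[t]     # B_t = B_{t-1} + beta_{t-1}
+         x = (B * x + beta * y) / (B + beta)
+         y_prev2, y_prev = y_prev, y
+     return x
+ ```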
726
+
727
+ Proposition C.1. Suppose that Assumption 5 holds with $C = 0$ , that is, the circular support of $\mu$ is centered at 0. Then, the basis of orthonormal polynomials for the scalar product
728
+
729
+ $$
730
+ \langle P, Q \rangle = \int_ {D _ {R, 0}} P (\lambda) Q (\lambda) ^ {*} \mathrm {d} \mu (\lambda) \quad \text {is} \quad \phi_ {k} (\lambda) = \frac {\lambda^ {k}}{K _ {k , R}}, \quad \forall k \geq 0, \tag {53}
731
+ $$
732
+
733
+ where $K_{k,R} = \sqrt{2\pi\int_0^Rr^{2k}d\mu_R(r)}$
734
+
735
+ Proof. First, we will show that if $\mu$ satisfies Assumption 5 with $C = 0$ , then $\langle \lambda^j, \lambda^k \rangle = 0$ for $j, k \geq 0$ with $j \neq k$ (without loss of generality, suppose that $j > k$ ).
736
+
737
+ $$
738
+ \begin{array}{l} \langle \lambda^ {j}, \lambda^ {k} \rangle = \int_ {D _ {R, 0}} \lambda^ {j} \left(\lambda^ {*}\right) ^ {k} d \mu (\lambda) = \int_ {D _ {R, 0}} \lambda^ {j - k} | \lambda | ^ {2 k} d \mu (\lambda) \\ = \int_ {0} ^ {R} \int_ {0} ^ {2 \pi} (r e ^ {i \theta}) ^ {j - k} r ^ {2 k} \mathrm {d} \theta \mathrm {d} \mu_ {R} (r) = \int_ {0} ^ {2 \pi} e ^ {i \theta (j - k)} \mathrm {d} \theta \int_ {0} ^ {R} r ^ {j + k} \mathrm {d} \mu_ {R} (r) \tag {54} \\ = \frac {e ^ {i 2 \pi (j - k)} - 1}{i (j - k)} \int_ {0} ^ {R} r ^ {j + k} \mathrm {d} \mu_ {R} (r) = 0 \\ \end{array}
739
+ $$
740
+
741
+ And for all $k\geq 0$
742
+
743
+ $$
744
+ \langle \lambda^ {k}, \lambda^ {k} \rangle = \int_ {D _ {R, 0}} | \lambda^ {k} | ^ {2} \mathrm {d} \mu (\lambda) = \int_ {0} ^ {R} \int_ {0} ^ {2 \pi} r ^ {2 k} \mathrm {d} \theta \mathrm {d} \mu_ {R} (r) = 2 \pi \int_ {0} ^ {R} r ^ {2 k} \mathrm {d} \mu_ {R} (r) = K _ {k, R} ^ {2}. \tag {55}
745
+ $$
746
+
747
+ ![](images/3ff7dd55f0fd6f067ac71ded079425782f02e58e2e7cef5bb5c0ab98473f5924.jpg)
748
+
749
+ Proposition 4.1. If $\mu$ satisfies Assumption 5, the sequence of orthonormal polynomials $(\phi_t)_{t\geq 0}$ is given by
750
+
751
+ $$
752
+ \phi_ {t} (\lambda) = \frac {(\lambda - C) ^ {t}}{K _ {t , R}}, \quad \text {where} \quad K _ {t, R} = \sqrt {2 \pi \int_ {0} ^ {R} r ^ {2 t} \mathrm {d} \mu_ {R} (r)}. \tag {17}
753
+ $$
754
+
755
+ Proof. The result follows from Proposition C.1 using the change of variables $z \to z + C$ . To compute the measure $\mu_R$ for the uniform measure on $D_{C,R}$ , we perform a change of variables to polar coordinates:
756
+
757
+ $$
758
+ \begin{array}{l} \int_ {D _ {C, R}} f (\lambda) \mathrm {d} \mu (\lambda) = \frac {1}{\pi R ^ {2}} \int_ {0} ^ {R} \int_ {0} ^ {2 \pi} f (C + r e ^ {i \theta}) r \mathrm {d} \theta \mathrm {d} r = \int_ {0} ^ {R} \int_ {0} ^ {2 \pi} f (C + r e ^ {i \theta}) \mathrm {d} \theta \mathrm {d} \mu_ {R} (r). \\ \Rightarrow \mathrm {d} \mu_ {R} (r) = \frac {r}{\pi R ^ {2}} \mathrm {d} r \tag {56} \\ \end{array}
759
+ $$
760
+
761
+ And
762
+
763
+ $$
764
+ 2 \pi \int_ {0} ^ {R} r ^ {2 t} \mathrm {d} \mu_ {R} (r) = \frac {2}{R ^ {2}} \int_ {0} ^ {R} r ^ {2 t + 1} \mathrm {d} r = \frac {R ^ {2 t}}{t + 1} \Rightarrow K _ {t, R} = R ^ {t} / \sqrt {t + 1}. \tag {57}
765
+ $$
766
+
767
+ ![](images/2c6b973f979e018eab001056596cccd6456f2712fd507689d8c23fcb4294950d.jpg)
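+ 
+ A quick Monte Carlo check of (57) (an illustrative sketch of ours; it samples the uniform disk via the square-root radius trick):
+ 
+ ```python
+ import numpy as np
+ 
+ # For the uniform measure on D(0, R): E|z|^(2t) = K_{t,R}^2 = R^(2t)/(t+1).
+ rng = np.random.default_rng(6)
+ R, t, N = 1.5, 4, 10**6
+ r = R * np.sqrt(rng.uniform(0.0, 1.0, N))     # radius density 2r/R^2 on [0, R]
+ theta = rng.uniform(0.0, 2 * np.pi, N)
+ z = r * np.exp(1j * theta)                    # uniform samples on the disk
+ print(np.mean(np.abs(z) ** (2 * t)), R ** (2 * t) / (t + 1))
+ ```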
768
+
769
+ Theorem 4.2. Given an initialization $\pmb{x}_0$ (with $\pmb{y}_0 = \pmb{x}_0$ ), if Assumption 5 is fulfilled with $R < C$ and the assumptions of Theorem 2.1 hold, then the average-case optimal first-order method is
770
+
771
+ $$
772
+ \boldsymbol {y} _ {t} = \boldsymbol {y} _ {t - 1} - \frac {1}{C} F (\boldsymbol {y} _ {t - 1}), \quad \beta_ {t} = C ^ {2 t} / K _ {t, R} ^ {2}, \quad B _ {t} = B _ {t - 1} + \beta_ {t - 1},
773
+ $$
774
+
775
+ $$
776
+ \boldsymbol {x} _ {t} = \frac {B _ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {x} _ {t - 1} + \frac {\beta_ {t}}{B _ {t} + \beta_ {t}} \boldsymbol {y} _ {t}. \tag {18}
777
+ $$
778
+
779
+ Moreover, $\mathbb{E}_{(A,\pmb{x}^{\star},\pmb{x}_0)}\mathrm{dist}(\pmb{x}_t,\mathcal{X}^\star)$ converges to zero at rate $1 / B_{t}$ .
780
+
781
+ Proof. By Proposition 4.1, the sequence of residual orthogonal polynomials is given by $\psi_t(\lambda) = \phi_t(\lambda) / \phi_t(0) = \left(1 - \frac{\lambda}{C}\right)^t$ . Hence, Assumption 4 is fulfilled with $a_t = 1, b_t = -\frac{1}{C}$ , as $\psi_t(\lambda) = \psi_{t-1}(\lambda) - \frac{\lambda}{C} \psi_{t-1}(\lambda)$ . We apply Theorem 4.1 and make use of the fact that $\phi_k(0)^2 = \frac{C^{2k}}{K_{k,R}^2}$ . See Proposition D.3 for the rate on $\mathrm{dist}(\boldsymbol{x}_t, \mathcal{X}^\star)$ .
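+ 
+ For the uniform disk, where $K_{t,R}^2 = R^{2t}/(t+1)$ by (57), scheme (18) takes a particularly simple form; below is a sketch (our own code, with the weights rescaled by $(R/C)^{2t}$ to avoid overflow, which leaves the averaging ratios unchanged):
+ 
+ ```python
+ import numpy as np
+ 
+ def disk_optimal_method(F, x0, C, R, T):
+     """Sketch of (18) for the uniform disk D(C, R) with R < C:
+     y_t = y_{t-1} - F(y_{t-1})/C, averaged with beta_t = (t+1)(C/R)^{2t}."""
+     y, x = x0.copy(), x0.copy()
+     rho = (R / C) ** 2                 # < 1 since R < C
+     B, beta = 0.0, 1.0                 # rescaled B_0 = 0, beta_0 = 1
+     for t in range(1, T + 1):
+         y = y - F(y) / C
+         B = rho * (B + beta)           # rescaled B_t = (R/C)^2 (B_{t-1} + beta_{t-1})
+         beta = t + 1.0                 # rescaled beta_t
+         x = (B * x + beta * y) / (B + beta)
+     return x
+ ```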
782
+
783
+ # D PROOF OF PROPOSITION 5.2
784
+
785
+ Proposition D.1. Suppose that the assumptions of Theorem 4.2 hold with the probability measure $\mu_R$ fulfilling $\mu_R([r,R]) = \Omega((R - r)^\kappa)$ for $r$ in $[r_0,R]$ , for some $r_0 \in [0,R)$ and some $\kappa \in \mathbb{Z}_{>0}$ . Then,
786
+
787
+ $$
788
+ \lim _ {t \rightarrow \infty} \frac {\frac {C ^ {2 t}}{K _ {t , R} ^ {2}}}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{K _ {k , R} ^ {2}}} = 1 - \frac {R ^ {2}}{C ^ {2}}. \tag {58}
789
+ $$
790
+
791
+ Proof. Given $\epsilon > 0$ , let $c_{\epsilon} \in \mathbb{Z}_{\geq 0}$ be the smallest integer such that
792
+
793
+ $$
794
+ \frac {1}{\sum_ {i = 0} ^ {c _ {\epsilon}} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {i}} \leq (1 + \epsilon) \frac {1}{\sum_ {i = 0} ^ {\infty} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {i}} = (1 + \epsilon) \left(1 - \frac {R ^ {2}}{C ^ {2}}\right) \tag {59}
795
+ $$
796
+
797
+ Define $Q_{t,R} \stackrel{\mathrm{def}}{=} \frac{R^{2t}}{K_{t,R}^2}$ . Then,
798
+
799
+ $$
800
+ \frac {\frac {C ^ {2 t}}{K _ {t , R} ^ {2}}}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{K _ {k , R} ^ {2}}} = \frac {\frac {C ^ {2 t}}{R ^ {2 t}} Q _ {t , R}}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{R ^ {2 k}} Q _ {k , R}} = \frac {Q _ {t , R}}{\sum_ {k = 0} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} Q _ {k , R}} \tag {60}
801
+ $$
802
+
803
+ Now, on one hand, using that $Q_{t,R}$ is an increasing sequence in $t$ ,
804
+
805
+ $$
806
+ \frac {Q _ {t , R}}{\sum_ {k = 0} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} Q _ {k , R}} \geq \frac {1}{\sum_ {k = 0} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k}} \geq \frac {1}{\sum_ {k = 0} ^ {\infty} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {k}} = 1 - \frac {R ^ {2}}{C ^ {2}} \tag {61}
807
+ $$
808
+
809
+ On the other hand, for $t \geq c_{\epsilon}$ ,
810
+
811
+ $$
812
+ \frac {Q _ {t , R}}{\sum_ {k = 0} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} Q _ {k , R}} \leq \frac {Q _ {t , R}}{\sum_ {k = t - c _ {\epsilon}} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} Q _ {k , R}} = \frac {Q _ {t , R}}{\sum_ {k = t - c _ {\epsilon}} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} \left(Q _ {t , R} - \int_ {k} ^ {t} \frac {d}{d s} Q _ {s , R} d s\right)} \tag {62}
813
+ $$
814
+
815
+ Thus, we want to upper-bound $\int_{k}^{t}\frac{d}{ds} Q_{s,R}\mathrm{d}s$ . First, notice that
816
+
817
+ $$
818
+ \frac {d}{d s} Q _ {s, R} = \frac {d}{d s} \left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {- 1} = \frac {2 \int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \log \left(\frac {R}{r}\right) \mathrm {d} \mu_ {R} (r)}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \tag {63}
819
+ $$
820
+
821
+ By concavity of the logarithm function we obtain $\log \left(\frac{R}{r}\right) \leq \frac{R}{r_0} - 1$ for $r \in [r_0, R]$ . Choose $r_0$ close enough to $R$ so that $\frac{R}{r_0} - 1 \leq \epsilon / (2 c_{\epsilon})$ . We obtain that
822
+
823
+ $$
824
+ \int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \log \left(\frac {R}{r}\right) d \mu_ {R} (r) \leq \int_ {0} ^ {r _ {0}} \left(\frac {r}{R}\right) ^ {2 s} \log \left(\frac {R}{r}\right) d \mu_ {R} (r) + \int_ {r _ {0}} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \left(\frac {R}{r _ {0}} - 1\right) d \mu_ {R} (r). \tag {64}
825
+ $$
826
+
827
+ Thus,
828
+
829
+ $$
830
+ \int_ {k} ^ {t} \frac {d}{d s} Q _ {s, R} \mathrm {d} s \leq 2 \int_ {k} ^ {t} \frac {\int_ {0} ^ {r _ {0}} \left(\frac {r}{R}\right) ^ {2 s} \log \left(\frac {R}{r}\right) \mathrm {d} \mu_ {R} (r)}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \mathrm {d} s + 2 \int_ {k} ^ {t} \frac {\int_ {r _ {0}} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \left(\frac {R}{r _ {0}} - 1\right) \mathrm {d} \mu_ {R} (r)}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \mathrm {d} s. \tag {65}
831
+ $$
832
+
833
+ Using that $\log x\leq x$ , for $k\in [t - c_{\epsilon},t]$ we can bound the first term of (65) as
834
+
835
+ $$
836
+ \begin{array}{l} 2 \int_ {k} ^ {t} \frac {\int_ {0} ^ {r _ {0}} \left(\frac {r}{R}\right) ^ {2 s} \log \left(\frac {R}{r}\right) \mathrm {d} \mu_ {R} (r)}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \mathrm {d} s \leq 2 \int_ {k} ^ {t} \frac {\int_ {0} ^ {r _ {0}} \left(\frac {r}{R}\right) ^ {2 s - 1} \mathrm {d} \mu_ {R} (r)}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \mathrm {d} s \\ \leq 2 (t - k) \frac {\left(\frac {r _ {0}}{R}\right) ^ {2 k - 1}}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 t} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \tag {66} \\ \leq 2 c _ {\epsilon} \left(\frac {r _ {0}}{R}\right) ^ {2 (t - c _ {\epsilon}) - 1} Q _ {t, R} ^ {2} \\ \leq 2 c _ {\epsilon} \left(\frac {r _ {0}}{R}\right) ^ {2 (t - c _ {\epsilon}) - 1} \frac {(2 t + 1) ^ {2 \kappa}}{c _ {1} ^ {2}} \xrightarrow {t \to \infty} 0. \\ \end{array}
837
+ $$
838
+
839
+ In the last inequality we use that, by Proposition D.2, for $t$ large enough, $Q_{t,R} = \frac{R^{2t}}{K_{t,R}^2} \leq (2t + 1)^{\kappa} / c_1$ . For $k \in [t - c_\epsilon, t]$ , the second term of (65) can be bounded as
840
+
841
+ $$
842
+ \begin{array}{l} 2 \int_ {k} ^ {t} \frac {\int_ {r _ {0}} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \left(\frac {R}{r _ {0}} - 1\right) \mathrm {d} \mu_ {R} (r)}{\left(\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 s} \mathrm {d} \mu_ {R} (r)\right) ^ {2}} \mathrm {d} s \leq 2 (t - k) \left(\frac {R}{r _ {0}} - 1\right) \frac {1}{\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 t} \mathrm {d} \mu_ {R} (r)} \\ \leq 2 c _ {\epsilon} \left(\frac {R}{r _ {0}} - 1\right) \frac {1}{\int_ {0} ^ {R} \left(\frac {r}{R}\right) ^ {2 t} \mathrm {d} \mu_ {R} (r)} \tag {67} \\ \le \epsilon Q _ {t, R}. \\ \end{array}
843
+ $$
844
+
845
+ From (65), (66) and (67), we obtain that for $t$ large enough, for $k \in [t - c_{\epsilon}, t]$ ,
846
+
847
+ $$
848
+ \int_ {k} ^ {t} \frac {d}{d s} Q _ {s, R} d s \leq 2 \epsilon Q _ {t, R}. \tag {68}
849
+ $$
850
+
851
+ Hence, we can bound the right-hand side of (62):
852
+
853
+ $$
854
+ \begin{array}{l} \frac {Q _ {t , R}}{\sum_ {k = t - c _ {\epsilon}} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} \left(Q _ {t , R} - \int_ {k} ^ {t} \frac {d}{d s} Q _ {s , R} d s\right)} \leq \frac {Q _ {t , R}}{\sum_ {k = t - c _ {\epsilon}} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k} \left(Q _ {t , R} - 2 \epsilon Q _ {t , R}\right)} \tag {69} \\ = \frac {1}{(1 - 2 \epsilon) \sum_ {k = t - c _ {\epsilon}} ^ {t} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {t - k}} = \frac {1}{(1 - 2 \epsilon) \sum_ {k = 0} ^ {c _ {\epsilon}} \left(\frac {R ^ {2}}{C ^ {2}}\right) ^ {k}} \leq \frac {1 + \epsilon}{1 - 2 \epsilon} \left(1 - \frac {R ^ {2}}{C ^ {2}}\right). \\ \end{array}
855
+ $$
856
+
857
+ The last inequality follows from the definition of $c_{\epsilon}$ in (59). Since $\epsilon$ is arbitrary, by the sandwich theorem applied on (60), (61) and (69),
858
+
859
+ $$
860
+ \lim _ {t \rightarrow \infty} \frac {\frac {C ^ {2 t}}{K _ {t , R} ^ {2}}}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{K _ {k , R} ^ {2}}} = 1 - \frac {R ^ {2}}{C ^ {2}}. \tag {70}
861
+ $$
862
+
863
+ ![](images/7e9d653274ae9687fd6ab6de2600da77f75481df669b1aebec5bd3f497d40c56.jpg)
864
+
865
+ Proposition D.2. Under the assumptions of Theorem 4.2, we have that there exists $c_{1} > 0$ such that for $t$ large enough,
866
+
867
+ $$
868
+ K _ {t, R} ^ {2} \geq c _ {1} R ^ {2 t} (2 t + 1) ^ {- \kappa}. \tag {71}
869
+ $$
870
+
871
+ Proof. By the assumption on $\mu_R$ , there exist $r_0, c_1, \kappa > 0$ such that
872
+
873
+ $$
874
+ \begin{array}{l} K _ {t, R} ^ {2} \stackrel {\mathrm {d e f}} {=} 2 \pi \int_ {0} ^ {R} r ^ {2 t} \mathrm {d} \mu_ {R} (r) = 2 \pi \int_ {0} ^ {r _ {0}} r ^ {2 t} \mathrm {d} \mu_ {R} (r) + 2 \pi \int_ {r _ {0}} ^ {R} r ^ {2 t} \mathrm {d} \mu_ {R} (r) \\ \geq 2 \pi c _ {1} \int_ {r _ {0}} ^ {R} r ^ {2 t} (R - r) ^ {\kappa - 1} \mathrm {d} r = - 2 \pi c _ {1} \int_ {0} ^ {r _ {0}} r ^ {2 t} (R - r) ^ {\kappa - 1} \mathrm {d} r + 2 \pi c _ {1} \int_ {0} ^ {R} r ^ {2 t} (R - r) ^ {\kappa - 1} \mathrm {d} r \\ \geq - 2 \pi c _ {1} R r _ {0} ^ {2 t} + 2 \pi c _ {1} R ^ {2 t + \kappa} B (2 t + 1, \kappa). \tag {72} \\ \end{array}
875
+ $$
876
+
877
+ where the beta function $B(x,y)$ is defined as
878
+
879
+ $$
880
+ B (x, y) \stackrel {\mathrm {def}} {=} \int_ {0} ^ {1} r ^ {x - 1} (1 - r) ^ {y - 1} \mathrm {d} r. \tag {73}
881
+ $$
882
+
883
+ Using the link between the beta function and the gamma function $B(x,y) = \Gamma (x)\Gamma (y) / \Gamma (x + y)$ , and Stirling's approximation, we obtain that for fixed $y$ and large $x$ ,
884
+
885
+ $$
886
+ B (x, y) \sim \Gamma (y) x ^ {- y}. \tag {74}
887
+ $$
888
+
889
+ Hence, for $t$ large enough, $B(2t + 1,\kappa)\sim \Gamma (\kappa)(2t + 1)^{-\kappa} = (\kappa -1)!(2t + 1)^{-\kappa}$ . Therefore, from (72) we obtain that there exists $c_{1}^{\prime}$ , depending only on $\kappa$ and $r_0$ , such that for $t$ large enough
890
+
891
+ $$
892
+ K _ {t, R} ^ {2} \geq - 2 \pi c _ {1} R r _ {0} ^ {2 t} + 2 \pi c _ {1} R ^ {2 t + \kappa} (\kappa - 1)! (2 t + 1) ^ {- \kappa} \geq c _ {1} ^ {\prime} R ^ {2 t} (2 t + 1) ^ {- \kappa}. \tag {75}
893
+ $$
894
+
895
+ ![](images/7bfe370b70dcd593b78602bdf21d5d6ed327de7142bf4a99982d9a9a261068f5.jpg)
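+ 
+ The asymptotic (74) is easy to check numerically via log-Gamma (a small sketch of ours using only the standard library):
+ 
+ ```python
+ import math
+ 
+ # B(x, y) = Gamma(x) Gamma(y) / Gamma(x + y) ~ Gamma(y) x^{-y} for large x.
+ def log_beta(x, y):
+     return math.lgamma(x) + math.lgamma(y) - math.lgamma(x + y)
+ 
+ kappa = 3
+ for t in (10, 100, 1000):
+     x = 2 * t + 1
+     ratio = math.exp(log_beta(x, kappa) + kappa * math.log(x)) / math.gamma(kappa)
+     print(ratio)                       # -> 1 as t grows
+ ```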
896
+
897
+ Proposition 5.2. Suppose that the assumptions of Theorem 4.2 hold with $\mu_R \in \mathcal{P}([0, R])$ fulfilling $\mu_R([r, R]) = \Omega((R - r)^\kappa)$ for $r$ in $[r_0, R]$ , for some $r_0 \in [0, R)$ and some $\kappa \in \mathbb{Z}_{>0}$ . Then, the average-case asymptotically optimal algorithm is, with $\pmb{y}_0 = \pmb{x}_0$ :
898
+
899
+ $$
900
+ \boldsymbol {y} _ {t} = \boldsymbol {y} _ {t - 1} - \frac {1}{C} F (\boldsymbol {y} _ {t - 1}),
901
+ $$
902
+
903
+ $$
904
+ \boldsymbol {x} _ {t} = \left(\frac {R}{C}\right) ^ {2} \boldsymbol {x} _ {t - 1} + \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \boldsymbol {y} _ {t}. \tag {20}
905
+ $$
906
+
907
+ Moreover, the convergence rate for this algorithm is asymptotically the same as for the optimal algorithm in Theorem 4.2. Namely, $\lim_{t\to \infty}\mathbb{E}\left[\mathrm{dist}(\pmb{x}_t,\mathcal{X}^\star)\right]B_t = 1$ .
908
+
909
+ Proof. The proof follows directly from Theorem 4.2 and Proposition D.1. See (77) and (79) in Proposition D.3 for the statement regarding the convergence rate.
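+ 
+ Scheme (20) amounts to gradient-type steps followed by an exponential moving average with constant weight $(R/C)^2$ ; a self-contained sketch (our own illustration, including an assumed construction of a test operator via the circular law) is:
+ 
+ ```python
+ import numpy as np
+ 
+ def disk_asymptotic_method(F, x0, C, R, T):
+     """Sketch of (20): steps of size 1/C plus an EMA with weight (R/C)^2."""
+     y, x = x0.copy(), x0.copy()
+     rho = (R / C) ** 2
+     for _ in range(T):
+         y = y - F(y) / C
+         x = rho * x + (1 - rho) * y
+     return x
+ 
+ # Illustrative usage: affine F whose matrix has spectrum in ~D(C, R).
+ rng = np.random.default_rng(7)
+ d, C, R = 50, 3.0, 1.0
+ A = C * np.eye(d) + (R / np.sqrt(d)) * rng.standard_normal((d, d))
+ x_star = rng.standard_normal(d)
+ F = lambda z: A @ (z - x_star)
+ print(np.linalg.norm(disk_asymptotic_method(F, np.zeros(d), C, R, 100) - x_star))
+ ```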
910
+
911
+ Proposition D.3. For the average-case optimal algorithm (18),
912
+
913
+ $$
914
+ \mathbb {E} \operatorname {dist} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) = \xi_ {\mathrm {opt}} (t) \stackrel {\mathrm {def}} {=} \frac {1}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{K _ {k , R} ^ {2}}} \tag {76}
915
+ $$
916
+
917
+ For the average-case asymptotically optimal algorithm (20),
918
+
919
+ $$
920
+ \mathbb {E} \operatorname {dist} \left(\boldsymbol {x} _ {t}, \mathcal {X} ^ {\star}\right) = \xi_ {\mathrm {asymp}} (t) \stackrel {\mathrm {def}} {=} \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) ^ {2} \sum_ {k = 1} ^ {t} \frac {K _ {k , R} ^ {2}}{C ^ {2 k}} \left(\frac {R}{C}\right) ^ {4 (t - k)} + \left(\frac {R}{C}\right) ^ {4 t} \tag {77}
921
+ $$
922
+
923
+ For the iterates $\mathbf{y}_t$ in (18), i.e. gradient descent with stepsize $1 / C$ , we have
924
+
925
+ $$
926
+ \mathbb {E} \operatorname {dist} \left(\boldsymbol {y} _ {t}, \mathcal {X} ^ {\star}\right) = \xi_ {\mathrm {GD}} (t) \stackrel {\mathrm {def}} {=} \frac {K _ {t , R} ^ {2}}{C ^ {2 t}} \tag {78}
927
+ $$
928
+
929
+ Moreover, for all $t \geq 0$ , we have $\xi_{\mathrm{opt}}(t) \leq \xi_{\mathrm{asymp}}(t)$ , and under the assumptions of Proposition D.1,
930
+
931
+ $$
932
+ \lim _ {t \rightarrow \infty} \frac {\xi_ {\mathrm {opt}} (t)}{\xi_ {\mathrm {asymp}} (t)} = 1, \quad \lim _ {t \rightarrow \infty} \frac {\xi_ {\mathrm {opt}} (t)}{\xi_ {\mathrm {GD}} (t)} = \lim _ {t \rightarrow \infty} \frac {\xi_ {\mathrm {asymp}} (t)}{\xi_ {\mathrm {GD}} (t)} = 1 - \left(\frac {R}{C}\right) ^ {2} \tag {79}
933
+ $$
934
+
935
+ Proof. To show (76), (77), (78), we use the expression $\pmb{x}_t - \pmb{x}^\star = P_t(\pmb{A})(\pmb{x}_0 - \pmb{x}^\star)$ (Proposition 2.1) and then evaluate $\| P_t\|_\mu^2 = \int_{\mathbb{C}\setminus \{0\}}|P_t|^2 \, \mathrm{d}\mu$ (Theorem 2.1).
936
+
937
+ For (76), the value of $\| P_t\|_\mu^2$ follows directly from Theorem 2.3, which states that the value for the optimal residual polynomial $P_{t}$ is
938
+
939
+ $$
940
+ \frac {1}{\sum_ {k = 0} ^ {t} | \phi_ {k} (0) | ^ {2}} = \frac {1}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{K _ {k , R} ^ {2}}}. \tag {80}
941
+ $$
942
+
943
+ A simple proof by induction shows that for the asymptotically optimal algorithm (20), the following expression holds for all $t \geq 0$ :
944
+
945
+ $$
946
+ \boldsymbol {x} _ {t} - \boldsymbol {x} ^ {\star} = \left(\left(\frac {R}{C}\right) ^ {2 t} + \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \sum_ {k = 1} ^ {t} \left(1 - \frac {\boldsymbol {A}}{C}\right) ^ {k} \left(\frac {R}{C}\right) ^ {2 (t - k)}\right) \left(\boldsymbol {x} _ {0} - \boldsymbol {x} ^ {\star}\right) \tag {81}
947
+ $$
948
+
949
+ Thus,
950
+
951
+ $$
952
+ \begin{array}{l} P _ {t} (\lambda) = \left(\frac {R}{C}\right) ^ {2 t} + \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \sum_ {k = 1} ^ {t} \left(1 - \frac {\lambda}{C}\right) ^ {k} \left(\frac {R}{C}\right) ^ {2 (t - k)} \tag {82} \\ = \left(\frac {R}{C}\right) ^ {2 t} \phi_ {0} (\lambda) + \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \sum_ {k = 1} ^ {t} \frac {K _ {k , R}}{C ^ {k}} \phi_ {k} (\lambda) \left(\frac {R}{C}\right) ^ {2 (t - k)}, \\ \end{array}
953
+ $$
954
+
955
+ which concludes the proof of (77), as
956
+
957
+ $$
958
+ \left\| P _ {t} \right\| _ {\mu} ^ {2} = \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) ^ {2} \sum_ {k = 1} ^ {t} \frac {K _ {k , R} ^ {2}}{C ^ {2 k}} \left(\frac {R}{C}\right) ^ {4 (t - k)} + \left(\frac {R}{C}\right) ^ {4 t}. \tag {83}
959
+ $$
960
+
961
+ By equation (52),
962
+
963
+ $$
964
+ \boldsymbol {y} _ {t} - \boldsymbol {x} ^ {\star} = \left(1 - \frac {\boldsymbol {A}}{C}\right) ^ {t} \left(\boldsymbol {y} _ {0} - \boldsymbol {x} ^ {\star}\right) = (- 1) ^ {t} \frac {K _ {t , R}}{C ^ {t}} \phi_ {t} (\boldsymbol {A}) \left(\boldsymbol {y} _ {0} - \boldsymbol {x} ^ {\star}\right) \tag {84}
965
+ $$
966
+
967
+ Thus, for the $\mathbf{y}_t$ iterates, $\| P_t\|_\mu^2 = \frac{K_{t,R}^2}{C^{2t}}$ , and (78) follows.
968
+
969
+ Now, $\xi_{\mathrm{opt}}(t) \leq \xi_{\mathrm{asymp}}(t)$ for all $t \geq 0$ is a consequence of $\xi_{\mathrm{opt}}(t)$ being the rate of the optimal algorithm. Moreover,
970
+
971
+ $$
972
+ \lim _ {t \rightarrow \infty} \frac {\xi_ {\mathrm {o p t}} (t)}{\xi_ {\mathrm {G D}} (t)} = \lim _ {t \rightarrow \infty} \frac {\frac {C ^ {2 t}}{K _ {t , R} ^ {2}}}{\sum_ {k = 0} ^ {t} \frac {C ^ {2 k}}{K _ {k , R} ^ {2}}} = 1 - \frac {R ^ {2}}{C ^ {2}} \tag {85}
973
+ $$
974
+
975
+ follows from Proposition D.1. To show $\lim_{t\to \infty}\frac{\xi_{\mathrm{asymp}}(t)}{\xi_{\mathrm{GD}}(t)} = 1 - \frac{R^2}{C^2}$ , which concludes the proof, we rewrite
976
+
977
+ $$
978
+ \xi_ {\text {a s y m p}} (t) = \left(\frac {R}{C}\right) ^ {2 t} \left(\left(1 - \left(\frac {R}{C}\right) ^ {2}\right) ^ {2} \sum_ {k = 1} ^ {t} \frac {1}{Q _ {k , R}} \left(\frac {R}{C}\right) ^ {2 (t - k)} + \left(\frac {R}{C}\right) ^ {2 t}\right), \tag {86}
979
+ $$
980
+
981
+ using that by definition, $Q_{k,R} = R^{2k} / K_{k,R}^2$ . Now, let $c_{\epsilon} \in \mathbb{Z}_{\geq 0}$ such that
982
+
983
+ $$
984
+ \sum_ {k = c _ {\epsilon}} ^ {\infty} \left(\frac {R}{C}\right) ^ {2 k} \leq \epsilon . \tag {87}
985
+ $$
986
+
987
+ Using the same argument as in Proposition D.1 (see (68)), for $t$ large enough and $k \in [t - c_{\epsilon}, t]$ ,
988
+
989
+ $$
990
+ \int_ {k} ^ {t} \frac {d}{d s} Q _ {s, R} d s \leq 2 \epsilon Q _ {t, R}. \tag {88}
991
+ $$
992
+
993
+ Hence, for $t$ large enough,
994
+
995
+ $$
996
+ \begin{array}{l} \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) ^ {2} \sum_ {k = 1} ^ {t} \frac {1}{Q _ {k , R}} \left(\frac {R}{C}\right) ^ {2 (t - k)} + \left(\frac {R}{C}\right) ^ {2 t} \\ = \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) ^ {2} \left(\sum_ {k = t - c _ {\epsilon}} ^ {t} \frac {1}{Q _ {t , R} - \int_ {k} ^ {t} \frac {d}{d s} Q _ {s , R}} \left(\frac {R}{C}\right) ^ {2 (t - k)} + \sum_ {k = 1} ^ {t - c _ {\epsilon}} \frac {1}{Q _ {k , R}} \left(\frac {R}{C}\right) ^ {2 (t - k)}\right) + \left(\frac {R}{C}\right) ^ {2 t} \\ \leq \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) ^ {2} \left(\frac {1}{(1 - 2 \epsilon) Q _ {t , R}} \sum_ {k = t - c _ {\epsilon}} ^ {t} \left(\frac {R}{C}\right) ^ {2 (t - k)} + \sum_ {k = 1} ^ {t - c _ {\epsilon}} \left(\frac {R}{C}\right) ^ {2 (t - k)}\right) + \epsilon \\ \leq \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \left(\frac {1}{(1 - 2 \epsilon) Q _ {t , R}} + \left(1 - \left(\frac {R}{C}\right) ^ {2}\right) \epsilon\right) + \epsilon , \tag {89} \\ \end{array}
997
+ $$
998
+
999
+ which can be made arbitrarily close to $\left(1 - \left(\frac{R}{C}\right)^2\right)\frac{1}{Q_{t,R}}$ by taking $\epsilon > 0$ small enough. Plugging this into (86), we obtain that we can make $\xi_{\mathrm{asymp}}(t)$ arbitrarily close to $\left(1 - \left(\frac{R}{C}\right)^2\right)\left(\frac{R}{C}\right)^{2t}\frac{1}{Q_{t,R}} = \left(1 - \left(\frac{R}{C}\right)^2\right)\xi_{\mathrm{GD}}(t)$ by taking $t$ large enough.
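+ 
+ As a closing sanity check, the three rates of Proposition D.3 can be compared numerically in the uniform-disk case, where $C^{2k}/K_{k,R}^2 = (k+1)(C/R)^{2k}$ ; the sketch below (our own code, with assumed parameter values) verifies $\xi_{\mathrm{opt}} \leq \xi_{\mathrm{asymp}}$ and the limits in (79):
+ 
+ ```python
+ import numpy as np
+ 
+ C, R, T = 2.0, 1.0, 200
+ rho = (R / C) ** 2
+ t = np.arange(T + 1)
+ w = (t + 1) / rho ** t                  # C^{2k} / K_{k,R}^2 for k = 0..T
+ xi_opt = 1.0 / np.cumsum(w)             # eq. (76)
+ xi_gd = 1.0 / w                         # eq. (78)
+ 
+ xi_asymp = np.empty(T + 1)              # eq. (77)
+ xi_asymp[0] = 1.0
+ for n in range(1, T + 1):
+     k = np.arange(1, n + 1)
+     xi_asymp[n] = ((1 - rho) ** 2
+                    * np.sum(rho ** k / (k + 1) * rho ** (2 * (n - k)))
+                    + rho ** (2 * n))
+ 
+ print(np.all(xi_opt <= xi_asymp * (1 + 1e-12)))   # optimality of (18)
+ print(xi_opt[-1] / xi_asymp[-1])                  # -> 1
+ print(xi_opt[-1] / xi_gd[-1], 1 - rho)            # -> 1 - (R/C)^2
+ ```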
averagecaseaccelerationforbilineargamesandnormalmatrices/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c62bd8349adbc9d9d35ad10c830aad5d11c26ffe8fe3c0d2edd7c6870a670adf
3
+ size 1321387
averagecaseaccelerationforbilineargamesandnormalmatrices/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:395f072646c7eb93d055c99c57c69e24abe6207ad45f7170dc8b1a68ff7b8775
3
+ size 1015280
bagoftricksforadversarialtraining/0e009d16-83be-41b1-8610-beb69bfecf8e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bba8ff54b6646d8bba05dd67b75e002898d92de3a061710022b2c7352230d899
3
+ size 119972
bagoftricksforadversarialtraining/0e009d16-83be-41b1-8610-beb69bfecf8e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abbd115ef5e78121f9f7798d521fd487c54b84136cd3a62fc352df1a8fb6f563
3
+ size 152380