SlowGuess committed on
Commit
0b24f85
·
verified ·
1 Parent(s): 8f75b11

Add Batch c2ddbeb7-30f6-4f38-9f90-8187ee90d316

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/e1fc6c9c-4fe3-4bc9-8957-c1f3dec8836f_content_list.json +3 -0
  2. redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/e1fc6c9c-4fe3-4bc9-8957-c1f3dec8836f_model.json +3 -0
  3. redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/e1fc6c9c-4fe3-4bc9-8957-c1f3dec8836f_origin.pdf +3 -0
  4. redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/full.md +296 -0
  5. redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/images.zip +3 -0
  6. redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/layout.json +3 -0
  7. reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/a4158dee-af5b-4014-9608-b144bb9ff1be_content_list.json +3 -0
  8. reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/a4158dee-af5b-4014-9608-b144bb9ff1be_model.json +3 -0
  9. reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/a4158dee-af5b-4014-9608-b144bb9ff1be_origin.pdf +3 -0
  10. reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/full.md +585 -0
  11. reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/images.zip +3 -0
  12. reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/layout.json +3 -0
  13. weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/f7f37953-e3be-4d76-9b64-a0fdf5d77a23_content_list.json +3 -0
  14. weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/f7f37953-e3be-4d76-9b64-a0fdf5d77a23_model.json +3 -0
  15. weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/f7f37953-e3be-4d76-9b64-a0fdf5d77a23_origin.pdf +3 -0
  16. weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/full.md +325 -0
  17. weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/images.zip +3 -0
  18. weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/layout.json +3 -0
  19. weightedroccurveincostspaceextendingauctocostsensitivelearning/f607b8a6-a90d-4725-8e04-9ef5dbbf4297_content_list.json +3 -0
  20. weightedroccurveincostspaceextendingauctocostsensitivelearning/f607b8a6-a90d-4725-8e04-9ef5dbbf4297_model.json +3 -0
  21. weightedroccurveincostspaceextendingauctocostsensitivelearning/f607b8a6-a90d-4725-8e04-9ef5dbbf4297_origin.pdf +3 -0
  22. weightedroccurveincostspaceextendingauctocostsensitivelearning/full.md +398 -0
  23. weightedroccurveincostspaceextendingauctocostsensitivelearning/images.zip +3 -0
  24. weightedroccurveincostspaceextendingauctocostsensitivelearning/layout.json +3 -0
  25. weitzmansruleforpandorasboxwithcorrelations/a7f0b3be-a11f-4a26-ba19-8c7b79febafe_content_list.json +3 -0
  26. weitzmansruleforpandorasboxwithcorrelations/a7f0b3be-a11f-4a26-ba19-8c7b79febafe_model.json +3 -0
  27. weitzmansruleforpandorasboxwithcorrelations/a7f0b3be-a11f-4a26-ba19-8c7b79febafe_origin.pdf +3 -0
  28. weitzmansruleforpandorasboxwithcorrelations/full.md +665 -0
  29. weitzmansruleforpandorasboxwithcorrelations/images.zip +3 -0
  30. weitzmansruleforpandorasboxwithcorrelations/layout.json +3 -0
  31. whatamessmultidomainevaluationofzeroshotsemanticsegmentation/e8b1c9b1-4de1-411f-af83-670ef5d9788e_content_list.json +3 -0
  32. whatamessmultidomainevaluationofzeroshotsemanticsegmentation/e8b1c9b1-4de1-411f-af83-670ef5d9788e_model.json +3 -0
  33. whatamessmultidomainevaluationofzeroshotsemanticsegmentation/e8b1c9b1-4de1-411f-af83-670ef5d9788e_origin.pdf +3 -0
  34. whatamessmultidomainevaluationofzeroshotsemanticsegmentation/full.md +225 -0
  35. whatamessmultidomainevaluationofzeroshotsemanticsegmentation/images.zip +3 -0
  36. whatamessmultidomainevaluationofzeroshotsemanticsegmentation/layout.json +3 -0
  37. whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/af41eb11-cc6a-4764-a585-c8ce802c8fc5_content_list.json +3 -0
  38. whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/af41eb11-cc6a-4764-a585-c8ce802c8fc5_model.json +3 -0
  39. whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/af41eb11-cc6a-4764-a585-c8ce802c8fc5_origin.pdf +3 -0
  40. whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/full.md +0 -0
  41. whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/images.zip +3 -0
  42. whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/layout.json +3 -0
  43. whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/aa5e9a5b-f5b2-469e-9199-387531b9ac3f_content_list.json +3 -0
  44. whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/aa5e9a5b-f5b2-469e-9199-387531b9ac3f_model.json +3 -0
  45. whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/aa5e9a5b-f5b2-469e-9199-387531b9ac3f_origin.pdf +3 -0
  46. whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/full.md +421 -0
  47. whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/images.zip +3 -0
  48. whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/layout.json +3 -0
  49. whatcanwelearnfromunlearnabledatasets/94126f6c-48a3-4232-a5d9-f65f5096c8e7_content_list.json +3 -0
  50. whatcanwelearnfromunlearnabledatasets/94126f6c-48a3-4232-a5d9-f65f5096c8e7_model.json +3 -0
redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/e1fc6c9c-4fe3-4bc9-8957-c1f3dec8836f_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b10754a7d11769e3f18677b2b714730bf8d78e9dbced51089e5943fffe6793a8
3
+ size 85913
redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/e1fc6c9c-4fe3-4bc9-8957-c1f3dec8836f_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5000611080751966fdc2ccf6fefedcd4c8dcd185f7226f12419e08d2c6b9f29
3
+ size 104344
redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/e1fc6c9c-4fe3-4bc9-8957-c1f3dec8836f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2dee8a3ab8989991b750a1a3eb469b42839780b7e3c3f7324558b2c5873eadc2
3
+ size 18138770
redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/full.md ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ReDS: Offline Reinforcement Learning With Heteroskedastic Datasets via Support Constraints
2
+
3
+ Anikait Singh $^{1,*}$ , Aviral Kumar $^{1,*}$ , Quan Vuong $^{2}$ , Yevgen Chebotar $^{2}$ , Sergey Levine $^{1}$
4
+
5
+ $^{1}$ UC Berkeley, $^{2}$ Google DeepMind (*Equal contribution)
6
+
7
+ asap7772@berkeley.edu
8
+
9
+ # Abstract
10
+
11
+ Offline reinforcement learning (RL) learns policies entirely from static datasets. Practical applications of offline RL will inevitably require learning from datasets where the variability of demonstrated behaviors changes non-uniformly across the state space. For example, at a red light, nearly all human drivers behave similarly by stopping, but when merging onto a highway, some drivers merge quickly, efficiently, and safely, while many hesitate or merge dangerously. Both theoretically and empirically, we show that typical offline RL methods, which are based on distribution constraints, fail to learn from data with such non-uniform variability, due to the requirement to stay close to the behavior policy to the same extent across the state space. Ideally, the learned policy should be free to choose per state how closely to follow the behavior policy to maximize long-term return, as long as the learned policy stays within the support of the behavior policy. To instantiate this principle, we reweight the data distribution in conservative Q-learning (CQL) to obtain an approximate support constraint formulation. The reweighted distribution is a mixture of the current policy and an additional policy trained to mine poor actions that are likely under the behavior policy. Our method, CQL (ReDS), is theoretically motivated, and improves performance across a wide range of offline RL problems in games, navigation, and pixel-based manipulation.
12
+
13
+ # 1 Introduction
14
+
15
+ Recent advances in offline RL [39, 36] hint at exciting possibilities in learning high-performing policies, entirely from offline datasets, without requiring dangerous [19] or expensive [25] active interaction. Analogously to the importance of data diversity in supervised learning [9], the practical benefits of offline RL depend heavily on the coverage of behavior in the offline datasets [35]. Intuitively, the dataset must illustrate the consequences of a diverse range of behaviors, so that an offline RL method can determine what behaviors lead to high returns, ideally returns that are significantly higher than the best single behavior in the dataset.
16
+
17
+ One easy option to attain this kind of coverage is to combine many realistic sources of data, but doing so can lead to the variety of demonstrated behaviors varying in highly non-uniform ways across the state space, i.e. the dataset is heteroskedastic. For example, a driving dataset might show very high variability in driving habits, with some drivers being timid and some more aggressive, but remain remarkably consistent in "critical" states (e.g., human drivers are extremely unlikely to swerve in an empty road or drive off a bridge). A good offline RL algorithm should combine the best parts of each behavior in the dataset – e.g., in the above example, the algorithm should produce a policy that is as good as the best human in each situation, which would be better than any human driver overall. At the same time, the learned policy should not attempt to extrapolate to novel actions in subsets of the state space where the distribution of demonstrated behaviors is narrow (e.g., the algorithm should not attempt to drive off a bridge). How effectively can current offline RL methods selectively choose on a per-state basis how closely to stick to the behavior policy?
18
+
19
+ Most existing methods [32, 33, 28, 27, 53, 17, 23] constrain the learned policy to stay close to the behavior policy with so-called "distribution constraints". Using a combination of empirical and theoretical evidence, we first show that distribution constraints are insufficient when the heteroskedasticity of the demonstrated behaviors varies non-uniformly across states, because the strength of the constraint is state-agnostic, and may be overly conservative at some states even when it is not conservative enough at other states. We also devise a measure of heteroskedasticity that enables us to determine if certain offline datasets would be challenging for distribution constraints.
20
+
21
+ Our second contribution is a simple observation: distribution constraints against a reweighted version of the behavior policy give rise to support constraints. That is, the return-maximization optimization process can freely choose per-state how much the learned policy should stay close to the behavior policy, so long as the learned policy remains within the data support. We show that it is convenient to instantiate this insight on top of conservative Q-learning (CQL) [33], a recent offline RL method. The new method, CQL (ReDS), minimally changes the form of regularization and the design decisions employed by CQL, and inherits existing hyper-parameter values. CQL (ReDS) attains better performance than recent distribution constraint methods on a variety of tasks with more heteroskedastic distributions.
22
+
23
+ # 2 Preliminaries
24
+
25
+ The goal in offline RL is to find the optimal policy in a Markov decision process (MDP) specified by the tuple $\mathcal{M} = (\mathcal{S},\mathcal{A},T,r,\mu_0,\gamma)$ . $\mathcal{S},\mathcal{A}$ denote the state and action spaces. $T(\mathbf{s}'|\mathbf{s},\mathbf{a})$ and $r(\mathbf{s},\mathbf{a})$ represent the dynamics and reward function. $\mu_0(s)$ denotes the initial state distribution. $\gamma \in (0,1)$ denotes the discount factor. We wish to learn a policy that maximizes return, denoted by $J(\pi) \coloneqq \frac{1}{1 - \gamma}\mathbb{E}_{(\mathbf{s}_t,\mathbf{a}_t)\sim \pi}[\sum_t\gamma^t r(\mathbf{s}_t,\mathbf{a}_t)]$ . We must find this policy while only having access to an offline dataset of transitions collected using a behavior policy $\pi_{\beta}$ , $\mathcal{D} = \{(\mathbf{s},\mathbf{a},r,\mathbf{s}')\}$ .
26
+
27
+ Offline RL via distributional constraints. Most offline RL algorithms regularize the learned policy $\pi$ from querying the target Q-function on unseen actions [17, 30], either implicitly or explicitly. For our theoretical analysis, we will abstract the behavior of distributional constraint offline RL algorithms into a generic formulation following Kumar et al. [33]. As shown in Equation 1, we consider the problem where we must maximize the return of the learned policy $\pi$ (in the empirical MDP) $\widehat{J} (\pi)$ , while also penalizing the divergence from $\pi_{\beta}$ :
28
+
29
+ $$
30
+ \max _ {\pi} \mathbb {E} _ {\mathbf {s} \sim \widehat {d} ^ {\pi}} \left[ \widehat {J} (\pi) - \alpha D (\pi , \pi_ {\beta}) (\mathbf {s}) \right], \tag {1}
31
+ $$
32
+
33
+ where $D$ denotes a divergence between the learned policy $\pi$ and the behavior policy $\pi_{\beta}$ at state $\mathbf{s}$ .
34
+
35
+ Conservative Q-learning. [33] enforces the distributional constraint on the policy implicitly. To see why this is the case, consider the CQL objective, which consists of two terms:
36
+
37
+ $$
38
+ \min _ {\theta} \alpha \underbrace {\left(\mathbb {E} _ {\mathbf {s} \sim \mathcal {D} , \mathbf {a} \sim \pi} \left[ Q _ {\theta} (\mathbf {s} , \mathbf {a}) \right] - \mathbb {E} _ {\mathbf {s} , \mathbf {a} \sim \mathcal {D}} \left[ Q _ {\theta} (\mathbf {s} , \mathbf {a}) \right]\right)} _ {\mathcal {R} (\theta)} + \frac {1}{2} \mathbb {E} _ {\mathbf {s}, \mathbf {a}, \mathbf {s} ^ {\prime} \sim \mathcal {D}} \left[ \left(Q _ {\theta} (\mathbf {s}, \mathbf {a}) - \mathcal {B} ^ {\pi} \bar {Q} (\mathbf {s}, \mathbf {a})\right) ^ {2} \right], \tag {2}
39
+ $$
40
+
41
+ where $\mathcal{B}^{\pi}\bar{Q} (\mathbf{s},\mathbf{a})$ is the Bellman backup operator applied to a delayed target Q-network, $\bar{Q}$ : $\mathcal{B}^{\pi}\bar{Q} (\mathbf{s},\mathbf{a})\coloneqq r(\mathbf{s},\mathbf{a}) + \gamma \mathbb{E}_{\mathbf{a}^{\prime}\sim \pi (\mathbf{a}^{\prime}|\mathbf{s}^{\prime})}[\bar{Q} (\mathbf{s}^{\prime},\mathbf{a}^{\prime})]$ . The second term (in blue) is the standard TD error [40, 18, 22]. The first term $\mathcal{R}(\theta)$ (in red) attempts to prevent overestimation in the Q-values for out-of-distribution (OOD) actions by minimizing the Q-values under a distribution $\mu (\mathbf{a}|\mathbf{s})$ , which is automatically chosen to pick actions with high Q-values $Q_{\theta}(\mathbf{s},\mathbf{a})$ , and counterbalances by maximizing the Q-values of the actions in the dataset. Kumar et al. [33] show that Equation 2 gives rise to a pessimistic Q-function that modifies the optimal Q function by the ratios of densities, $\pi (\mathbf{a}|\mathbf{s}) / \pi_{\beta}(\mathbf{a}|\mathbf{s})$ at a given state-action pair $(s,a)$ . Formally, the Q-function obtained after one iteration is given by:
42
+
43
+ $$
44
+ Q _ {\theta} (\mathbf {s}, \mathbf {a}) := \mathcal {B} ^ {\pi} \bar {Q} (\mathbf {s}, \mathbf {a}) - \alpha \left[ \frac {\pi (\mathbf {a} | \mathbf {s})}{\pi_ {\beta} (\mathbf {a} | \mathbf {s})} - 1 \right]. \tag {3}
45
+ $$
46
+
47
+ The Q function is unchanged only if the density of the learned policy $\pi$ matches that of the behavior policy $\pi_{\beta}$ . Otherwise, for state-action pairs where $\pi (\mathbf{a}|\mathbf{s}) < \pi_{\beta}(\mathbf{a}|\mathbf{s})$ , Eq. 3 increases their Q values and encourages the policy $\pi$ to assign more mass to the action. Vice versa, if $\pi (\mathbf{a}|\mathbf{s}) > \pi_{\beta}(\mathbf{a}|\mathbf{s})$ , Eq. 3 encourages the policy $\pi$ to assign smaller density to the action $\mathbf{a}$ . In Eq. 3, $\alpha$ is a constant for every state, and hence the value function learned by CQL is altered by the ratio of action probabilities to the same extent at all possible state-action pairs. As we will discuss in the next section, this can be sub-optimal when the learnt policy should stay close to the behavior policy in some states, but not others. We elaborate on this intuition in the next section.
48
+
49
+ # 3 Why Distribution Constraints Fail with Heteroskedastic Data
50
+
51
+ In statistics, heteroskedasticity is typically used to refer to the condition when the standard deviation in a given random variable varies non-uniformly over time (see for example, Cao et al. [6]). We call an offline dataset heteroskedastic when the variability of the behavior differs in different regions of the state space: for instance, if for certain regions of the state space, the observed behaviors in the dataset assign the most probability mass to a few actions, but in other regions, the observed behaviors are more diverse. Realistic offline datasets are often heteroskedastic as they are typically generated by multiple policies, each with its own characteristics, under different conditions. E.g., driving datasets come from multiple humans [11], and many robotic datasets are collected by multiple teleoperators [10], resulting in systematic variability in different regions of the state space.
52
+
53
+ # 3.1 A Didactic Example
54
+
55
+ To understand why distribution constraints are insufficient with heteroskedastic data, we present a didactic example. Motivated by the driving scenario, we consider a maze navigation task shown in Fig. 1. The task is to navigate from the position labeled as "Start" to the position labeled as "Goal" using five actions at every possible state (L: $\leftarrow$ , R: $\rightarrow$ , U: $\uparrow$ , D: $\downarrow$ , No: No Op), while making sure that the executed actions do not hit the walls of the grid.
56
+
57
+ Dataset construction. To collect a heteroskedastic dataset, we consider a mixture of several behavior policies that attain a uniform occupancy over different states in the maze. However, the dataset action distributions differ significantly in different states. The induced action distribution is heavily biased to move towards the goal in the narrow hallways (e.g., the behavior policy moves upwards at state A). In contrast, the action distribution is quite diverse in the wider rooms. In these rooms, the behavior policy often selects actions that do not immediately move the agent towards
58
+
59
+ the goal (e.g., the behavior policy at state B), because doing so does not generally hit the walls as the rooms are wider, and hence the agent is not penalized. Whereas, the agent must take utmost precaution to not hit the walls in the narrow hallways. More details are in Appendix B.
60
+
61
+ ![](images/ab7b4eea7f1db5f579ee72830d12c59ec36888d4c65d5c10cad9d6e7c8fcdb26.jpg)
62
+ Figure 1: Failure mode of distribution constraints. In this navigation task, an offline RL algorithm must find a path from the start state to the goal state as indicated in (a). The offline dataset provided exhibits non-uniform coverage at different states, e.g., in the state marked as "B" located in a wide room has more uniform action distribution, whereas the states in the narrow hallways exhibit a more narrow action distribution. This is akin to how the behavior of human drivers varies in certain locations ("B"), but is very similar in other situations ("A"). To perform well, an algorithm must stay close to the data in the hallways ("A"), but deviate significantly from the data in the rooms ("B"), where the data supports many different behaviors (most are not good). AWR and CQL get stuck because they stay too close to the bad behavior policy in the rooms, e.g. the left and right arrows near State B in Fig (b) and (c). Our method, CQL (ReDS), learns to ignore the bad behavior action in state B and prioritizes the good action, indicated by the downward arrow near B in (d).
63
+
64
+ Representative distribution constraint algorithms such as AWR [44, 43] and CQL [33] fail to perform the task, as shown in Figure 1. To ensure fair comparison, we tune each method to its best evaluation performance using online rollouts. The visualization in Figure 1 demonstrates that these two algorithms fail to learn reasonable policies because the learned policies match the random behavior of the dataset actions too closely in the wider rooms, and therefore are unable to make progress towards the Goal position. This is a direct consequence of enforcing too strong of a constraint on the learned policy to stay close to the behaviors in the dataset. Therefore, we also evaluated the performance of CQL and AWR in this example, with lower amounts of conservatism (Appendix B) and found that utilizing a lower amount of conservatism suffers from the opposite failure mode: it is unable to prevent the policies from hitting the walls in the narrow hallways. This means that conservatism prevents the algorithm from making progress in the regions where the behavior in the dataset is more diverse, whereas not being conservative enough hurts performance in regions where the behaviors in the dataset agree with each other. The method we propose in this paper to tackle this challenge, indicated as "CQL (ReDS)", effectively traverses the maze, $80\%$ of the time.
65
+
66
+ # 3.2 Challenges with Distribution Constraints
67
+
68
+ Having seen that distribution constraints can fail in certain scenarios, we now formally characterize when offline RL datasets are heteroskedastic, and why distribution constraints may be ineffective in
69
+
70
+ ![](images/84896b313105a0a5046eae84beafc097a2537812882c70e261f3f1c9af9dc5ea.jpg)
71
+ Figure 2: Empirically computing $C_{\mathrm{diff}}^{\pi}$ with three datasets: uniform (top), mixed (middle) and skewed (bottom) on a gridworld. We also visualize $D(\pi, \pi_{\beta})(s)$ across states in the maze as the colors on different cells, a histogram of $D(\pi, \pi_{\beta})(s)$ to visualize variation in this quantity and the performance of running standard CQL. Top: The uniform distribution leads to low $C_{\mathrm{diff}}^{\pi}$ , uniform $D(\pi, \pi_{\beta})(s)$ , and highest success. Middle: The mixed distribution leads to medium $C_{\mathrm{diff}}^{\pi}$ , less uniformly distributed $D(\pi, \pi_{\beta})(s)$ , and a drop in task success. Bottom: The skewed distribution leads to a high $C_{\mathrm{diff}}^{\pi}$ , non-uniform $D(\pi, \pi_{\beta})(s)$ , and poor performance.
72
+
73
+ such scenarios. Similar to how standard analyses utilize concentrability coefficient [46], which upper bounds the ratio of state-action visitation under a policy $d^{\pi}(\mathbf{s},\mathbf{a})$ and the dataset distribution $\mu$ , i.e., $\max_{\mathbf{s},\mathbf{a}}d^{\pi}(\mathbf{s},\mathbf{a}) / \mu (\mathbf{s},\mathbf{a})\leq C^{\pi}$ , we introduce a new metric called differential concentrability, which measures dataset heteroskedasticity (i.e., the variability in the dataset behavior across different states).
74
+
75
+ Definition 3.1 (Differential concentrability.). Given a divergence $D$ over the action space, the differential concentrability of a given policy $\pi$ with respect to the behavioral policy $\pi_{\beta}$ is given by:
76
+
77
+ $$
78
+ C _ {\text {d i f f}} ^ {\pi} = \underset {\mathbf {s} _ {1}, \mathbf {s} _ {2} \sim d ^ {\pi}} {\mathbb {E}} \left[ \left(\sqrt {\frac {D (\pi , \pi_ {\beta}) (\mathbf {s} _ {1})}{\mu (\mathbf {s} _ {1})}} - \sqrt {\frac {D (\pi , \pi_ {\beta}) (\mathbf {s} _ {2})}{\mu (\mathbf {s} _ {2})}}\right) ^ {2} \right]. \tag {4}
79
+ $$
80
+
81
+ Eq. 4 measures the variation in the divergence between a given policy $\pi(\mathbf{a}|\mathbf{s})$ and the behavior policy $\pi_{\beta}(\mathbf{a}|\mathbf{s})$ weighted inversely by the density of these states in the offline dataset (i.e., $\mu(\mathbf{s})$ in the denominator). For simplicity, let us revisit the navigation example from Section 3.1 and first consider a scenario where $\mu(\mathbf{s}) = \mathrm{Unif}(S)$ . For any given policy $\pi$ , if there are states where $\pi$ chooses actions that lie on the fringe of the data distribution (e.g., in the wider rooms), as well as states where the policy $\pi$ chooses actions at the mode of the data distribution (e.g., as in the narrow passages), then $C_{\mathrm{diff}}^{\pi}$ would be large for any policy $\pi$ that we learn. Crucially, $C_{\mathrm{diff}}^{\pi}$ would be small even if the learned policy $\pi$ deviates significantly from the behavior policy $\pi_{\beta}$ , such that $D(\pi, \pi_{\beta})(\mathbf{s})$ is large, but $|D(\pi, \pi_{\beta})(\mathbf{s}_1) - D(\pi, \pi_{\beta})(\mathbf{s}_2)|$ is small, indicating the dataset is not heteroskedastic.
82
+
83
+ Connection between variability in the action distribution and high $C_{\mathrm{diff}}^{\pi}$ . Consider a simpler formula where we remove the counts $n(\mathbf{s})$ from the expression of differential concentrability and set $\pi$ in $C_{\mathrm{diff}}^{\pi}$ to be the uniform distribution over actions. Then, we can show that the value of $C_{\mathrm{diff}}^{\pi}$ is exactly equal to twice the variance of $D(\pi, \pi_{\beta})(\mathbf{s})$ across states. Therefore, we will demonstrate in Section 5 that arbitrary policy checkpoints $\pi$ learned by offline RL algorithms generally attain a low value of the variance in $D(\pi, \pi_{\beta})(\mathbf{s})$ on offline datasets from non-heteroskedastic sources, such as those covered in the D4RL [13] benchmark. Of course, we cannot always exclude the counts of states $n(\mathbf{s})$ , however, we note that in high-dimensional state spaces, such as those in our experiments, each state in the offline data is likely to be unique, thus validating the condition that $n(\mathbf{s}) = 1$ . That said, we do compute the exact value of $C_{\mathrm{diff}}^{\pi}$ (with $n(\mathbf{s})$ ) in a didactic gridworld maze shown in Figure 2. In this case, we find that our definition of $C_{\mathrm{diff}}^{\pi}$ is actually able to reflect the intuitive notion of heteroskedasticity.
84
+
85
+ We now use the definition of differential concentrability to bound both the improvement and deprovement of $\pi$ w.r.t. $\pi_{\beta}$ for distribution constraint algorithms using the framework of safe policy
86
+
87
+ improvement [37, 33]. We show that when $C_{\mathrm{diff}}^{\pi}$ is large, then constraints (Eq. 1) may not improve significantly over $\pi_{\beta}$ , even for the best value for the weight $\alpha$ (proof in Appendix C):
88
+
89
+ Theorem 3.2 (Informal; Limited policy improvement via distributional constraints.). W.h.p. $\geq 1 - \delta$ for any prescribed level of safety $\zeta$ , the maximum possible policy improvement over choices of $\alpha$ , $\max_{\alpha} [J(\pi_{\alpha}) - J(\pi_{\beta})] \leq \zeta^{+}$ , where $\zeta^{+}$ is given by:
90
+
91
+ $$
92
+ \zeta^ {+} := \max _ {\alpha} \frac {h ^ {*} (\alpha)}{(1 - \gamma) ^ {2}} s. t. \frac {c _ {1} \sqrt {\log \frac {| S | | \mathcal {A} |}{\delta}}}{(1 - \gamma) ^ {2}} \frac {\sqrt {C _ {d i f f} ^ {\pi_ {\alpha}}}}{| \mathcal {D} |} - \frac {\alpha \mathbb {E} _ {\mathbf {s} \sim \widehat {d} ^ {\pi_ {\alpha}}} [ D (\pi_ {\alpha} , \pi_ {\beta}) (\mathbf {s}) ]}{1 - \gamma} \leq \zeta , \tag {5}
93
+ $$
94
+
95
+ where $h^*$ is a monotonically decreasing function of $\alpha$ , and $h(0) = \mathcal{O}(1)$ .
96
+
97
+ Theorem 3.2 quantifies the fundamental tradeoff with distribution constraints: to satisfy a given $\zeta$ -safety constraint in problems with larger $C_{\mathrm{diff}}^{\pi}$ , we would need a larger $\alpha$ . Since the maximum policy improvement $\zeta^{+}$ is upper bounded by $h^{*}(\alpha)$ , the policy may not necessarily improve over the behavior policy if $\alpha$ is large. On the flip side, if we choose to fix the value of $\alpha$ to be small in hopes of attaining more improvement in problems where $C_{\mathrm{diff}}^{\pi}$ is high for all policies, we would end up compromising on the safety guarantee as $\zeta$ needs to be large for a small $\alpha$ and large $C_{\mathrm{diff}}^{\pi}$ . Thus, in this case, the policy may not improve over the behavior policy reliably.
98
+
99
+ Note that a larger value of $C_{\mathrm{diff}}^{\pi}$ need not imply large $\mathbb{E}_{\mathbf{s} \sim \widehat{d}^{\pi}}[D(\pi, \pi_{\beta})(\mathbf{s})]$ because the latter does not involve $\mu(\mathbf{s})$ . $C_{\mathrm{diff}}^{\pi}$ also measures the dispersion of $D(\pi, \pi_{\beta})(\mathbf{s})$ , while the latter performs a mean over states. In addition, Theorem 3.2 characterizes the maximum possible improvement with an oracle selection of $\alpha$ , though it is not feasible in practice. Thus, when $C_{\mathrm{diff}}^{\pi}$ is large, distribution constraint algorithms could either not safely improve over $\pi_{\beta}$ or would attain only a limited improvement with any possible value of $\alpha$ . Finally, we remark that complementing [32, 39] that discuss failure modes of distribution constraints with high-entropy behavior policies, Theorem 3.2 quantifies when this would be the case: this happens when $C_{\mathrm{diff}}^{\pi}$ is large.
100
+
101
+ # 4 Support Constraints As Reweighted Distribution Constraints
102
+
103
+ Thus far, we have seen that distribution constraints can be ineffective with heteroskedastic datasets. If we can impose the distribution constraint such that the constraint strength can be modulated per state, then in principle, we can alleviate the issue raised in Theorem 3.2 and Section 3.1.
104
+
105
+ Our key insight is that by reweighting the action distribution in the data before utilizing a distribution constraint, we can obtain a method that enforces a per-state distribution constraint, which corresponds to an approximate support constraint. This will push down the values of actions that are outside the behavior policy support, but otherwise not impose a severe penalty for in-support actions, thus enabling the policy to deviate from the behavior policy by different amounts at different states. Rather than having a distribution constraint between $\pi$ and $\pi_{\beta}$ (Eq. 1), if we can impose a constraint between $\pi$ and a reweighted version of $\pi_{\beta}$ , where the reweighting is state-dependent, then we can obtain an approximate support constraint. Let the reweighted distribution be $\pi^{re}$ . Intuitively, if $\pi(\cdot|\mathbf{s})$ is within the support of the $\pi_{\beta}(\cdot|\mathbf{s})$ , then one can find a reweighting $\pi^{re}(\cdot|\mathbf{s})$ such that $D(\pi, \pi^{re})(\mathbf{s}) = 0$ , whereas if $\pi(\cdot|\mathbf{s})$ is not within the support of $\pi^{re}(\cdot|\mathbf{s})$ , then $D(\pi, \pi^{re})(\mathbf{s})$ still penalizes $\pi$ when $\pi$ chooses out-of-support actions, since no reweighting $\pi^{re}$ can put non-zero probability on out-of-support actions. This allows us to handle the failure mode from Section 3: at states with wide behavior policy, even with a large $\alpha$ , $\pi$ is not anymore constrained to the behavior distribution, whereas at other "critical" states, where $\pi_{\beta}$ is narrow, a large enough $\alpha$ will constrain $\pi(\cdot|\mathbf{s})$ to stay close to $\pi_{\beta}(\cdot|\mathbf{s})$ . We call this Reweighting Distribution constraints to Support (ReDS).
106
+
107
+ # 4.1 Instantiating the Principle Behind ReDS
108
+
109
+ One option is to reweight $\pi_{\beta}$ to $\pi^{re}$ , and enforce a distribution constraint $D(\pi, \pi^{re})$ between $\pi$ and $\pi^{re}$ . However, this is problematic because the $\pi^{re}$ would typically be estimated by using importance weighting or by fitting a parametric model, and prior work has shown that errors in estimating the behavior policy [43, 20] using only one action sample often get propagated and lead to poor downstream performance. For CQL, this issue might be especially severe if we push up the Q-values under $\pi^{re}$ , because then these errors might lead to severe Q-value over-estimation.
110
+
111
+ Abstract idea of CQL (ReDS). Instead, we devise an alternative formulation for ReDS that modifies the learned policy $\pi$ to $\pi^{re}$ , such that applying a distribution constraint on this modified policy imposes a support constraint. Thus, with CQL, now we instead push down the Q-values under $\pi^{re}$ . We define $\pi^{re}$ as a mixture distribution of the learned policy $\pi$ and a reweighted version of the behavior policy as follows:
112
+
113
+ ![](images/6a9a184cfacfab30cb1228d66c532b17b987a9317308159c2ff8abd50a4b32f9.jpg)
114
+ Figure 3: Comparison between support and distributional constraints: Left: CQL pushes down the Q-function under the policy $\pi$ , while pushing up the function under the behavior policy $\pi_{\beta}$ . This means that the Q-values for bad actions can go up. Right: In contrast, ReDS re-weights the data distribution to push down the values of bad actions, alleviating this shortcoming.
115
+
116
+ ![](images/d701bfdcd847ba09ee4d85060cb6b6e811b0b2690f179310054afb0810248655.jpg)
117
+
118
+ $$
119
+ \pi^ {r e} (\cdot | \mathbf {s}) := \frac {1}{2} \pi (\cdot | \mathbf {s}) + \frac {1}{2} \left[ \pi_ {\beta} (\cdot | \mathbf {s}) \cdot g (\pi (\cdot | \mathbf {s})) \right], \tag {6}
120
+ $$
121
+
122
+ where $g(\cdot)$ is a monotonically decreasing function. We will demonstrate how pushing down the Q-values under $\pi^{re}$ modifies CQL to enable a support constraint while reusing existing components of CQL that impose a distribution constraint. As shown in Figure 3, the second term in Equation 6 increases the probability of actions that are likely under the behavior policy, but are less likely under the learned policy (due to $g$ being a decreasing function). We will show in Lemma 4.1 that utilizing $\pi^{re}$ in CQL enforces a support constraint on $\pi$ . Thus, the learned policy $\pi$ can be further away from $\pi_{\beta}$ , allowing $\pi$ to assign more probability to good actions that are within the behavior policy support, even if they have lower probabilities under $\pi_{\beta}$ . Section 4.2 illustrates theoretically why pushing down the Q-values under Eq. 6 approximates a support constraint in terms of how it modifies the resulting Q-values. For an illustration, please see Figure 3.
123
+
124
+ How should we pick $g$ in practice? Since we wish to use $\pi^{re}$ as a replacement for $\pi$ in the minimization term in the CQL regularizer (Equation 2), we aim to understand how to design the re-weighting $g$ in practice. Since CQL specifically enforces a distribution constraint by maximizing the Q-value on all actions sampled from the behavior policy $\pi_{\beta}$ , our choice of $g$ should aim to counter this effect by instead minimizing the Q-value on "bad" actions within the support of the behavior policy. Equation 6 quantifies the notion of these "bad" actions using a monotonically decreasing function $g(\pi (\mathbf{a}|\mathbf{s}))$ of the policy probability. In practice, we find it convenient to define $g$ to be a function of the advantage estimate: $A_{\theta}(\mathbf{s},\mathbf{a})\coloneqq Q_{\theta}(\mathbf{s},\mathbf{a}) - E_{\mathbf{a}'\sim \pi}[Q_{\theta}(\mathbf{s},\mathbf{a}')]$ , which the policy $\pi$ is seeking to maximize. In fact, if entropy regularization is utilized for training the policy (akin to most offline RL algorithms), the density of an action under a policy is directly proportional to exponentiated advantages, i.e., $\pi (\mathbf{a}|\mathbf{s})\propto \exp (A_{\theta}(\mathbf{s},\mathbf{a}))$ . Hence, we choose $g(x) = 1 / x$ , such that $g(\exp (A(\mathbf{s},\mathbf{a}))) = \exp (-A(\mathbf{s},\mathbf{a}))$ (a decreasing function).
125
+
126
+ In practice, we approximate the product distribution $\pi_{\beta}(\mathbf{a}|\mathbf{s})\cdot g(\pi (\mathbf{a}|\mathbf{s}))$ by fitting a parametric function approximator $\rho_{\psi}(\mathbf{a}|\mathbf{s})$ . Since $\rho_{\psi}(\mathbf{a}|\mathbf{s})$ is being trained to approximate a re-weighted version of the behavior policy, we fit $\rho_{\psi}$ using a weighted maximum log-likelihood objective, as in prior work [44, 43]. The concrete form of our objective for training $\rho_{\psi}$ is shown below ( $\tau$ is a temperature hyperparameter typically introduced in prior work [44, 43]):
127
+
128
+ $$
129
+ \rho_ {\psi} (\cdot | \mathbf {s}) = \arg \max _ {\rho_ {\psi}} \mathbb {E} _ {\mathbf {s} \sim \mathcal {D}, \mathbf {a} \sim \pi_ {\beta} (\cdot | \mathbf {s})} [ \log \rho_ {\psi} (\mathbf {a} | \mathbf {s}) \cdot \exp (- A _ {\theta} (\mathbf {s}, \mathbf {a}) / \tau) ]. \tag {7}
130
+ $$
131
+
132
+ The crucial difference between this objective and standard advantage-weighted updates is the difference of the sign. While algorithms such as AWR [43] aim to find an action that attains a high advantage while being close to the behavior policy, and hence, uses a positive advantage, we utilize the negative advantage to mine for poor actions that are still quite likely under the behavior policy.
133
+
134
+ The final objective for the Q-function combines the regularizer in Eq. 8 with a standard TD objective:
135
+
136
+ $$
137
+ \mathcal {R} (\theta ; \rho) = \frac {1}{2} \left(\underset {\mathbf {s} \sim \mathcal {D}, \mathbf {a} \sim \pi} {\mathbb {E}} \left[ Q _ {\theta} (\mathbf {s}, \mathbf {a}) \right] + \underset {\mathbf {s} \sim \mathcal {D}, \mathbf {a} \sim \rho} {\mathbb {E}} \left[ Q _ {\theta} (\mathbf {s}, \mathbf {a}) \right]\right) - \underset {\mathbf {s}, \mathbf {a} \sim \mathcal {D}} {\mathbb {E}} \left[ Q _ {\theta} (\mathbf {s}, \mathbf {a}) \right] \tag {8}
138
+ $$
139
+
140
+ $$
141
+ \min _ {\theta} J _ {Q} (\theta) = \mathcal {R} (\theta ; \rho) + \frac {1}{2} \mathbb {E} _ {\mathbf {s}, \mathbf {a}, \mathbf {s} ^ {\prime} \sim \mathcal {D}} [ (Q _ {\theta} (\mathbf {s}, \mathbf {a}) - \mathcal {B} ^ {\pi} \bar {Q} (\mathbf {s}, \mathbf {a})) ^ {2} ] \tag {9}
142
+ $$
143
+
144
+ # 4.2 Theoretical Analysis of CQL (ReDS)
145
+
146
+ Next, we analyze CQL (ReDS), showing how learning using the regularizer in Eq. 8 modifies the Q-values and justifies our choice of the distribution $\rho$ in the previous section.
147
+
148
+ Lemma 4.1 (Per-state change of Q-values.). Let $g(\mathbf{a}|\mathbf{s})$ be a shorthand for $g(\mathbf{a}|\mathbf{s}) = g(\tau \cdot \pi (\mathbf{a}|\mathbf{s}))$ . In the tabular setting, the $Q$ -function obtained after one iteration of objective in Eq. 9 is given by:
149
+
150
+ $$
151
+ Q _ {\theta} (\mathbf {s}, \mathbf {a}) := \mathcal {B} ^ {\pi} \bar {Q} (\mathbf {s}, \mathbf {a}) - \alpha \frac {\pi (\mathbf {a} | \mathbf {s}) + \pi_ {\beta} (\mathbf {a} | \mathbf {s}) g (\mathbf {a} | \mathbf {s}) - 2 \pi_ {\beta} (\mathbf {a} | \mathbf {s})}{2 \pi_ {\beta} (\mathbf {a} | \mathbf {s})} \tag {10}
152
+ $$
153
+
154
+ where $\mathcal{B}^{\pi}\bar{Q} (\mathbf{s},\mathbf{a})$ is the Bellman backup operator applied to a delayed target $Q$ network.
155
+
156
+ Eq. 10 illustrates why the modified regularizer in Eq. 8 leads to a "soft" support constraint whose strength is modulated per-state. Since $g$ is a monotonically decreasing function of $\pi$ , for state-action pairs where $\pi(\mathbf{a}|\mathbf{s})$ has high values, $g(\mathbf{a}|\mathbf{s})$ is low and therefore the Q-value $Q(\mathbf{s},\mathbf{a})$ for such state-action pairs is underestimated less. Conversely, for state-action pairs where $\pi(\mathbf{a}|\mathbf{s})$ attains low values, $g(\mathbf{a}|\mathbf{s})$ is high, counteracting the low $\pi(\mathbf{a}|\mathbf{s})$ values. Also, since $\pi_{\beta}(\mathbf{a}|\mathbf{s})$ appears in the denominator, for out-of-support actions, where $\pi_{\beta}(\mathbf{a}|\mathbf{s}) = 0$ , $\pi(\mathbf{a}|\mathbf{s})$ must also assign 0 probability to such actions for the Q-values to be well defined. An illustration of this idea is shown in Figure 9. We can use this insight to further derive the closed-form objective optimized by ReDS.
157
+
158
+ Lemma 4.2 (CQL (ReDS) objective.). Assume that for all policies $\pi \in \Pi, \forall (\mathbf{s}, \mathbf{a}), \pi (\mathbf{a}|\mathbf{s}) > 0$ . Then, CQL (ReDS) solves the following optimization problem:
159
+
160
+ $$
161
+ \left. \max _ {\pi \in \Pi} \widehat {J} (\pi) - \frac {\alpha}{2 (1 - \gamma)} \mathbb {E} _ {\mathbf {s} \sim \widehat {d} ^ {\pi}} \left[ D (\pi , \pi_ {\beta}) (\mathbf {s}) + \underset {\mathbf {a} \sim \pi (\cdot | \mathbf {s})} {\mathbb {E}} [ g (\tau \cdot \pi (\mathbf {a} | \mathbf {s})) \mathbb {I} \{\pi_ {\beta} (\mathbf {a} | \mathbf {s}) > 0 \} ] \right]. \right. \tag {11}
162
+ $$
163
+
164
+ $\widehat{J}(\pi)$ corresponds to the empirical return of the learned policy, i.e., the return of the policy under the learned Q-function. The objective in Lemma 4.2 can be intuitively interpreted as follows: The first term, $D(\pi, \pi_{\beta})(\mathbf{s})$ , is a standard distribution constraint, also present in naïve CQL, and it aims to penalize the learned policy $\pi$ if it deviates too far away from $\pi_{\beta}$ . ReDS adds an additional second term that effectively encourages $\pi$ to be "sharp" within the support of the behavior policy (as $g$ is monotonically decreasing), enabling $\pi$ to potentially put mass on actions that lead to a high $\widehat{J}(\pi)$ .
165
+
166
+ Specifically, this second term allows us to control the strength of the distribution constraint per state: at states where the support of the behavior policy is narrow, i.e., the volume of actions such that $\pi_{\beta}(\mathbf{a}|\mathbf{s}) > 0$ is small (say, only a single action), the penalty in Equation 11 reverts to a standard distributional constraint by penalizing divergence from the behavioral policy via $D(\pi ,\pi_{\beta})(\mathbf{s})$ as the second term cannot be minimized. At states where the policy $\pi_{\beta}$ is broad, the second term counteracts the effect of the distributional constraint within the support of the behavior policy, by enabling $\pi$ to concentrate its density on only good actions within the support of $\pi_{\beta}$ with the same multiplier $\alpha$ . Thus even when we need to set $\alpha$ to be large to keep $\pi$ close to $\pi_{\beta}(\cdot |\mathbf{s})$ at certain states (e.g., in narrow hallways in the example in Sec. 3.1), $\pi$ is not heavily constrained to $\pi_{\beta}(\cdot |\mathbf{s})$ at other states.
167
+
168
+ In fact, we formalize this intuition below to show that for the best possible value of the hyperparameters appearing in the training objective for CQL (ReDS) (Equation 11), CQL (ReDS) is guaranteed to outperform the best-tuned version of CQL for any offline RL problem. A proof is in Appendix C.
169
+
170
+ Lemma 4.3 (CQL (ReDS) formal guarantee). The policy learned by ReDS for the best possible values of $\tau$ (Equation 11) and $\alpha$ in CQL (Equation 3) outperforms the best CQL policy. That is, formally we show:
171
+
172
+ $$
173
+ \max _ {\alpha , \tau} J \left(\pi_ {\mathrm {R e D S}; \alpha , \tau}\right) \geq \max _ {\alpha} J \left(\pi_ {\mathrm {C Q L}; \alpha}\right). \tag {12}
174
+ $$
175
+
176
+ # 5 Experimental Evaluation
177
+
178
+ The goal of our experiments is to understand how CQL (ReDS) compares to distributional constraint methods when learning from heteroskedastic offline datasets. In order to perform our experiments,
179
+
180
+ we construct new heteroskedastic datasets that pose challenges representative of what we would expect to see in real-world problems. We first introduce the tasks and heteroskedastic datasets that we evaluate on, and then present our results compared to prior state-of-the-art methods. We also evaluate ReDS on some of the standard D4RL [13] datasets, which are not heteroskedastic, and find that the addition of ReDS, as expected, neither helps nor hurts on those tasks.
181
+
182
+ 5.1 Comparison on the D4RL Benchmark
183
+
184
+ <table><tr><td>Dataset</td><td>BC</td><td>10%BC</td><td>DT</td><td>AWAC</td><td>Onestep RL</td><td>TD3+BC</td><td>COMBO</td><td>CQL</td><td>IQL</td><td>Ours</td></tr><tr><td>halfcheetah-medium-replay</td><td>36.6</td><td>40.6</td><td>36.6</td><td>40.5</td><td>38.1</td><td>44.6</td><td>55.1</td><td>45.5</td><td>44.2</td><td>52.3</td></tr><tr><td>hopper-medium-replay</td><td>18.1</td><td>75.9</td><td>82.7</td><td>37.2</td><td>97.5</td><td>60.9</td><td>89.5</td><td>95.0</td><td>94.7</td><td>101.5</td></tr><tr><td>walker2d-medium-replay</td><td>26.0</td><td>62.5</td><td>66.6</td><td>27.0</td><td>49.5</td><td>81.8</td><td>56.0</td><td>77.2</td><td>73.9</td><td>85.0</td></tr><tr><td>halfcheetah-medium-expert</td><td>55.2</td><td>92.9</td><td>86.8</td><td>42.8</td><td>93.4</td><td>90.7</td><td>90.0</td><td>91.6</td><td>86.7</td><td>89.5</td></tr><tr><td>hopper-medium-expert</td><td>52.5</td><td>110.9</td><td>107.6</td><td>55.8</td><td>103.3</td><td>98.0</td><td>111.1</td><td>105.4</td><td>91.5</td><td>110.0</td></tr><tr><td>walker2d-medium-expert</td><td>107.5</td><td>109.0</td><td>108.1</td><td>74.5</td><td>113.0</td><td>110.1</td><td>103.3</td><td>108.8</td><td>109.6</td><td>112.0</td></tr><tr><td>locomotion total</td><td>295.9</td><td>491.8</td><td>488.4</td><td>277.8</td><td>494.8</td><td>486.1</td><td>505</td><td>523.5</td><td>500.6</td><td>550.3</td></tr></table>
185
+
186
+ Heteroskedastic data is likely to exist in real-world problems such as driving and manipulation, where datasets are collected by multiple policies that agree and disagree at different states. While standard benchmarks (D4RL [13] and RLUnplugged [21]) include offline datasets generated by mixture policies (e.g. the "medium-expert" generated by two policies with different performance), these policies are trained via RL methods (SAC) that constrain the entropy of the action distribution at each state to be uniform. To measure heteroskedasticity, we utilize an approximation to $C_{\mathrm{diff}}^{\pi}$ : the standard deviation in the value of $D(\pi, \pi_{\beta})(\mathbf{s})$ across states in the dataset, using a fixed policy $\pi$ obtained by running CQL. We didn't use $C_{\mathrm{diff}}^{\pi}$ directly, as it is challenging to compute in continuous spaces. In Table 3, the standard deviation is lower for the D4RL antmaze datasets, corroborating our intuition that these datasets are significantly less heteroskedastic.
187
+
188
+ # 5.2 Comparisons on Heteroskedastic datasets
189
+
190
+ Heteroskedastic datasets. To stress-test our method and prior distribution constraint approaches, we collected new datasets for the medium and large mazes used in the antmaze navigation tasks from D4RL: noisy datasets, where the behavior policy action variance differs in different regions of the maze, representative of user variability in navigation, and biased datasets, where the behavior policy admits a systematic bias towards certain behaviors in different regions of the maze, representative of bias towards certain routes in navigation problems. Table 3 shows that these datasets are significantly more heteroskedastic than the D4RL datasets.
191
+
192
+ Table 1: Performance comparison on the D4RL benchmark. (Top 2 bolded)
193
+
194
+ <table><tr><td>Dataset</td><td>std</td><td>max</td></tr><tr><td>noisy (Ours)</td><td>18</td><td>253</td></tr><tr><td>biased (Ours)</td><td>9</td><td>31</td></tr><tr><td>diverse (D4RL)</td><td>2</td><td>11</td></tr><tr><td>play (D4RL)</td><td>2</td><td>13</td></tr></table>
195
+
196
+ Table 3: The new antmaze datasets (Ours) are significantly more heteroskedastic than the standard D4RL datasets. We measure heteroskedasticity using the std and max of $D(\pi, \pi_{\beta})(\mathbf{s})$ across states in the offline dataset.
197
+
198
+ Using these more heteroskedastic datasets, we compare CQL (ReDS) with CQL and IQL [27], recent popular methods, and two prior methods, BEAR [30] and EDAC [3], that also enforce support constraints. For each algorithm, including ours, we utilize hyperparameters directly from the counterpart tasks in D4RL. Due to the lack of an effective method for offline policy selection (see Fu et al. [14]), we utilize oracle checkpoint selection for every method. We compute the mean and standard deviation across 3 seeds. Table 2 shows that the largest gap between CQL (ReDS) and prior methods is on noisy datasets, which are particularly more heteroskedastic (Table 3).
199
+
200
+ We also compare CQL (ReDS) with recent offline RL algorithms on D4RL, including DT [8], AWAC [42], onestep RL [5], TD3+BC [16] and COMBO [57]. Table 1 shows that CQL (ReDS) obtains similar performance as existing distributional constraint methods and outperforms BC-based baselines. This is expected given that the D4RL datasets exhibit significantly smaller heteroskedasticity, as previously explained. Also, a large fraction of the datasets consists of trajectories with high returns. BC using the top $10\%$ trajectories with the highest episode returns already has strong performance. The previous results compare CQL (ReDS) to baselines in tasks where the MDP states are low-dimensional vectors. Next, we study vision-based robotic manipulation tasks.
201
+
202
+ Visual robotic manipulation. We consider two types of manipulation tasks. In the "Pick & Place" task, the algorithm controls a WidowX robot to grasp an object and place it into a tray located at a test
203
+
204
+ <table><tr><td>Task &amp; Dataset</td><td>EDAC</td><td>BEAR</td><td>CQL</td><td>IQL</td><td>INAC</td><td>RW &amp; AW</td><td>EQL</td><td>SQL</td><td>XQL-C</td><td>Ours</td></tr><tr><td>medium-noisy</td><td>0</td><td>0</td><td>55</td><td>44</td><td>0</td><td>5</td><td>0.0</td><td>0.7</td><td>4.3</td><td>73</td></tr><tr><td>medium-biased</td><td>0</td><td>0</td><td>73</td><td>48</td><td>0</td><td>0</td><td>6.5</td><td>8.0</td><td>11.7</td><td>74</td></tr><tr><td>large-noisy</td><td>0</td><td>0</td><td>42</td><td>39</td><td>0</td><td>10</td><td>7.1</td><td>2.9</td><td>11.3</td><td>53</td></tr><tr><td>large-biased</td><td>0</td><td>0</td><td>50</td><td>41</td><td>0</td><td>8</td><td>8.5</td><td>0.5</td><td>7.3</td><td>45</td></tr></table>
205
+
206
+ ![](images/a4d6174c77e5626208d95d107b0e64df5859ec2bc87ac46455a6128b89438219.jpg)
207
+ Figure 4: Examples rollouts in the heteroskedastic bin-sort data. In this task, an offline RL method must sort objects in front of it into two bins with a dataset that has non-uniform coverage at different states, using visual input. In the first half of the trajectory, the states exhibit a more narrow action distribution but the second half admits a more uniform action distribution.
208
+
209
+ location, directly from raw $128 \times 128 \times 3$ images and a sparse 0/1 reward signal. The dataset consists of behavior from suboptimal grasping and placing policies, and the positions of the tray in the offline dataset very rarely match the target test location. The placing policies exhibit significant variability, implying these datasets are heteroskedastic under our definition. We also consider the "Bin Sort" task (see Figure 4), where a WidowX robot is controlled to sort two objects into two separate bins. Here, heteroskedasticity is introduced when sorting objects into the desirable bins. Similar to the Pick & Place task, the placing policy exhibits significant variability, sometimes placing an object in the incorrect bin (e.g., recyclable trash thrown into the non-recyclable bin). However, the grasping policy is more expert-like, grasping the object with low variability. More details are in Appendix E.
210
+
211
+ ![](images/b88592c8d6e6ca72a3d2c62d9346d78694531a0a8f4029b9d65f45bed75d3492.jpg)
212
+ Figure 5: CQL vs ReDS: IQM normalized score for 10 Atari games. We consider two dataset compositions.
213
+
214
+ Table 4 presents the results on these tasks. We utilize oracle policy selection analogous to the antmaze experiments from Table 2. Table 4 shows that CQL (ReDS) outperforms CQL, attaining a success rate of about $15.1\%$ for the visual pick and place task, whereas CQL only attains $6.5\%$ success. While performance might appear low in an absolute sense, note that both CQL and ReDS do improve over the behavior policy, which only attains a success rate of $4\%$ . Thus offline RL does work on this task, and utilizing ReDS in conjunction with the standard distributional constraint in CQL does result in a boost in performance with this heteroskedastic dataset. For the "Bin Sort" task, our method outperforms CQL by $3.5\times$ when learning from more heteroskedastic datasets. This indicates the effectiveness of our method in settings with higher heteroskedasticity.
215
+
216
+ Table 2: CQL (ReDS) outperforms prior offline RL methods, including distributional constraint methods (IQL, XQL-C) and prior support constraint methods (BEAR, EDAC, SQL, EQL, RW & AW), on three out of four scenarios when learning from heteroskedastic data in the antmaze task. The improvement over prior methods is larger when learning from the noisy datasets, which are more heteroskedastic, as in Table 3, compared to biased datasets.
217
+
218
+ <table><tr><td>Task</td><td>CQL</td><td>CQL (ReDS)</td><td>std D(π, πβ)(s)</td><td>max D(π, πβ)(s)</td></tr><tr><td>Pick &amp; Place</td><td>6.5 ± 0.4</td><td>15.1 ± 0.4</td><td>48.7</td><td>307.4</td></tr><tr><td>Bin Sort (Easy)</td><td>31.2 ± 0.3</td><td>31.4 ± 0.3</td><td>7.9</td><td>81.6</td></tr><tr><td>Bin Sort (Hard)</td><td>6.1 ± 0.2</td><td>23.1 ± 0.7</td><td>59.6</td><td>988.3</td></tr></table>
219
+
220
+ Table 4: CQL (ReDS) vs CQL on robotic manipulation tasks. CQL (ReDS) outperforms CQL significantly when learning from more heteroskedastic datasets, as measured by $C_{\mathrm{diff}}^{\pi}$ : the standard deviation and the maximum of $D(\pi, \pi_{\beta})(\mathbf{s})$ across states.
221
+
222
+ Atari games. We collect data on 10 Atari games from multiple policies that behave differently at certain states while having similar actions otherwise. We consider a case of two such policies, and a harder scenario of five. We evaluate the performance of CQL (ReDS) on the Atari games using the evaluation metrics from prior works [2, 34]. Figure 5 shows that in both testing scenarios: with the mixture of two policies (top figure) and the mixture of five policies (bottom figure), CQL (ReDS) outperforms CQL in aggregate.
223
+
224
+ To summarize, our results indicate that incorporating CQL (ReDS) outperforms distribution constraints with heteroskedastic datasets in a variety of domains.
225
+
226
+ # 6 Related Work
227
+
228
+ Offline Q-learning methods utilize mechanisms to prevent backing up unseen actions [39], by applying an explicit behavior constraint that forces the learned policy to be "close" to the behavior policy [23, 53, 44, 49, 53, 30, 28, 27, 52, 15], or by learning a conservative value function [33, 54, 41, 57, 56, 47, 24, 53]. Most of these offline RL methods utilize a distribution constraint, explicit (e.g., TD3+BC [15]) or implicit (e.g., CQL [33]), and our empirical analysis of representative algorithms from either family indicates that these methods struggle with heteroskedastic data, especially those methods that use an explicit constraint. Model-based methods [26, 56, 4, 51, 45, 38, 57] train value functions using dynamics models, which is orthogonal to our method.
229
+
230
+ Some prior works have also made a case for utilizing support constraints instead of distribution constraints, often via didactic examples [30, 29, 39], and devised algorithms that impose support constraints in theory, by utilizing the maximum mean discrepancy metric [30] or asymmetric f-divergences [53] for the policy constraint. Empirical results on D4RL [13] and the analysis by Wu et al. [53] suggest that support constraints are not needed, as strong distribution constraint algorithms often have strong performance. As we discussed in Sections 3.2 (Theorem 3.2 indicates that distribution constraints may not fail when $C_{\mathrm{diff}}^{\pi}$ is small, provided these algorithms are well-tuned.) and 4, these benchmark datasets are not heteroskedastic, as they are collected from policies that are equally wide at all states and centered on good actions (e.g., Antmaze domains in [13], control suite tasks in Gulcehre et al. [21]) and hence, do not need to modulate the distribution constraint strength. To benchmark with heteroskedastic data, we developed some novel tasks which may be of independent interest beyond this work, and find that our method ReDS can work well here.
231
+
232
+ # 7 Discussion, Future Directions, and Limitations
233
+
234
+ We studied the behavior of distribution constraint offline RL algorithms when learning from heteroskedastic datasets, a property we are likely to encounter in the real world. Naive distribution constraint algorithms can be highly ineffective in such settings both in theory and practice, as they fail to modulate the constraint strength per-state. We propose ReDS, a method to convert distributional constraints into support-based constraints via reweighting, and validate it in CQL. A limitation of ReDS is that it requires estimating the distribution $\rho_{\psi}$ to enforce a support constraint, which brings some additional compute overhead. Additionally, the instantiation of ReDS we develop in Section 4.1 is specific to methods that utilize a conservative regularizer such as CQL (or related approaches like COMBO). We clarify that our main contribution in this work is an analysis of when distributional constraints fail (which we study for AWR and CQL), and developing a principle for reformulating distributional constraints to approximate support constraints via reweighting. Devising approaches for enforcing support constraints that do not require extra machinery is a direction for future work. Understanding if support constraints are less sensitive to hyperparameters or are more amenable to model selection is also a direction for future work.
235
+
236
+ # References
237
+
238
+ [1] Achiam, J., Held, D., Tamar, A., and Abbeel, P. Constrained policy optimization. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 22–31. JMLR.org, 2017.
239
+ [2] Agarwal, R., Schuurmans, D., and Norouzi, M. An optimistic perspective on offline reinforcement learning. In International Conference on Machine Learning (ICML), 2020.
240
+ [3] An, G., Moon, S., Kim, J.-H., and Song, H. O. Uncertainty-Based Offline Reinforcement Learning with Diversified Q-Ensemble. arXiv e-prints, art. arXiv:2110.01548, October 2021.
241
+ [4] Argenson, A. and Dulac-Arnold, G. Model-based offline planning. arXiv preprint arXiv:2008.05556, 2020.
242
+ [5] Brandfonbrener, D., Whitney, W. F., Ranganath, R., and Bruna, J. Offline RL without off-policy evaluation. CoRR, abs/2106.08909, 2021. URL https://arxiv.org/abs/2106.08909.
243
+ [6] Cao, K., Chen, Y., Lu, J., Arechiga, N., Gaidon, A., and Ma, T. Heteroskedastic and imbalanced deep learning with adaptive regularization. arXiv preprint arXiv:2006.15766, 2020.
244
+ [7] Castro, P. S., Moitra, S., Gelada, C., Kumar, S., and Bellemare, M. G. Dopamine: A Research Framework for Deep Reinforcement Learning. 2018. URL http://arxiv.org/abs/1812.06110.
245
+ [8] Chen, L., Lu, K., Rajeswaran, A., Lee, K., Grover, A., Laskin, M., Abbeel, P., Srinivas, A., and Mordatch, I. Decision transformer: Reinforcement learning via sequence modeling. arXiv preprint arXiv:2106.01345, 2021.
246
+ [9] Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248–255. IEEE, 2009.
247
+ [10] Ebert, F., Yang, Y., Schmeckpeper, K., Bucher, B., Georgakis, G., Daniilidis, K., Finn, C., and Levine, S. Bridge data: Boosting generalization of robotic skills with cross-domain datasets. arXiv preprint arXiv:2109.13396, 2021.
248
+ [11] Ettinger, S., Cheng, S., Caine, B., Liu, C., Zhao, H., Pradhan, S., Chai, Y., Sapp, B., Qi, C. R., Zhou, Y., et al. Large scale interactive motion forecasting for autonomous driving: The way to open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9710-9719, 2021.
249
+ [12] Fu, J., Kumar, A., Soh, M., and Levine, S. Diagnosing bottlenecks in deep Q-learning algorithms. arXiv preprint arXiv:1902.10250, 2019.
250
+ [13] Fu, J., Kumar, A., Nachum, O., Tucker, G., and Levine, S. D4rl: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020.
251
+ [14] Fu, J., Norouzi, M., Nachum, O., Tucker, G., ziyu wang, Novikov, A., Yang, M., Zhang, M. R., Chen, Y., Kumar, A., Paduraru, C., Levine, S., and Paine, T. Benchmarks for deep off-policy evaluation. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=kWSeGEeHvF8.
252
+ [15] Fujimoto, S. and Gu, S. S. A minimalist approach to offline reinforcement learning. arXiv preprint arXiv:2106.06860, 2021.
253
+ [16] Fujimoto, S. and Gu, S. S. A minimalist approach to offline reinforcement learning. CoRR, abs/2106.06860, 2021. URL https://arxiv.org/abs/2106.06860.
254
+ [17] Fujimoto, S., Meger, D., and Precup, D. Off-policy deep reinforcement learning without exploration. arXiv preprint arXiv:1812.02900, 2018.
255
+ [18] Fujimoto, S., van Hoof, H., and Meger, D. Addressing function approximation error in actor-critic methods. In International Conference on Machine Learning (ICML), pp. 1587-1596, 2018.
256
+ [19] Garcia, J. and Fernández, F. A comprehensive survey on safe reinforcement learning. Journal of Machine Learning Research, 16(1):1437-1480, 2015.
257
+ [20] Ghasemipour, S. K. S., Schuurmans, D., and Gu, S. S. Emaq: Expected-max q-learning operator for simple yet effective offline and online rl. In International Conference on Machine Learning, pp. 3682-3691. PMLR, 2021.
258
+
259
+ [21] Gulcehre, C., Wang, Z., Novikov, A., Paine, T. L., Colmenarejo, S. G., Zolna, K., Agarwal, R., Merel, J., Mankowitz, D., Paduraru, C., et al. Rl unplugged: Benchmarks for offline reinforcement learning. 2020.
260
+ [22] Haarnoja, T., Zhou, A., Hartikainen, K., Tucker, G., Ha, S., Tan, J., Kumar, V., Zhu, H., Gupta, A., Abbeel, P., and Levine, S. Soft actor-critic algorithms and applications. Technical report, 2018.
261
+ [23] Jaques, N., Ghandeharioun, A., Shen, J. H., Ferguson, C., Lapedriza, A., Jones, N., Gu, S., and Picard, R. Way off-policy batch deep reinforcement learning of implicit human preferences in dialog. arXiv preprint arXiv:1907.00456, 2019.
262
+ [24] Jin, Y., Yang, Z., and Wang, Z. Is pessimism provably efficient for offline r1? arXiv preprint arXiv:2012.15085, 2020.
263
+ [25] Kalashnikov, D., Irpan, A., Pastor, P., Ibarz, J., Herzog, A., Jang, E., Quillen, D., Holly, E., Kalakrishnan, M., Vanhoucke, V., et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pp. 651-673, 2018.
264
+ [26] Kidambi, R., Rajeswaran, A., Netrapalli, P., and Joachims, T. Morel: Model-based offline reinforcement learning. arXiv preprint arXiv:2005.05951, 2020.
265
+ [27] Kostrikov, I., Nair, A., and Levine, S. Offline reinforcement learning with implicit q-learning. arXiv preprint arXiv:2110.06169, 2021.
266
+ [28] Kostrikov, I., Thompson, J., Fergus, R., and Nachum, O. Offline reinforcement learning with fisher divergence: critical regularization. arXiv preprint arXiv:2103.08050, 2021.
267
+ [29] Kumar, A. Data-driven deep reinforcement learning. https://bair.berkeley.edu/blog/2019/12/05/bear/, 2019. BAIR Blog.
268
+ [30] Kumar, A., Fu, J., Soh, M., Tucker, G., and Levine, S. Stabilizing off-policy q-learning via bootstrapping error reduction. In Advances in Neural Information Processing Systems, pp. 11761-11771, 2019.
269
+ [31] Kumar, A., Fu, J., Tucker, G., and Levine, S. Stabilizing Off-Policy Q-Learning via Bootstrapping Error Reduction. arXiv e-prints, art. arXiv:1906.00949, June 2019.
270
+ [32] Kumar, A., Fu, J., Tucker, G., and Levine, S. Stabilizing off-policy q-learning via bootstrapping error reduction. 2019. URL http://arxiv.org/abs/1906.00949.
271
+ [33] Kumar, A., Zhou, A., Tucker, G., and Levine, S. Conservative q-learning for offline reinforcement learning. arXiv preprint arXiv:2006.04779, 2020.
272
+ [34] Kumar, A., Agarwal, R., Ghosh, D., and Levine, S. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=09bnihsFfXU.
273
+ [35] Kumar, A., Hong, J., Singh, A., and Levine, S. Should i run offline reinforcement learning or behavioral cloning? In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=AP1MKT37rJ.
274
+ [36] Lange, S., Gabel, T., and Riedmiller, M. Batch reinforcement learning. In Reinforcement learning, pp. 45-73. Springer, 2012.
275
+ [37] Laroche, R., Trichelair, P., and Combes, R. T. d. Safe policy improvement with baseline bootstrapping. arXiv preprint arXiv:1712.06924, 2017.
276
+ [38] Lee, B.-J., Lee, J., and Kim, K.-E. Representation balancing offline model-based reinforcement learning. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=QpNz8r_Ri2Y.
277
+ [39] Levine, S., Kumar, A., Tucker, G., and Fu, J. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020.
278
+ [40] Lillicrap, T. P., Hunt, J. J., Pritzel, A., Heess, N., Erez, T., Tassa, Y., Silver, D., and Wierstra, D. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.
279
+ [41] Nachum, O., Dai, B., Kostrikov, I., Chow, Y., Li, L., and Schuurmans, D. Algaedice: Policy gradient from arbitrary experience. arXiv preprint arXiv:1912.02074, 2019.
280
+ [42] Nair, A., Dalal, M., Gupta, A., and Levine, S. Accelerating online reinforcement learning with offline datasets. CoRR, abs/2006.09359, 2020. URL https://arxiv.org/abs/2006.09359.
281
+
282
+ [43] Nair, A., Dalal, M., Gupta, A., and Levine, S. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020.
283
+ [44] Peng, X. B., Kumar, A., Zhang, G., and Levine, S. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019.
284
+ [45] Rafailov, R., Yu, T., Rajeswaran, A., and Finn, C. Offline reinforcement learning from images with latent space models. Learning for Decision Making and Control (L4DC), 2021.
285
+ [46] Rashidinejad, P., Zhu, B., Ma, C., Jiao, J., and Russell, S. Bridging offline reinforcement learning and imitation learning: A tale of pessimism. arXiv preprint arXiv:2103.12021, 2021.
286
+ [47] Rezaeifar, S., Dadashi, R., Vieillard, N., Hussenot, L., Bachem, O., Pietquin, O., and Geist, M. Offline reinforcement learning as anti-exploration. arXiv preprint arXiv:2106.06431, 2021.
287
+ [48] Schulman, J., Levine, S., Abbeel, P., Jordan, M., and Moritz, P. Trust region policy optimization. In International conference on machine learning, pp. 1889-1897, 2015.
288
+ [49] Siegel, N. Y., Springenberg, J. T., Berkenkamp, F., Abdolmaleki, A., Neunert, M., Lampe, T., Hafner, R., and Riedmiller, M. Keep doing what worked: Behavioral modelling priors for offline reinforcement learning. arXiv preprint arXiv:2002.08396, 2020.
289
+ [50] Singh, A., Yu, A., Yang, J., Zhang, J., Kumar, A., and Levine, S. Cog: Connecting new skills to past experience with offline reinforcement learning. arXiv preprint arXiv:2010.14500, 2020.
290
+ [51] Swazinna, P., Udluft, S., and Runkler, T. Overcoming model bias for robust offline deep reinforcement learning. arXiv preprint arXiv:2008.05533, 2020.
291
+ [52] Wang, Z., Novikov, A., Zołna, K., Springenberg, J. T., Reed, S., Shahriari, B., Siegel, N., Merel, J., Gulcehre, C., Heess, N., et al. Critic regularized regression. arXiv preprint arXiv:2006.15134, 2020.
292
+ [53] Wu, Y., Tucker, G., and Nachum, O. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019.
293
+ [54] Xie, T., Cheng, C.-A., Jiang, N., Mineiro, P., and Agarwal, A. Bellman-consistent pessimism for offline reinforcement learning. Advances in neural information processing systems, 34, 2021.
294
+ [55] Yarats, D., Brandfonbrener, D., Liu, H., Laskin, M., Abbeel, P., Lazaric, A., and Pinto, L. Don't change the algorithm, change the data: Exploratory data for offline reinforcement learning. arXiv preprint arXiv:2201.13425, 2022.
295
+ [56] Yu, T., Thomas, G., Yu, L., Ermon, S., Zou, J., Levine, S., Finn, C., and Ma, T. Mopo: Model-based offline policy optimization. arXiv preprint arXiv:2005.13239, 2020.
296
+ [57] Yu, T., Kumar, A., Rafailov, R., Rajeswaran, A., Levine, S., and Finn, C. Combo: Conservative offline model-based policy optimization. arXiv preprint arXiv:2102.08363, 2021.
redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:033b6427e45ab0543c860495a39a36e4d4490bd39e659c2d42d0f3559acd5f81
3
+ size 413021
redsofflinerlwithheteroskedasticdatasetsviasupportconstraints/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58f795b57c43079cbeb2d7ee41e0ca9d95b391fd78d281deefc09ee74d905abf
3
+ size 501583
reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/a4158dee-af5b-4014-9608-b144bb9ff1be_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5704a9b35aa0c2b7e53fd09031aee1950648a79aba30271250998d59ff1ff52a
3
+ size 143602
reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/a4158dee-af5b-4014-9608-b144bb9ff1be_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f6246422b1dc39cc2a3c64415f60d09c25357c728956599b0668986eb3c4d5c
3
+ size 171013
reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/a4158dee-af5b-4014-9608-b144bb9ff1be_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8a7deadac6379683f384201f002d4ef92db37dbf6d7f4df56bb49c8317c6b4b
3
+ size 7402339
reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/full.md ADDED
@@ -0,0 +1,585 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # RePo: Resilient Model-Based Reinforcement Learning by Regularizing Posterior Predictability
2
+
3
+ Chuning Zhu
4
+
5
+ University of Washington
6
+
7
+ Seattle, WA 98105
8
+
9
+ zchuning@cs.washington.edu
10
+
11
+ Max Simchowitz
12
+
13
+ Massachusetts Institute of Technology
14
+
15
+ Boston, MA 02139
16
+
17
+ msimchow@mit.edu
18
+
19
+ Siri Gadipudi
20
+
21
+ University of Washington
22
+
23
+ Seattle, WA 98105
24
+
25
+ sg06@uw.edu
26
+
27
+ Abhishek Gupta
28
+
29
+ University of Washington
30
+
31
+ Seattle, WA 98105
32
+
33
+ abhgupta@cs.washington.edu
34
+
35
+ # Abstract
36
+
37
+ Visual model-based RL methods typically encode image observations into low-dimensional representations in a manner that does not eliminate redundant information. This leaves them susceptible to spurious variations – changes in task-irrelevant components such as background distractors or lighting conditions. In this paper, we propose a visual model-based RL method that learns a latent representation resilient to such spurious variations. Our training objective encourages the representation to be maximally predictive of dynamics and reward, while constraining the information flow from the observation to the latent representation. We demonstrate that this objective significantly bolsters the resilience of visual model-based RL methods to visual distractors, allowing them to operate in dynamic environments. We then show that while the learned encoder is resilient to spurious variations, it is not invariant under significant distribution shift. To address this, we propose a simple reward-free alignment procedure that enables test time adaptation of the encoder. This allows for quick adaptation to widely differing environments without having to relearn the dynamics and policy. Our effort is a step towards making model-based RL a practical and useful tool for dynamic, diverse domains. We show its effectiveness in simulation benchmarks with significant spurious variations as well as a real-world egocentric navigation task with noisy TVs in the background.
38
+
39
+ Videos and code: https://zchuning.github.io/repo-website/.
40
+
41
+ # 1 Introduction
42
+
43
+ Consider the difference between training a single robot arm against a plain background with reinforcement learning (RL), and learning to operate the same arm amidst of plentiful dynamic distractors - uncontrollable elements such as changing lighting and disturbances in the scene. The latter must contend with spurious variations - differences in environments which are irrelevant for the task but potentially confusing for a vision-based RL agent - resilience to which is indispensable for truly versatile embodied agents deployed in real world settings.
44
+
45
+ Standard end-to-end techniques for visual RL struggle in the presence of spurious variations [64, 48], in part because they fail to discard task-irrelevant elements. To improve generalization [38, 59], self-supervised representation learning methods [23, 39, 55, 54, 17, 31] pre-train visual encoders that compress visual observations. These methods aim for lossless compression of how image observations evolve in time (e.g. by minimizing reconstruction error). Unaware of the demands of downstream
46
+
47
+ tasks, these methods also cannot determine which elements of an environment can be discarded. As such, they often struggle in dynamic and diverse scenes [64, 48, 17] - ones where significant portions of the observations are both unpredictable and irrelevant - despite being remarkably successful in static domains.
48
+
49
+ This paper proposes Resilient Model-Based RL by Regularizing Posterior Predictability (RePo) – an algorithm for learning lossy latent representations resilient to spurious variations. A representation is satisfactory if it (a) predicts its own dynamics and (b) accurately predicts the reward. To satisfy these criteria, RePo jointly learns (i) a visual encoder mapping high-dimensional observations to intermediate image “encodings” (ii) a latent encoder which compresses histories of intermediate image encodings into compressed latent representations (iii) a dynamics model in the latent representation space, and (iv) a reward predictor to most accurately predict current and future rewards. What distinguishes us from past work [63, 12, 17] is a new desideratum of predictability: that, conditioned on past latents and actions, future latent dynamics should look as deterministic as possible. This is because an agent should try to maximize its control over task-relevant parts of the state, whilst neglecting aspects of the environment that it cannot influence [20, 60]. RePo optimizes a novel loss which encourages predictability, thereby discarding a broad range of spurious variations in aspects of the environment
50
+
51
+ which are out of the agent's control (e.g. changes in background, lighting, or visual traffic in the background). At the same time, by penalizing reward prediction error, we capture the task-relevant aspects of the dynamics necessary for learning performant policies.
52
+
53
+ ![](images/4be0da7197f82e8fa0c2c387e1ae928a70c2137ad7853dc69297a2e721398c1a.jpg)
54
+ Figure 1: Reinforcement learning in environments with spurious variations - including dynamic elements like humans, changes in lighting and training across a range of visual appearances.
55
+
56
+ RePo implements a deceptively simple modification to recurrent state-space models for model-based RL [17, 59, 46]. We maximize mutual information (MI) between the current representation and all future rewards, while minimizing the mutual information between the representation and observation. Instead of minimizing image reconstruction error, we optimize a variational lower bound on the MI-objective which tractably enforces that the learned observation encoder, latent dynamics and reward predictors are highly informative of reward, while ensuring latents are as predictable as possible (in the sense described above). We demonstrate that the representations, and the policies built thereupon, learned through RePo succeed in environments with significant amounts of dynamic and uncontrollable distractors, as well as across domains with significant amounts of variability and complexity. Through ablations, we also validate the necessity of our careful algorithm design and optimization decisions.
57
+
58
+ While these learned representations enable more effective reinforcement learning in dynamic, complex environments, the visual encoders (point (i) above) mapping from observations into intermediate encodings suffer from distribution shift in new environments with novel visual features (e.g. a new background not seen at train time.) We propose a simple test-time adaptation scheme which uses (mostly) unlabeled test-time data to adapt the visual encoders only, whilst keeping all other aspects of the RePo model fixed. Because RePo ensures resilience of the compressed latent representation at training time, modifying only the test-time visual encoders to match training time representations allows representations to recover optimal performance with only minor amounts of adaptation.
59
+
60
+ Concretely, the key contributions of this work are: (1) We propose a simple representation learning algorithm RePo for learning representations that are informative of rewards, while being as predictable as possible. This allows model-based RL to scale to dynamic, cluttered environments, avoiding reconstruction. (2) We show that while the learned encoders may be susceptible to distribution shift, they are amenable to a simple test-time adaptation scheme that can allow for quick adaptation in new environments. (3) We demonstrate the efficacy of RePo on a number of simulation and real-world domains with dynamic and diverse environments.
61
+
62
+ # 2 Related Work
63
+
64
+ Our work is related to a number of techniques for visual model-based reinforcement learning, but differs in crucial elements that allow it to scale to dynamic environments with spurious variations.
65
+
66
+ Model-Based RL. Though model-based RL began with low-dimensional, compact state spaces [26, 37, 27, 57], advances in visual model-based reinforcement learning [17, 19, 18, 44, 42, 21] learn latent representations and dynamics models from high dimensional visual feedback (typically via recurrent state-space models). Perhaps most relevant to RePo is DREAMER [17]. Section 4 explains the salient differences between DREAMER and RePo; notably, we eschew a reconstruction loss in pursuit of resilience to spurious variations. A closely related work is TD-MPC [22], which learns a task-oriented latent representation by predicting the value function. However, its representation may not discard irrelevant information and necessarily contains information about the policy.
67
+
68
+ Representation Learning for Control. There is a plethora of techniques for pretraining visual representations using unsupervised learning objectives [38, 34, 30, 32, 41, 49, 47, 13]. While these can be effective on certain domains, they do not take downstream tasks into account. Task-relevant representation learning for RL uses the reward function to guide representation learning, typically in pursuit of value-equivalence (e.g. via bisimulation) [63, 8, 62, 12, 50, 22]. However, these approaches do little to explicitly counteract spurious variations. Our work aligns with a line of work that disentangles task-relevant and task-irrelevant components of the MDP. [7, 6] obtain provable guarantees for representation learning with exogenous distractors - parts of the state space whose dynamics is independent of the agent's actions. [56] introduces a more granular decomposition of the MDP across the task relevance and controllability axes. Our work, in contrast, does not impose a specific form on the spurious variations.
69
+
70
+ Domain Adaptation. Unsupervised domain adaptation adapts representations across visually different source and target domains [66, 58, 45, 11, 25]. These techniques predominantly adapt visual encoders by minimizing a distribution measure across source and training distributions, such as MMD [5, 33, 53, 24], KL divergence [67, 35] or Jensen-Shannon divergence [11, 52]. In [61], distribution matching was extended to sequential decision making. While domain adaptation settings typically assume that the source and target share an underlying marginal or joint distribution in a latent space, this assumption does not hold in online RL because the data is being collected incrementally through exploration, and hence the marginals may not match. Hence, our test-time adaptation technique, as outlined in Section 4.1, introduces a novel support matching objective that enforces the test distribution to be in support of the train distribution, without trying to make the distributions identical.
71
+
72
+ # 3 Preliminaries
73
+
74
+ MDPs. A (discounted) MDP $\mathcal{M} = (\mathcal{S},\mathcal{A},\gamma ,P,P_0,r)$ consists of a state-space $\mathcal{S}$ , action space $\mathcal{A}$ , discount factor $\gamma \in (0,1)$ , transition kernel $P(\cdot ,\cdot):S\times \mathcal{A}\to \triangle (\mathcal{S})$ , initial state distribution $P_{0}\in \triangle (\mathcal{S})$ , and reward function $r(\cdot ,\cdot):S\times \mathcal{A}\rightarrow [0,1]$ (assumed deterministic for simplicity). A policy $\pi :S\to \triangle (\mathcal{A})$ is a mapping from states to distributions over actions. We let $\mathbb{E}_{\mathcal{M}}^{\pi}$ denote expectations under $s_0\sim P_0$ , $a_{t}\sim \pi (s_{t})$ , and $s_{t + 1}\sim P(s_t,a_t)$ ; the value is $V_{\mathcal{M}}^{\pi}(s)\coloneqq \mathbb{E}_{\mathcal{M}}^{\pi}\left[\sum_{t = 0}^{\infty}\gamma^{t}r(s_{t},a_{t})\mid s_{0} = s\right]$ , and $V_{\mathcal{M}}^{\pi} = \mathbb{E}_{s_{0}\sim P_{0}}[V_{\mathcal{M}}^{\pi}(s_{0})]$ . The goal is to learn a policy $\pi$ that maximizes the sum of expected returns $\mathbb{E}_{\mathcal{M}}^{\pi}\left[\sum_{t = 0}^{\infty}\gamma^{t}r(s_{t},a_{t})\mid s_{0} = s\right]$ , as in most RL problems, but we do so based on a belief state as explained below.
75
+
76
+ Visual RL and Representations. For our purposes, we take states $s_t$ to be visual observations $s_t \equiv o_t \in \mathcal{O}$ ; for simplicity, we avoid explicitly describing a POMDP formulation - this can be subsumed either by introducing a belief-state [68], or by assuming that images (or sequences thereof, e.g. to estimate velocities) are sufficient to determine rewards and transitions [36]. The states $o_t$ may be high-dimensional, so we learn encoders $h: \mathcal{O} \to \mathcal{X}$ to an encoding space $\mathcal{X}$ . We compress these encodings $x_t$ further into latent states $z_t$ , described at length in our method in Section 4.
77
+
78
+ Spurious variation. By spurious variation, we informally mean the presence of features of the states $s_t$ which are irrelevant to our task, but which do vary across trajectories. These can take the form of explicit distractors - either static objects (e.g. background wall-paper) or dynamic processes (e.g. video coming from a television) that do not affect the part of the state space involved in our task [7, 6]. Spurious variation can also encompass processes which are not so easy to disentangle with the state: for example, lighting conditions will affect all observations, and hence will affect the appearance of transition dynamics.
79
+
80
+ Consider the following canonical example: an MDP with state space $S_1 \times S_2$ , where for $s = (s^{(1)}, s^{(2)}) \in S_1 \times S_2$ , the reward $r(s, a)$ is a function $\bar{r}(s^{(1)}, a)$ only of the projection onto $S_1$ . Moreover, suppose that $\mathbb{P}[(s^+)^{(1)} \in \cdot \mid s, a]$ , where $s^+ \sim P(s, a)$ , is a distribution $\bar{P}(s^{(1)}, a)$ again only depending on $s^{(1)}$ . Then, the states $s^{(2)}$ can be viewed as spuriously varying. For example, if
81
+
82
+ ![](images/a4dcfeb3a90e255b00e011360122bd8da6c3f57260da0513b313b197470cd397.jpg)
83
+ Figure 2: RePo learns a latent representation resilient to spurious variations by predicting the dynamics and the reward while constraining the information flow from images.
84
+
85
+ ![](images/432b13d723417c389e8860a06f4aaa93c29818382352f1e78e4789f3ddde64f3.jpg)
86
+
87
+ $s^{(1)}$ is a Lagrangian state and $s^{(2)}$ is a static background, then it is clear that transitions of Lagrangian state and reward do not depend on $s^{(2)}$ . Our template also encompasses dynamic distractors; e.g. a television show in the background has its own dynamics, and these also do not affect reward or physical dynamics. Even varying lighting conditions can be encompassed in this framework: the shadows in a scene or brightness of the environment should not affect reward or physics, even though these visual features themselves evolve dynamically in response to actions and changes in state. That is, there are examples of spurious variation where $s^{(1)}$ (e.g. Lagrangian state) affect $s^{(2)}$ (e.g. certain visual features), but not the other way round. In all cases, "spurious" implies that states $(s_t^{(2)})_{t\geq 0}$ and their possible variations due to different environments, have no bearing on optimal actions.
88
+
89
+ # 4 RePo: Parsimonious Representation Learning without Reconstruction
90
+
91
+ We propose a simple technique for learning task-relevant representations that encourages parsimony by removing all information that is neither pertinent to the reward nor the dynamics. Such representations discard information about spurious variations, while retaining the information actually needed for decision making.
92
+
93
+ To describe our method formally, we introduce some notation (which is also shown in Fig 2). Let $\mathcal{O}$ be the space of image observations, $\mathcal{X}$ the space of encoded observations, where $h: \mathcal{O} \to \mathcal{X}$ represents the encoding function from image observations to encoded observations, and $\mathcal{Z}$ the space of latent representations. Note that $x_{t+1}$ is simply the instantaneous encoding of the image $o_{t+1}$ as $x_{t+1} = h(o_{t+1})$ , but the latent representation $z_{t+1}$ at time step $t+1$ is an aggregation of the current encoding $x_{t+1}$ and previous latent $z_t$ and action $a_t$ . Let $\mathcal{P}_{\mathrm{post}}$ denote the space of "posteriors" on latent dynamics $z$ of the form $p(z_{t+1} \in \cdot \mid z_t, a_t, x_{t+1})$ , where $z_t, z_{t+1} \in \mathcal{Z}, a_t \in \mathcal{A}, x_{t+1} \in \mathcal{X}$ , and where $z_0 \sim p_0$ has some initial distribution $p_0$ . In words, the latent posterior uses past latent state and action, in addition to current encoding to determine current latent. Control policies and learned dynamics models act on this latent representation $z_{t+1}$ , and not simply the image encoding $x_{t+1}$ so as to incorporate historical information.
94
+
95
+ Let $\mathcal{D}_{\mathrm{buf}}$ denote the distribution over experienced actions, observations and rewards from the environment $((a_{1:T},o_{1:T},r_{1:T})\sim \mathcal{D}_{\mathrm{buf}})$ . For $p\in \mathcal{P}_{\mathrm{post}}$ , let $\mathbb{E}_{p,h}$ denote expectation of $(a_{1:T},o_{1:T},r_{1:T})\sim \mathcal{D}_{\mathrm{buf}}$ , $x_{t} = h(o_{t})$ and the latents $z_{t + 1}\sim p(\cdot \mid z_t,a_t,x_{t + 1})$ drawn from the latent posterior, with the initial latent $z_0\sim p_0$ . Our starting proposal is to optimize the latent posterior $p$ and image encoder $h$ such that information between the latent representation and future reward is maximized, while bottlenecking [1] the information between the latent and the observation:
96
+
97
+ $$
98
+ \max _ {p, h} \mathrm {I} _ {p, h} \left(z _ {1: T}; r _ {1: T} \mid a _ {1: T}\right) \text {s . t .} \mathrm {I} _ {p, h} \left(z _ {1: T}; o _ {1: T} \mid a _ {1: T}\right) < \epsilon . \tag {4.1}
99
+ $$
100
+
101
+ Above, $\mathrm{I}_{p,h}(z_{1:T};r_{1:T}\mid a_{1:T})$ denotes mutual information between latents and rewards conditioned on actions under the $\mathbb{E}_{p,h}$ distribution, and $\mathrm{I}_{p,h}(z_{1:T};o_{1:T}\mid a_{1:T})$ measures information
102
+
103
+ between latents and observations under $\mathbb{E}_{p,h}$ as well. Thus, (4.1) aims to preserve large mutual information with rewards whilst minimizing information stored from observations.
104
+
105
+ Optimizing mutual information is intractable in general, so we propose two variational relaxations of both objects (proven in Appendix B)
106
+
107
+ $$
108
+ \mathrm {I} _ {p, h} \left(z _ {1: T}; r _ {1: T} \mid a _ {1: T}\right) \geq \mathbb {E} _ {p, h} \left[ \sum_ {t = 1} ^ {T} \log q _ {\mathrm {r}} \left(r _ {t} \mid z _ {t}\right) \right] \tag {4.2}
109
+ $$
110
+
111
+ $$
112
+ \mathrm {I} _ {p, h} \left(z _ {1: T}; o _ {1: T} \mid a _ {1: T}\right) \leq \mathbb {E} _ {p, h} \left[ \sum_ {t = 0} ^ {T - 1} \mathrm {D} _ {\mathrm {K L}} \left(p (\cdot \mid z _ {t}, a _ {t}, x _ {t + 1}) \| q _ {\mathrm {z}} (\cdot \mid z _ {t}, a _ {t})\right) \right], \tag {4.3}
113
+ $$
114
+
115
+ where $q_{\mathrm{r}}$ and $q_{\mathrm{z}}$ are variational families representing beliefs over rewards $r_t$ and latent representations $z_{t+1}$ , respectively. We refer to $z_{t+1} \sim p(\cdot \mid z_t, a_t, x_{t+1})$ as the latent posterior, because it conditions on the latest encoded observation $x_{t+1} = h(o_{t+1})$ . We call the variational approximation $q_{\mathrm{z}}(\cdot \mid z_t, a_t)$ the latent prior because it does not use the current observation $o_{t+1}$ (or its encoding $x_{t+1}$ ) to determine $z_{t+1}$ . Note that the right hand side of Eq. (4.3) depends on $h$ through $x_{t+1} = h(o_{t+1})$ , and thus gradients of this expression incorporate gradients through $h$ .
116
+
117
+ The magic of Eq. (4.3). The upper bound in (4.3) reveals a striking feature which is at the core of our method: that, in order to reduce extraneous information in the latents $z_{t}$ about observations $o_{t}$ , it is enough to match the latent posterior $z_{t + 1} \sim p(\cdot \mid z_{t},a_{t},x_{t + 1})$ to our latent prior $q_{z}(\cdot \mid z_{t},a_{t})$ that does not condition on current $x_{t + 1}$ . Elements that are spurious variations can be captured by $p(\cdot \mid z_{t},a_{t},x_{t + 1})$ , but not by $q_{z}(\cdot \mid z_{t},a_{t})$ , since $q_{z}$ is not informed by the latest observation encoding $x_{t + 1}$ , and spurious variations are not predictable. To match the latent posterior and the latent prior, the latent representation must omit these spurious variations. For example, in an environment with a TV in the background, removing the TV images reduces next-step stochasticity of the environment. Thus, (4.3) encourages representations to omit television images.
118
+
119
+ The relaxed bottleneck. The above discussion may make it seem as if we suffer in the presence of task-relevant stochasticity. However, by replacing the terms in Eq. (4.1) with their relaxations in Eqs. (4.2) and (4.3), we only omit the stochasticity that is not useful for reward-prediction. We make these substitutions, and move to a penalty-formulation amenable to constrained optimization methods like dual-gradient descent [2]. The resulting objective we optimize to learn the latent posterior $p$ , latent prior $q_{\mathrm{z}}$ , reward predictor $q_{\mathrm{r}}$ and observation encoder $h$ jointly is:
120
+
121
+ $$
122
+ \max _ {p, q _ {\mathrm {r}}, q _ {\mathrm {z}}, h} \min _ {\beta} \mathbb {E} _ {p, h} \left[ \sum_ {t = 1} ^ {T} \log q _ {\mathrm {r}} \left(r _ {t} \mid z _ {t}\right) \right] + \beta \left(\mathbb {E} _ {p, h} \left[ \sum_ {t = 0} ^ {T - 1} \mathrm {D} _ {\mathrm {K L}} \left(p (\cdot \mid z _ {t}, a _ {t}, x _ {t + 1}) \| q _ {\mathrm {z}} (\cdot \mid z _ {t}, a _ {t})\right) \right] - \epsilon\right). \tag {4.4}
123
+ $$
124
+
125
+ Implementation details. We parameterize $p$ and $q$ using a recurrent state-space model (RSSM) [17]. The RSSM consists of an encoder $h_{\theta}(x_t \mid o_t)$ , a latent dynamics model $q_{\theta}(z_{t+1} \mid z_t, a_t)$ corresponding to the prior, a representation model $p_{\theta}(z_{t+1} \mid z_t, a_t, x_{t+1})$ corresponding to the posterior, and a reward predictor $q_{\theta}(r_t \mid z_t)$ . We optimize (4.4) using dual gradient descent. In addition, we use the KL balancing technique introduced in Dreamer V2 [19] to balance the learning of the prior and the posterior. Concretely, we compute the KL divergence in Eq. (4.4) as $\mathrm{D}_{\mathrm{KL}}(p \parallel q) = \alpha \mathrm{D}_{\mathrm{KL}}(\lfloor p \rfloor \parallel q) + (1 - \alpha) \mathrm{D}_{\mathrm{KL}}(p \parallel \lfloor q \rfloor)$ , where $\lfloor \cdot \rfloor$ denotes the stop gradient operator and $\alpha \in [0,1]$ is the balancing parameter. With the removal of reconstruction, the KL balancing parameters become especially important as shown by our ablation in Sec. 5.
126
+
127
+ Policy learning As is common in the literature on model-based reinforcement learning [19, 17, 18], our training procedure alternates between (1) Representation Learning: learning a representation $z$ by solving the optimization problem outlined in Eq. (4.4) to infer a latent posterior $p(z_{t+1} \mid z_t, a_t, x_{t+1})$ , a latent prior $q_z(z_{t+1} \mid z_t, a_t)$ , an encoder $x_t = h(o_t)$ and a reward predictor $q_r(r_t \mid z_t)$ , and (2) Policy Learning: using the inferred representation, dynamics model and reward predictor to learn a policy $\pi_\phi(a_t \mid z_t)$ for control. With the latent representation and dynamics model, we perform actor-critic policy learning [16, 10] by rolling out trajectories in the latent space. The critic $V_\psi(z)$ is trained to predict the discounted cumulative reward given a latent state, and the actor $\pi_\phi(a \mid z)$ is trained to take the action that maximizes the critic's prediction. While policy learning is carried out entirely using the latent prior as the dynamics model, during policy execution (referred to as inference in Fig. 2), we infer the posterior distribution $p(z_{t+1} \mid z_t, a_t, x_{t+1})$ over latent representations from the current observation, and use this to condition the policy acting in the world. We refer readers to Appendix C for further details.
128
+
129
+ ![](images/f4e426e9a31006cf7eae75f02c879bc6da6b781544b97640d9f49eb729c5268d.jpg)
130
+ Figure 3: Depiction of test-time adaptation scheme for latent alignment via support constraints. During exploration, the marginal distributions may not match perfectly, so we match the supports of the latent features instead, using a reweighted distribution constraint.
131
+
132
+ Comparison to DREAMER, DEEPMDP, and BISIMULATION. DREAMER [17] was first derived to optimize pixel-reconstruction, leading to high-fidelity dynamics but susceptibility to spurious variations. Naively removing pixel reconstruction from DREAMER, however, leads to poor performance [17]. Our objective can be interpreted as modifying DREAMER so as to maintain sufficiently accurate dynamics, but without the fragility of pixel-reconstruction. DEEPMDP [12] sets the latents $z_{t}$ to exactly the image encodings $x_{t} = h(o_{t})$ . It learns a dynamics $\bar{P}: \mathcal{X} \times \mathcal{A} \to \triangle(\mathcal{X})$ such that the distribution $\bar{x}_{t+1} \sim \bar{P}(h(o_{t}), a_{t})$ is close to $x_{t+1} \sim h(o_{t+1})$ , $o_{t+1} \sim P^{\star}(o_{t}, a_{t})$ , where $P^{\star}$ denotes a ground-truth transition dynamics; this enforces consistency of dynamics under encoding. The above distributions are viewed as conditional on past observation and action, and as a result, highly non-parsimonious representations such as the identity are valid under this objective. BISIMULATION [63] learns an optimal representation in the sense that a perfect bisimulation metric does not discard any relevant information about an MDP. However, there is no guarantee that it will disregard irrelevant information. Indeed, the identity mapping induces a trivial bisimulation metric. Hence, BISIMULATION compresses only by reducing the dimensionality of the latent space. In contrast, we further compress the encodings $x_{t}$ into latents $z_{t}$ so as to enforce that the latent prior $q_{z}(\cdot | a_{t}, z_{t})$ is close to the latest observation-dependent posterior distribution $p(\cdot | z_{t}, a_{t}, x_{t+1})$ . As mentioned in Eq. (4.3), this ensures information compression and invalidates degenerate representations such as the identity mapping.
133
+
134
+ # 4.1 Transferring Invariant Latent Representations via Test-Time Adaptation
135
+
136
+ While resilient to spurious variations seen during training, our learned latents $z_{t}$ - and hence the policies which depend on them - may not generalize to new environments which exhibit systematic distribution shift, e.g. lighting changes or background changes. The main source of degradation is that the encoder $h: \mathcal{O} \to \mathcal{X}$ may observe images that it has not seen at train time; thus the latents, which depend on observations through $x_{t} = h(o_{t})$ , may behave erratically, even when system dynamics remain unchanged.
137
+
138
+ Relying on the resilience of our posteriors $p$ over latents $z_{t}$ introduced by RePo, we propose a test-time adaptation strategy to only adjust the encoder $h$ to the new environment, whilst leaving $p$ fixed. A natural approach is to apply unsupervised domain adaptation methods [66, 58] to adapt the visual encoder $h$ to $h_{\mathrm{test}}$ . These domain adaptation techniques typically operate in supervised learning settings, and impose distributional constraints between source and target domains [61, 25], where the distributions of training and test data are stationary and assumed to be the same in some feature space. A distribution matching constraint would be:
139
+
140
+ $$
141
\min_{h_{\text{test}}(\cdot)} \mathrm{D}\left(\mathcal{P}_{\text{train}} \,\|\, \mathcal{P}_{\text{test}}\right) \quad \text{s.t.} \quad \mathcal{P}_{\text{test}} = h_{\text{test}} \circ \mathcal{D}_{\text{test}}, \quad \mathcal{P}_{\text{train}} = h \circ \mathcal{D}_{\text{train}}. \tag{4.5}
142
+ $$
143
+
144
+ In Eq. (4.5), we consider matching the distributions over encodings $x$ of observations $o$ . Specifically, we assume $\mathcal{D}_{\mathrm{train}}$ and $\mathcal{D}_{\mathrm{test}}$ denote training and test-buffer distributions over observations $o$ , $\mathcal{P}_{\mathrm{train}} = h_{\mathrm{train}} \circ \mathcal{D}_{\mathrm{train}}$ denotes the distribution of $x = h_{\mathrm{train}}(o)$ where $o \sim \mathcal{D}_{\mathrm{train}}$ is encoded by the train-time
145
+
146
+ ![](images/0c6c5187323988ace5a2a802cbf3ac30b5c36e8ba4e9218d374e1d300bf9af5c.jpg)
147
+ Distracted DeepMind Control
148
+
149
+ ![](images/64715d7ed03ad37da791d89e516ba4804f76b8b98040006b3b98e801db44d56f.jpg)
150
+
151
+ ![](images/d46e64b755a50860183df55a81b1691a48d9a530a1d61bec624ef9022c4f8c48.jpg)
152
+ Figure 4: Depiction of the environments being used for evaluation. (Left): the Distracted DeepMind Control suite [64], (Top Right): Maniskill2 [15] environments with realistic backgrounds from Matterport [3]. (Bottom Right): TurtleBot environment with two TVs playing random videos in the background.
153
+
154
+ ![](images/d8c483e681abc109c87c4cb372097768b6fc1a9d4974457e42aaf5bf4adadf6f.jpg)
155
+
156
+ ![](images/ec7f4d0c47d7e12e9a625f5c48db4edef1fad15c644c146f03c5c1802ae40906.jpg)
157
+ Realistic Maniskill
158
+
159
+ ![](images/1e56b3142b20e1eb04a011d13d6397fc35deb8463386682176e1d9cdbb6db064.jpg)
160
+
161
+ ![](images/cc690f9af3d007a44e4c13f5b6668db5147773139500f4b82bdbc13b91b9c748.jpg)
162
+ Lazy TurtleBot
163
+
164
+ ![](images/cdac0f630686e578258b38bfb534d1e2c4c4ef44dac049d6707f042cf2572b08.jpg)
165
+
166
+ encoder $h_{\mathrm{train}}$ , and $\mathcal{P}_{\mathrm{test}} = h_{\mathrm{test}} \circ \mathcal{D}_{\mathrm{test}}$ denotes encodings under a test-time encoder $h_{\mathrm{test}}(\cdot)$ over which we optimize. Here, $\mathrm{D}(\cdot, \cdot)$ denotes an $f$ -divergence, such as the $\chi^2$ -divergence.
167
+
168
+ Support Constraint. Eq. (4.5) fails to capture that the encoded distributions at train and test time differ at the start of our adaptation phase: suboptimal encoder performance at the start of the adaptation phase causes the policy to visit sub-optimal regions of state space not seen at train time. Thus, it may be impossible to match the distribution as in standard unsupervised domain adaptation. We therefore propose to replace Eq. (4.5) with a support constraint, enforcing that the distribution of $h_{\mathrm{test}} \circ \mathcal{D}_{\mathrm{test}}$ is contained in the support of $h_{\mathrm{train}} \circ \mathcal{D}_{\mathrm{train}}$ . We consider the following idealized objective:
169
+
170
+ $$
171
\min_{\tau(\cdot) \geq 0,\; h_{\text{test}}(\cdot)} \mathrm{D}\left(\tau \cdot \mathcal{P}_{\text{train}} \,\|\, \mathcal{P}_{\text{test}}\right) \quad \text{s.t.} \quad \mathbb{E}_{x \sim \mathcal{P}_{\text{train}}}[\tau(x)] = 1. \tag{4.6}
172
+ $$
173
+
174
+ Here, by $\tau \cdot \mathcal{P}_{\mathrm{train}}$ , we mean the re-weighted density of $\mathcal{P}_{\mathrm{train}} = h_{\mathrm{train}} \circ \mathcal{D}_{\mathrm{train}}$ by a function $\tau(x)$ . The constraints $\mathbb{E}_{\mathcal{P}_{\mathrm{train}}}[\tau(x)] = 1$ and $\tau(\cdot) \geq 0$ ensure this reweighted distribution is also a valid probability distribution. The reweighting operation $\tau \cdot \mathcal{P}_{\mathrm{train}}$ seems intractable at first, but we show that if we take $\mathrm{D}(\cdot, \cdot) = \chi^2(\cdot, \cdot)$ to be the $\chi^2$ divergence, then Eq. (4.6) admits the following tractable Lagrangian formulation (we refer readers to [65] and Appendix B for a thorough derivation):
175
+
176
+ $$
177
\min_{\tau(\cdot) \geq 0,\; h_{\text{test}}(\cdot)} \; \max_{f(\cdot),\, \lambda} \; \mathbb{E}_{\mathcal{P}_{\text{train}}}\left[\tau(x) \cdot f(x)\right] - \mathbb{E}_{\mathcal{P}_{\text{test}}}\left[f(x) + \frac{1}{4} f(x)^{2}\right] + \lambda\left(\mathbb{E}_{\mathcal{P}_{\text{train}}}[\tau(x)] - 1\right), \tag{4.7}
178
+ $$
179
+
180
+ where above, $\lambda \in \mathbb{R}$ , $f: \mathcal{X} \to \mathbb{R}$ , and the objective depends on $h_{\mathrm{test}}$ through the definition $\mathcal{P}_{\mathrm{test}} = h_{\mathrm{test}} \circ \mathcal{D}_{\mathrm{test}}$ . This objective is now a tractable saddle point optimization, which can be solved with standard stochastic optimization techniques. The optimization alternates between optimizing the reweighting $\tau$ and the visual encoder $h_{\mathrm{test}}$ , and the dual variables $f, \lambda$ . Throughout adaptation, we freeze all other parts of the recurrent state space model and only optimize the encoder. We provide more intuition for the support constraint in Appendix E.
181
+
182
+ Calibration. We note that naively reweighting by $\tau(\cdot)$ can cause degenerate encodings that collapse into one point. To prevent this, we regularize the support constraint by also ensuring that some set of paired "calibration" states across training and testing domains share the same encoding. We collect paired trajectories in the training and testing domains using actions generated by an exploration policy, and minimize the $\ell_2$ loss between the training and testing encoding of each pair of observations. We defer the details of the complete optimization to Appendix C.
183
+
184
+ # 5 Experimental Evaluation
185
+
186
+ We conduct empirical experiments to answer the following research questions: (1) Does RePo enable learning in dynamic, distracted environments with spurious variations? (2) Do representations learned by RePo quickly adapt to new environments with test time adaptation? (3) Does RePo help learning in static, but diverse and cluttered environments?
187
+
188
+ ![](images/db89e1b891b490f2262a77c8c6518988ee02bc72269e9ccfd3c8662583dd3ad5.jpg)
189
+
190
+ ![](images/8d6c899ef5f08759b03a3886a7b0d8da7fce8c6ce9b26f27ac404d5bcbfc8af5.jpg)
191
+
192
+ ![](images/5693b4ed171a3789358166d8715c3881b6d965bd9644c4dbd418352773f75881.jpg)
193
+
194
+ Figure 5: Results on distracted DeepMind control environments. These environments have spurious variations, and RePo is able to successfully learn in all of them, both faster and achieving higher asymptotic returns than prior representation learning methods.
195
+ ![](images/60b351cc298844b5c0e4125fc363c33a546dad9eac378c232e94dd22f3661327.jpg)
196
+ RePo (Ours) TIA TD-MPC DBC Dreamer Denoised MDP DeepMDP RePo Convergence
197
+
198
+ ![](images/d7dce5bfef6b66bb01151c97e0a05b38e78f3d43001a2a56f49935bab31882b3.jpg)
199
+
200
+ ![](images/08b3c916663305548cc90a0b55d792b88fbf660626b7261b70f1ad7b4d2c4e0d.jpg)
201
+
202
+ Evaluation domains We evaluate our method primarily in three different settings. (1) Distracted DeepMind Control Suite [64, 63] is a variant of DeepMind Control Suite where the static background is replaced with natural videos (Fig. 4). For adaptation experiments, we train agents on static undistracted backgrounds and adapt them to distracted variants. (2) Realistic Maniskill is a benchmark we constructed based on the Maniskill2 benchmark [15], but with realistic backgrounds from [3] to simulate learning in a diverse range of human homes. We solve three tasks - LiftCube, PushCube, and TurnFaucet in a variety of background settings. (3) Lazy TurtleBot is a real-world robotic setup where a TurtleBot has to reach some goal location from egocentric observations in a furnished room. However, there are two TVs playing random videos to distract the "lazy" robot. We provide more details about evaluation domains in Appendix D.
203
+
204
+ Baselines We compare our method with a number of techniques that explicitly learn representations and use them for learning control policies. (1) Dreamer [17] is a state-of-the-art visual model-based RL method that learns a latent representation by reconstructing images. (2) TIA [9] renders Dreamer more robust to visual distractors by using a separate dynamics model to capture the task-irrelevant components in the environment. (3) Denoised MDP [56] further learns a factorized latent dynamics model that disentangles controllability and reward relevance. (4) TD-MPC [22] trains a latent dynamics model to predict the value function and uses a hybrid planning method to extract a policy. (5) DeepMDP [12] is a model-free method that learns a representation by predicting dynamics and reward, and then performs actor-critic policy learning on the learned representation. (6) Deep Bisimulation for Control (DBC) [63] is a model-free algorithm which encodes images into a latent space that preserves the bisimulation metric.
205
+
206
+ We also compare with a number of techniques for test-time adaptation of these representations. (1) calibrated distribution matching, a variant of the method proposed in Section 4.1, using a distribution matching constraint rather than a support matching one, (2) uncalibrated support matching, a variant of the method proposed in Section 4.1, using a support matching constraint but without using paired examples, (3) uncalibrated distribution matching, a variant of the method proposed in Section 4.1, using a distribution matching constraint, but without using paired examples, (4) invariance through latent alignment ILA [61], a technique for test-time adaptation of representations with distribution matching and enforcing consistency in latent dynamics, (5) calibration, a baseline that only matches the encodings of paired examples.
207
+
208
+ Does RePo learn behaviors in environments with spurious variations? We evaluate our method's ability to ignore spurious variations on a suite of simulated benchmark environments with dynamic visual backgrounds (Fig. 4); these are challenging because uncontrollable elements of the environment visually dominate a significant portion of the scene. Fig. 5 shows our method outperforms the baselines across six Distracted DeepMind Control environments, both in terms of learning speed and asymptotic performance. This implies that our method successfully learns latent representations resilient to spurious variations. Dreamer [17] attempts to reconstruct the dynamic visual distractors which is challenging in these domains. TIA [9] and Denoised MDP [56] see occasional success when
209
+
210
+ ![](images/40a66a6dec4faa69c4a02658611a0ee64e429387877e8945d3a3b3b7dd9ad342.jpg)
211
+ Calibrated Support (Ours) Calibrated Distribution
212
+
213
+ ![](images/1e418bfbce4a775d332c59c1a5391a642f9baa5f6faec9bbad4cd5ad793ffbd2.jpg)
214
+ Support Distribution
215
+
216
+ ![](images/568c430cd66e73c0a540d7dbbdfee32a0c5ffa24f5c3b9b44f1addd84613b809.jpg)
217
+ ILACalibration
218
+
219
+ Figure 6: Results on adaptation from static environments to dynamic environments in Deepmind control. RePo with calibrated support constraints outperforms ablations and previous techniques for domain adaptation.
220
+ ![](images/4414ae4eb567613dfefa6e75fb0ff32138d36b2f8be1a5735ea00f70fe3b62ba.jpg)
221
+ RePo (Ours) Dreamer
222
+
223
+ Figure 7: Results of training agents on varying static environments in Maniskill [15]. RePo is able to learn more quickly and efficiently than alternatives even in static domains.
224
+ ![](images/dfc5730354f0c6e69d09092b0b9299936e42e5efc987ccda387961e32439f303.jpg)
225
+ TIA TD Denoised MDP De
226
+
227
+ ![](images/f33a31724f8c36433b6ecd0993b3e8c46e13ca0e4701488c2990dfc818e5c9b7.jpg)
228
+ IPC DBC MDP
229
+
230
+ they dissociate the task-relevant and irrelevant components, but they suffer from high variance and optimization failures. TD-MPC [22] is affected by spurious variations as its representations are not minimal. The model-free baselines DeepMDP [12] and DBC [63] exhibit lower sample efficiency on the more complex domains despite performing well on simpler ones.
231
+
232
+ To further validate RePo's ability to handle spurious variations in the real world, we evaluate its performance on Lazy TurtleBot, where a mobile robot has to navigate around a furnished room to reach the goal from egocentric observations (Fig. 4). To introduce spurious variations, we place two TVs playing random YouTube videos along the critical paths to the goal. As shown in Table 1, RePo is able to reach the goal with nontrivial success within 15K environment steps, whereas Dreamer fails to reach the goal. We provide details about the setup in Appendix D.
233
+
234
+ Table 1: Results on Lazy TurtleBot at ${15}\mathrm{\;K}$ environment steps. RePo achieves nontrivial success whereas Dreamer fails to reach the goal.
235
+
236
+ <table><tr><td></td><td>Success</td><td>Return</td></tr><tr><td>RePo (Ours)</td><td>62.5%</td><td>-24.3</td></tr><tr><td>Dreamer [17]</td><td>0.0%</td><td>-61.7</td></tr></table>
237
+
238
+ Do representations learned by RePo transfer under distribution shift? We evaluate the effectiveness of the test-time adaptation method described in Section 4.1 on three DeepMind Control domains: Walker Stand, Walker Walk, and Cheetah Run. We train the representation in environments with static backgrounds, and adapt the representation to domains with natural video distractors (as shown in Fig. 4). For methods that use calibration between the source and target environments, we collect 10 trajectories of paired observations. Results are shown in Fig. 6. RePo shows the ability to adapt quickly across all three domains, nearly recovering the full training performance within 50k steps. Performance degrades if we replace the support constraint with a distribution matching objective, as it is infeasible to match distributions with the test-time distribution having insufficient exploration. We also observe that by removing the calibration examples, both the support constraint and distribution matching variants perform worse as the distributions tend to collapse. We found the addition of dynamics consistency in ILA to be ineffective. Nor is calibration alone sufficient for adaptation.
239
+
240
+ Does RePo learn across diverse environments with varying visual features? While the previous two sections studied learning and adaptation in dynamic environments with uncontrollable elements, we also evaluate RePo on its ability to learn in a diverse range of environments, each with a realistic and cluttered static background. Being able to learn more effectively in these domains suggests that RePo focuses its representation capacity on the important elements of the task across environments, rather than trying to reconstruct the entire background for every environment.
241
+
242
+ ![](images/ee00bd2e520c363ea07907efc9cf6fec140094df72070f9c8dd18bfd4a81bdf0.jpg)
243
+ Figure 9: Probing representations learned by RePo shows that the background is largely ignored, while [17] tries to reconstruct it at the cost of the agent prediction.
244
+
245
+ ![](images/b36d5ef62212f20293cf738c4c9a10d946390b7b4a61210b1d6101bc0b488fd9.jpg)
246
+ Figure 10: Top two principal components of RePo and Dreamer's latent representations across different backgrounds. RePo's latent representation is more compact than Dreamer's, which enables data sharing.
247
+
248
+ We test on three robotic manipulation tasks - LiftCube, PushCube, and TurnFaucet with realistic backgrounds depicted in Fig. 4. As shown in Fig. 7, our method achieves saturating performance across all three tasks. Dreamer [17] spends its representation capacity memorizing backgrounds and is unable to reach optimal task performance. TIA [9] suffers from high variance and occasionally fails to dissociate task-relevant from task-irrelevant features. Denoised MDP [56], TD-MPC [22], and DBC [63] learn to ignore the background in two of the tasks but generally lag behind RePo in terms of sample efficiency. DeepMDP [12] fails to learn meaningful behavior in any task.
249
+
250
+ Visualizing representations learned by RePo To decipher our representation learning objective, we probe the learned representations by post-hoc training a separate image decoder to reconstruct image observations from the latents. We visualize the results in Fig. 9 and compare them with Dreamer reconstructions [17]. Our representation contains little information about background but is capable of reconstructing the agent, implying that it contains only task-relevant information.
251
+
252
+ In addition to probing, we qualitatively compare the latent states of RePo and Dreamer by visualizing their top two principal components. We collect the same trajectory across all backgrounds in Maniskill and visualize the final recurrent latent state inferred by RePo and Dreamer respectively. As shown in Fig. 10, RePo produces more compact latent representations than Dreamer, meaning the latent states encode less information about background variations. This enables RePo to share data across different backgrounds, explaining its superior sample efficiency compared to baselines.
253
+
254
+ ![](images/78e34d71d55d80e2d143e0412249ae7d7bc0d2102b519cc8a791909736f790bd.jpg)
255
+ Figure 8: Ablating objectives showing the importance of information bottleneck and KL balancing described in Section 4.
256
+
257
+ Ablation experiments We conduct ablation experiments to determine the effect of hyperparameters in Fig. 8. As we can see, the performance is crucially dependent on the information bottleneck $\epsilon$ , as well as KL balancing. We refer readers to Appendix E for a more thorough discussion.
258
+
259
+ # 6 Discussion
260
+
261
+ This work presents RePo, a technique for learning parsimonious representations that are resilient to spurious variations. Our representation is effective for learning in dynamic, distracted environments. And while the representation is subject to degradation under distribution shift, it can be quickly adapted to new domains by a semi-supervised test-time adaptation procedure. A limitation of our method is that the learned dynamics model is no longer task-agnostic, as it only captures task-relevant information. This can be potentially addressed by simultaneously predicting multiple reward objectives. Our framework opens up several interesting directions for future research, such as: can a multi-task variant of RePo allow for representations applicable to some distribution of tasks? Can we apply our algorithm in a continual learning setup? We believe our method holds promise in these more general settings, especially for real robots deployed into dynamic, human-centric environments.
262
+
263
+ # Acknowledgments and Disclosure of Funding
264
+
265
+ We would like to thank Marius Memmel, Max Balsells, and many other members of the WEIRD Lab at University of Washington for valuable feedback and discussions.
266
+
267
+ # References
268
+
269
+ [1] A. A. Alemi, I. Fischer, J. V. Dillon, and K. Murphy. Deep variational information bottleneck. In International Conference on Learning Representations, 2017.
270
+ [2] S. Boyd and L. Vandenberghe. Convex Optimization. Cambridge University Press, 2004.
271
+ [3] A. Chang, A. Dai, T. Funkhouser, M. Halber, M. Niessner, M. Savva, S. Song, A. Zeng, and Y. Zhang. Matterport3D: Learning from RGB-D data in indoor environments. International Conference on 3D Vision (3DV), 2017.
272
+ [4] B. Dai, N. He, Y. Pan, B. Boots, and L. Song. Learning from Conditional Distributions via Dual Embeddings. In A. Singh and J. Zhu, editors, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, volume 54 of Proceedings of Machine Learning Research, pages 1458-1467. PMLR, 20-22 Apr 2017.
273
+ [5] F. Dorri and A. Ghodsi. Adapting component analysis. In M. J. Zaki, A. Siebes, J. X. Yu, B. Goethals, G. I. Webb, and X. Wu, editors, 12th IEEE International Conference on Data Mining, ICDM 2012, Brussels, Belgium, December 10-13, 2012, pages 846-851. IEEE Computer Society, 2012.
274
+ [6] Y. Efroni, D. J. Foster, D. Misra, A. Krishnamurthy, and J. Langford. Sample-efficient reinforcement learning in the presence of exogenous information. In P.-L. Loh and M. Raginsky, editors, Proceedings of Thirty Fifth Conference on Learning Theory, volume 178 of Proceedings of Machine Learning Research, pages 5062-5127. PMLR, 02-05 Jul 2022.
275
+ [7] Y. Efroni, D. Misra, A. Krishnamurthy, A. Agarwal, and J. Langford. Provably filtering exogenous distractors using multistep inverse dynamics. In International Conference on Learning Representations, 2022.
276
+ [8] N. Ferns and D. Precup. Bisimulation metrics are optimal value functions. In N. L. Zhang and J. Tian, editors, Proceedings of the Thirtieth Conference on Uncertainty in Artificial Intelligence, UAI 2014, Quebec City, Quebec, Canada, July 23-27, 2014, pages 210-219. AUAI Press, 2014.
277
+ [9] X. Fu, G. Yang, P. Agrawal, and T. Jaakkola. Learning task informed abstractions. In M. Meila and T. Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 3480-3491. PMLR, 18-24 Jul 2021.
278
+ [10] S. Fujimoto, H. van Hoof, and D. Meger. Addressing function approximation error in actor-critic methods. In J. G. Dy and A. Krause, editors, Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pages 1582-1591. PMLR, 2018.
279
+ [11] Y. Ganin, E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, and V. S. Lempitsky. Domain-adversarial training of neural networks. CoRR, abs/1505.07818, 2015.
280
+ [12] C. Gelada, S. Kumar, J. Buckman, O. Nachum, and M. G. Bellemare. DeepMDP: Learning continuous latent space models for representation learning. In K. Chaudhuri and R. Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 2170–2179. PMLR, 09–15 Jun 2019.
281
+ [13] D. Ghosh, A. Gupta, and S. Levine. Learning actionable representations with goal conditioned policies. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019.
282
+ [14] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio. Generative adversarial nets. In Advances in neural information processing systems, pages 2672–2680, 2014.
283
+
284
+ [15] J. Gu, F. Xiang, X. Li, Z. Ling, X. Liu, T. Mu, Y. Tang, S. Tao, X. Wei, Y. Yao, X. Yuan, P. Xie, Z. Huang, R. Chen, and H. Su. Maniskill2: A unified benchmark for generalizable manipulation skills. In International Conference on Learning Representations, 2023.
285
+ [16] T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018.
286
+ [17] D. Hafner, T. P. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent imagination. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020.
287
+ [18] D. Hafner, T. P. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. Learning latent dynamics for planning from pixels. In K. Chaudhuri and R. Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pages 2555-2565. PMLR, 2019.
288
+ [19] D. Hafner, T. P. Lillicrap, M. Norouzi, and J. Ba. Mastering atari with discrete world models. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021.
289
+ [20] K. Haninger, R. Vicente-Garcia, and J. Krüger. Towards learning controllable representations of physical systems. CoRR, abs/2011.09906, 2020.
290
+ [21] N. Hansen, Y. Lin, H. Su, X. Wang, V. Kumar, and A. Rajeswaran. Modem: Accelerating visual model-based reinforcement learning with demonstrations. CoRR, abs/2212.05698, 2022.
291
+ [22] N. Hansen, X. Wang, and H. Su. Temporal difference learning for model predictive control. In International Conference on Machine Learning. PMLR, 2022.
292
+ [23] K. He, X. Chen, S. Xie, Y. Li, P. Dólár, and R. B. Girshick. Masked autoencoders are scalable vision learners. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 15979-15988. IEEE, 2022.
293
+ [24] J. Hoffman, E. Tzeng, T. Darrell, and K. Saenko. Simultaneous deep transfer across domains and tasks. In G. Csurka, editor, Domain Adaptation in Computer Vision Applications, Advances in Computer Vision and Pattern Recognition, pages 173-187. Springer, 2017.
294
+ [25] J. Hoffman, E. Tzeng, T. Park, J. Zhu, P. Isola, K. Saenko, A. A. Efros, and T. Darrell. Cycada: Cycle-consistent adversarial domain adaptation. CoRR, abs/1711.03213, 2017.
295
+ [26] M. Janner, J. Fu, M. Zhang, and S. Levine. When to trust your model: Model-based policy optimization. In H. M. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. B. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pages 12498-12509, 2019.
296
+ [27] M. Janner, I. Mordatch, and S. Levine. Gamma-models: Generative temporal difference learning for infinite-horizon prediction. In H. Larochelle, M. Ranzato, R. Hadsell, M. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020.
297
+ [28] W. Kay, J. Carreira, K. Simonyan, B. Zhang, C. Hillier, S. Vijayanarasimhan, F. Viola, T. Green, T. Back, A. Natsev, M. Suleyman, and A. Zisserman. The kinetics human action video dataset. ArXiv, abs/1705.06950, 2017.
298
+ [29] D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations, 2015.
299
+ [30] M. Laskin, A. Srinivas, and P. Abbeel. CURL: contrastive unsupervised representations for reinforcement learning. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pages 5639-5650. PMLR, 2020.
300
+
301
+ [31] A. X. Lee, A. Nagabandi, P. Abbeel, and S. Levine. Stochastic latent actor-critic: Deep reinforcement learning with a latent variable model. In H. Larochelle, M. Ranzato, R. Hadsell, M. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020.
302
+ [32] T. Lesort, N. D. Rodríguez, J. Goudou, and D. Filliat. State representation learning for control: An overview. CoRR, abs/1802.04181, 2018.
303
+ [33] M. Long, J. Wang, G. Ding, J. Sun, and P. S. Yu. Transfer joint matching for unsupervised domain adaptation. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014, Columbus, OH, USA, June 23-28, 2014, pages 1410-1417. IEEE Computer Society, 2014.
304
+ [34] A. Majumdar, K. Yadav, S. Arnaud, Y. J. Ma, C. Chen, S. Silwal, A. Jain, V. Berges, P. Abbeel, J. Malik, D. Batra, Y. Lin, O. Maksymets, A. Rajeswaran, and F. Meier. Where are we in the search for an artificial visual cortex for embodied intelligence? CoRR, abs/2303.18240, 2023.
305
+ [35] Z. Meng, J. Li, Y. Gong, and B. Juang. Adversarial teacher-student learning for unsupervised domain adaptation. In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2018, Calgary, AB, Canada, April 15-20, 2018, pages 5949-5953. IEEE, 2018.
306
+ [36] V. Mnih, K. Kavukcuoglu, D. Silver, A. Graves, I. Antonoglou, D. Wierstra, and M. A. Ried-miller. Playing atari with deep reinforcement learning. CoRR, abs/1312.5602, 2013.
307
+ [37] A. Nagabandi, G. Kahn, R. S. Fearing, and S. Levine. Neural network dynamics for model-based deep reinforcement learning with model-free fine-tuning. In 2018 IEEE International Conference on Robotics and Automation, ICRA 2018, Brisbane, Australia, May 21-25, 2018, pages 7559-7566. IEEE, 2018.
308
+ [38] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3M: A universal visual representation for robot manipulation. In K. Liu, D. Kulic, and J. Ichnowski, editors, Conference on Robot Learning, CoRL 2022, 14-18 December 2022, Auckland, New Zealand, volume 205 of Proceedings of Machine Learning Research, pages 892-909. PMLR, 2022.
309
+ [39] M. Oquab, T. Darcet, T. Moutakanni, H. Vo, M. Szafraniec, V. Khalidov, P. Fernandez, D. Haziza, F. Massa, A. El-Nouby, M. Assran, N. Ballas, W. Galuba, R. Howes, P. Huang, S. Li, I. Misra, M. G. Rabbat, V. Sharma, G. Synnaeve, H. Xu, H. Jégou, J. Mairal, P. Labatut, A. Joulin, and P. Bojanowski. Dinov2: Learning robust visual features without supervision. CoRR, abs/2304.07193, 2023.
310
+ [40] X. B. Peng, A. Kanazawa, S. Toyer, P. Abbeel, and S. Levine. Variational discriminator bottleneck: Improving imitation learning, inverse RL, and GANs by constraining information flow. In International Conference on Learning Representations, 2019.
311
+ [41] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell. Real-world robot learning with masked visual pre-training. In K. Liu, D. Kulic, and J. Ichnowski, editors, Conference on Robot Learning, CoRL 2022, 14-18 December 2022, Auckland, New Zealand, volume 205 of Proceedings of Machine Learning Research, pages 416-426. PMLR, 2022.
312
+ [42] R. Rafailov, T. Yu, A. Rajeswaran, and C. Finn. Offline reinforcement learning from images with latent space models. In A. Jadbabaie, J. Lygeros, G. J. Pappas, P. A. Parrilo, B. Recht, C. J. Tomlin, and M. N. Zeilinger, editors, Proceedings of the 3rd Annual Conference on Learning for Dynamics and Control, L4DC 2021, 7-8 June 2021, Virtual Event, Switzerland, volume 144 of Proceedings of Machine Learning Research, pages 1154-1168. PMLR, 2021.
313
+ [43] S. K. Ramakrishnan, A. Gokaslan, E. Wijmans, O. Maksymets, A. Clegg, J. M. Turner, E. Undersander, W. Galuba, A. Westbury, A. X. Chang, M. Savva, Y. Zhao, and D. Batra. Habitat-Matterport 3D dataset (HM3D): 1000 large-scale 3D environments for embodied AI. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2021.
314
+
315
+ [44] O. Rybkin, C. Zhu, A. Nagabandi, K. Daniilidis, I. Mordatch, and S. Levine. Model-based reinforcement learning via latent-space collocation. In M. Meila and T. Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 9190-9201. PMLR, 2021.
316
+ [45] S. Sankaranarayanan, Y. Balaji, A. Jain, S. N. Lim, and R. Chellappa. Learning from synthetic data: Addressing domain shift for semantic segmentation. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pages 3752-3761. Computer Vision Foundation / IEEE Computer Society, 2018.
317
+ [46] R. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via self-supervised world models. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pages 8583-8592. PMLR, 2020.
318
+ [47] P. Sermanet, C. Lynch, Y. Chebotar, J. Hsu, E. Jang, S. Schaal, and S. Levine. Time-contrastive networks: Self-supervised learning from video. In 2018 IEEE International Conference on Robotics and Automation, ICRA 2018, Brisbane, Australia, May 21-25, 2018, pages 1134-1141. IEEE, 2018.
319
+ [48] A. Stone, O. Ramirez, K. Konolige, and R. Jonschkowski. The distracting control suite - A challenging benchmark for reinforcement learning from pixels. CoRR, abs/2101.02722, 2021.
320
+ [49] A. Stooke, K. Lee, P. Abbeel, and M. Laskin. Decoupling representation learning from reinforcement learning. In M. Meila and T. Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 9870-9879. PMLR, 2021.
321
+ [50] J. Subramanian, A. Sinha, R. Seraj, and A. Mahajan. Approximate information state for approximate planning and reinforcement learning in partially observed systems. J. Mach. Learn. Res., 23:12:1-12:83, 2022.
322
+ [51] Y. Tassa, Y. Doron, A. Muldal, T. Erez, Y. Li, D. de Las Casas, D. Budden, A. Abdolmaleki, J. Merel, A. Lefrancq, T. Lillicrap, and M. Riedmiller. Deepmind control suite, 2018.
323
+ [52] E. Tzeng, J. Hoffman, K. Saenko, and T. Darrell. Adversarial discriminative domain adaptation (workshop extended abstract). In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Workshop Track Proceedings. OpenReview.net, 2017.
324
+ [53] E. Tzeng, J. Hoffman, N. Zhang, K. Saenko, and T. Darrell. Deep domain confusion: Maximizing for domain invariance. CoRR, abs/1412.3474, 2014.
325
+ [54] A. van den Oord, Y. Li, and O. Vinyals. Representation learning with contrastive predictive coding. CoRR, abs/1807.03748, 2018.
326
+ [55] A. van den Oord, O. Vinyals, and K. Kavukcuoglu. Neural discrete representation learning. In I. Guyon, U. von Luxburg, S. Bengio, H. M. Wallach, R. Fergus, S. V. N. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 6306-6315, 2017.
327
+ [56] T. Wang, S. S. Du, A. Torralba, P. Isola, A. Zhang, and Y. Tian. Denoised mdps: Learning world models better than the world itself. In International Conference on Machine Learning. PMLR, 2022.
328
+ [57] G. Williams, A. Aldrich, and E. A. Theodorou. Model predictive path integral control: From theory to parallel computation. Journal of Guidance, Control, and Dynamics, 40(2):344-357, 2017.
329
+ [58] G. Wilson and D. J. Cook. A survey of unsupervised deep domain adaptation. ACM Trans. Intell. Syst. Technol., 11(5):51:1-51:46, 2020.
330
+
331
+ [59] P. Wu, A. Escontrela, D. Hafner, P. Abbeel, and K. Goldberg. Daydreamer: World models for physical robot learning. In K. Liu, D. Kulic, and J. Ichnowski, editors, Conference on Robot Learning, CoRL 2022, 14-18 December 2022, Auckland, New Zealand, volume 205 of Proceedings of Machine Learning Research, pages 2226-2240. PMLR, 2022.
332
+ [60] M. Yang, D. Schuurmans, P. Abbeel, and O. Nachum. Dichotomy of control: Separating what you can control from what you cannot. CoRR, abs/2210.13435, 2022.
333
+ [61] T. Yoneda, G. Yang, M. R. Walter, and B. Stadie. Invariance through latent alignment, 2021.
334
+ [62] A. Zhang, C. Lyle, S. Sodhani, A. Filos, M. Kwiatkowska, J. Pineau, Y. Gal, and D. Precup. Invariant causal prediction for block mdps. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pages 11214-11224. PMLR, 2020.
335
+ [63] A. Zhang, R. T. McAllister, R. Calandra, Y. Gal, and S. Levine. Learning invariant representations for reinforcement learning without reconstruction. In International Conference on Learning Representations, 2021.
336
+ [64] A. Zhang, Y. Wu, and J. Pineau. Natural environment benchmarks for reinforcement learning. CoRR, abs/1811.06032, 2018.
337
+ [65] R. Zhang, B. Dai, L. Li, and D. Schuurmans. Gendice: Generalized offline estimation of stationary values. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020.
338
+ [66] Y. Zhang. A survey of unsupervised domain adaptation for visual recognition. CoRR, abs/2112.06745, 2021.
339
+ [67] F. Zhuang, X. Cheng, P. Luo, S. J. Pan, and Q. He. Supervised representation learning with double encoding-layer autoencoder for transfer learning. ACM Trans. Intell. Syst. Technol., 9(2):16:1-16:17, 2018.
340
+ [68] L. M. Zintgraf, K. Shiarlis, M. Igl, S. Schulze, Y. Gal, K. Hofmann, and S. Whiteson. Varibad: A very good method for bayes-adaptive deep RL via meta-learning. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020.
341
+
342
+ # Supplementary Materials for
343
+
344
+ # "RePo: Resilient Model-Based Reinforcement Learning by Regularizing Posterior Predictability"
345
+
346
+ # A Algorithm Pseudocode
347
+
348
+ Algorithm 1 Resilient Model-Based RL by Regularizing Posterior Predictability (RePo)
349
+ 1: Initialize dataset $\mathcal{D}_{\mathrm{buf}}$ with $S$ random seed episodes.
350
+ 2: Initialize neural network parameters $\theta, \phi, \psi$ randomly and set dual variable $\beta = \beta_0$ .
351
+ 3: while not converged do
352
+ 4: for update step $c = 1 \dots C$ do
353
+ 5: // Dynamics Learning
354
+ 6: Draw $B$ data sequences $\{(o_t, a_t, r_t)\}_{t=1}^T \sim \mathcal{D}_{\mathrm{buf}}$ .
355
+ 7: Encode images $x_{1:T} = h_\theta(o_{1:T})$ .
356
+ 8: Infer prior and posterior distributions $q_\theta(z_t \mid z_{t-1}, a_{t-1})$ , $p_\theta(z_t \mid z_{t-1}, a_{t-1}, x_t)$ .
357
+ 9: Sample latent states $z_t \sim p_\theta(z_t \mid z_{t-1}, a_{t-1}, x_t)$ and infer reward distributions $q_\theta(r_t \mid z_t)$ .
358
+ 10: Compute the Lagrangian objective in (4.4)
359
+ 11: Update $\theta$ and $\beta$ using dual gradient descent.
360
+ 12: // Behavior Learning
361
+ 13: Imagine trajectories $\{(z_\tau, a_\tau)\}_{\tau=t}^{t+H}$ from each $z_t$ .
362
+ 14: Predict rewards $r_\tau \sim q_\theta(r_\tau \mid z_\tau)$ , values $v_\tau \sim V_\psi(v_\tau \mid z_\tau)$ , actions $a_\tau \sim \pi_\phi(a_\tau \mid z_\tau)$ .
363
+ 15: Update $\phi$ and $\psi$ using actor-critic learning.
364
+ 16: // Data Collection
365
+ 17: for time step $t = 1 \dots N$ do
366
+ 18: Compute $z_t \sim p_\theta(z_t \mid z_{t-1}, a_{t-1}, h_\theta(o_t))$ from history and current observation.
367
+ 19: Compute $a_t \sim \pi_\phi(a_t \mid z_t)$ from policy.
368
+ 20: Execute $a_t$ in environment and collect $o_{t+1}, r_t$.
369
+ 21: Add experience to dataset $\mathcal{D}_{\mathrm{buf}} \gets \mathcal{D}_{\mathrm{buf}} \cup \{o_t, a_t, r_t\}_{t=1}^N$ .
370
+
371
+ Algorithm 2 Semi-Supervised Adaptation of Visual Encoder Using Support Constraint
372
+ 1: Fix training encoder $h_\theta$ , model $p_\theta$ , policy $\pi_\phi$ and replay buffer $\mathcal{D}_{\mathrm{train}}$ .
373
+ 2: Initialize test-time replay buffer $\mathcal{D}_{\mathrm{test}}$ with $S$ random seed episodes.
374
+ 3: Initialize test-time encoder $h_{\theta'}$ by setting $\theta' = \theta$ .
375
+ 4: Initialize neural networks $\tau_\rho$ and $f_\omega$ randomly and set dual variable $\lambda = \lambda_0$ .
376
+ 5: Collect $N$ calibration trajectories with paired observations $\mathcal{D}_{\mathrm{cal}} \gets \{(o_t^{\mathrm{train}}, o_t^{\mathrm{test}}, a_t, r_t)\}_{t=1}^T$ using an exploration policy.
377
+ 6: while not converged do
378
+ 7: for update step $c = 1 \dots C$ do
379
+ 8: // Adaptation
380
+ 9: Draw $B$ observations from training and test buffers $o_{1:B}^{\mathrm{train}} \sim \mathcal{D}_{\mathrm{train}}, o_{1:B}^{\mathrm{test}} \sim \mathcal{D}_{\mathrm{test}}$ .
381
+ 10: Encode images using respective encoders $x_{1:B}^{\mathrm{train}} = h_\theta(o_{1:B}^{\mathrm{train}})$ , $x_{1:B}^{\mathrm{test}} = h_{\theta'}(o_{1:B}^{\mathrm{test}})$ .
382
+ 11: Compute the Lagrangian objective in (4.7)
383
+ 12: Draw $B$ pairs of observations from the calibration buffer $\{(o_i^{\mathrm{cal\_train}}, o_i^{\mathrm{cal\_test}})\}_{i=1}^B \sim \mathcal{D}_{\mathrm{cal}}$ .
384
+ 13: Encode images using encoders $x_{1:B}^{\mathrm{cal\_train}} = h_\theta(o_{1:B}^{\mathrm{cal\_train}})$ , $x_{1:B}^{\mathrm{cal\_test}} = h_{\theta'}(o_{1:B}^{\mathrm{cal\_test}})$ .
385
+ 14: Compute MSE loss between calibration encodings and add to the Lagrangian objective.
386
+ 15: Update $\theta', \rho, \omega$ and $\lambda$ using dual gradient descent.
387
+ 16: // Data Collection
388
+ 17: for time step $t = 1 \dots N$ do
389
+ 18: Compute $z_t \sim p_\theta(z_t \mid z_{t-1}, a_{t-1}, h_{\theta'}(o_t))$ from history and current observation.
390
+ 19: Compute $a_t \sim \pi_\phi(a_t | z_t)$ from policy.
391
+ 20: Execute $a_t$ in environment and collect $o_{t+1}, r_t$.
392
+ 21: Add experience to dataset $\mathcal{D}_{\mathrm{test}} \gets \mathcal{D}_{\mathrm{test}} \cup \{o_t, a_t, r_t\}_{t=1}^N$ .
393
+
394
+ # B Derivations
395
+
396
+ # B.1 RePo objective
397
+
398
+ Recall that $\mathcal{D}_{\mathrm{buf}}$ denotes the distribution over experienced actions, observations and rewards from the environment $((a_{1:T},o_{1:T},r_{1:T})\sim \mathcal{D}_{\mathrm{buf}})$ . For $p\in \mathcal{P}_{\mathrm{post}}$ (our class of posteriors), let $\mathbb{E}_{p,h}$ denote the expectation over $(a_{1:T},o_{1:T},r_{1:T})\sim \mathcal{D}_{\mathrm{buf}}$ with $x_t = h(o_t)$ , the latents $z_{t + 1}\sim p(\cdot \mid z_t,a_t,x_{t + 1})$ drawn from the latent posterior, and the initial latent $z_0\sim p_0$ .
399
+
400
+ We derive a variational lower bound for the following information bottleneck objective from eq. (4.1).
401
+
402
+ $$
403
\max_{p, h} \mathrm{I}_{p, h}(z_{1:T}; r_{1:T} \mid a_{1:T}) \quad \text{s.t.} \quad \mathrm{I}_{p, h}(z_{1:T}; o_{1:T} \mid a_{1:T}) < \epsilon
404
+ $$
405
+
406
+ where the mutual information is with respect to the distribution described by the expectation $\mathbb{E}_{p,h}$ above. The goal is to learn a latent representation that maximally predicts the dynamics and reward while sharing minimal information with the observations. Notice that, as defined, $p$ is only a latent dynamics model. Thus, for the derivation we will introduce $\tilde{p}$ as corresponding to the distribution of all variables under $\mathbb{E}_{p,h}$ , which in particular takes into account the distribution from $\mathcal{D}_{\mathrm{buf}}$ .
407
+
408
+ Definition B.1. We let $\tilde{p}$ denote the joint distribution of $(z_{1:T}, a_{1:T}, x_{1:T}, o_{1:T})$ under $\mathbb{E}_{p,h}$ .
409
+
410
+ Lower bound on reward prediction. We first derive a variational lower bound for the objective $\mathrm{I}_{p,h}(z_{1:T}; r_{1:T} \mid a_{1:T})$ . For our derivation, we make a simplifying assumption:
411
+
412
+ Assumption B.1. Assume that there is a function $p_{\mathrm{r}}(r_t \mid z_t)$ such that $\tilde{p}(r_t \mid z_{1:t}, a_{1:t}) = p_{\mathrm{r}}(r_t \mid z_t)$ .
413
+
414
+ The simplifying assumption enforces that $z_{t}$ is sufficient for predicting the reward given the past latents and actions. We remark that Assumption B.1 is not strictly necessary, and the following lower bound we derive is still valid if it fails; it may just be very loose. The derivation begins as follows:
415
+
416
+ $$
417
+ \begin{aligned} & \mathrm{I}_{p, h}\left(z_{1:T}; r_{1:T} \mid a_{1:T}\right) \\ & = \mathbb{E}_{p, h}\left[\log \tilde{p}\left(r_{1:T} \mid z_{1:T}, a_{1:T}\right)\right] + \mathrm{H}_{p, h}\left(r_{1:T} \mid a_{1:T}\right) \\ & \stackrel{+}{\equiv} \mathbb{E}_{p, h}\left[\log \tilde{p}\left(r_{1:T} \mid z_{1:T}, a_{1:T}\right)\right] \\ & = \mathbb{E}_{p, h}\left[\sum_{t=1}^{T} \log p_{\mathrm{r}}\left(r_{t} \mid z_{t}\right)\right] \quad \text{(Assumption B.1)} \\ & = \mathbb{E}_{p, h}\left[\sum_{t=1}^{T} \log q_{\mathrm{r}}\left(r_{t} \mid z_{t}\right) + \sum_{t=1}^{T} \mathrm{D}_{\mathrm{KL}}\left(p_{\mathrm{r}}\left(r_{t} \mid z_{t}\right) \,\|\, q_{\mathrm{r}}\left(r_{t} \mid z_{t}\right)\right)\right] \\ & \geq \mathbb{E}_{p, h}\left[\sum_{t=1}^{T} \log q_{\mathrm{r}}\left(r_{t} \mid z_{t}\right)\right], \end{aligned}
418
+ $$
419
+
420
+ where above $\mathrm{H}_{p,h}$ denotes an entropy term under distribution $\mathbb{E}_{p,h}$ , and $\stackrel{+}{\equiv}$ denotes equality up to a constant that does not depend on our choice of parametrization $p$ , which only dictates latents $z_{t}$ . In the fourth line, we add and subtract $\log q_{\mathrm{r}}(r_t|z_t)$ in each term of the summation. The last step uses the nonnegativity of KL divergence.
421
+
422
+ Upper bound on dynamic compression. We proceed to derive an upper bound for the constraint $\mathrm{I}_{p,h}(z_{1:T};o_{1:T}\mid a_{1:T})$ . We make a simplifying assumption analogous to Assumption B.1; again, this assumption can also be discarded, but illustrates that our lower bound is sharper when this assumption holds.
423
+
424
+ Assumption B.2. We assume that there is a function $p_{\mathrm{z}}(z_{t + 1} \mid z_t, a_t)$ such that $\tilde{p}(z_{t + 1} \mid z_{1:t}, a_{1:t}) = p_{\mathrm{z}}(z_{t + 1} \mid z_t, a_t)$ .
425
+
426
+ With Assumption B.2, we then invoke a variational upper bound which replaces $p_{\mathrm{z}}(z_{t + 1} \mid z_t, a_t)$ with a variational approximation $q_{\mathrm{z}}(z_{t + 1} \mid z_t, a_t)$ :
427
+
428
+ $$
429
+ \begin{aligned} & \mathrm{I}_{p, h}\left(z_{1:T}; o_{1:T} \mid a_{1:T}\right) \\ & = \mathbb{E}_{p, h}\left[\log \tilde{p}\left(z_{1:T} \mid o_{1:T}, a_{1:T}\right) - \log \tilde{p}\left(z_{1:T} \mid a_{1:T}\right)\right] \\ & = \mathbb{E}_{p, h}\left[\left(\sum_{t=1}^{T} \log p\left(z_{t+1} \mid z_{t}, a_{t}, x_{t+1}\right)\right) - \log \tilde{p}\left(z_{1:T} \mid a_{1:T}\right)\right] \quad \text{(definition of latent dynamics)} \\ & = \mathbb{E}_{p, h}\left[\sum_{t=1}^{T} \log p\left(z_{t+1} \mid z_{t}, a_{t}, x_{t+1}\right) - \sum_{t=1}^{T} \log p_{\mathrm{z}}\left(z_{t+1} \mid z_{t}, a_{t}\right)\right] \quad \text{(Assumption B.2)} \\ & = \mathbb{E}_{p, h}\left[\sum_{t=1}^{T} \log p\left(z_{t+1} \mid z_{t}, a_{t}, x_{t+1}\right) - \sum_{t=1}^{T} \log q_{\mathrm{z}}\left(z_{t+1} \mid z_{t}, a_{t}\right) - \sum_{t=1}^{T} \mathrm{D}_{\mathrm{KL}}\left(p_{\mathrm{z}}\left(z_{t+1} \mid z_{t}, a_{t}\right) \,\|\, q_{\mathrm{z}}\left(z_{t+1} \mid z_{t}, a_{t}\right)\right)\right] \\ & \leq \mathbb{E}_{p, h}\left[\sum_{t=1}^{T} \log p\left(z_{t+1} \mid z_{t}, a_{t}, x_{t+1}\right) - \sum_{t=1}^{T} \log q_{\mathrm{z}}\left(z_{t+1} \mid z_{t}, a_{t}\right)\right], \end{aligned}
430
+ $$
431
+
432
+ where in the last two lines we add and subtract $q_{z}(z_{t + 1} \mid z_{t}, a_{t})$ and apply the nonnegativity of KL divergence. In the third line, we use the fact that our transition dynamics model is Markov, with
433
+
434
+ $$
435
+ \tilde {p} \left(z _ {t + 1} \mid o _ {1: t}, a _ {1: t}, z _ {1: t}\right) = p \left(z _ {t + 1} \mid h \left(o _ {t + 1}\right), a _ {t}, z _ {t}\right) = p \left(z _ {t + 1} \mid x _ {t + 1}, a _ {t}, z _ {t}\right), \tag {B.1}
436
+ $$
437
+
438
+ where under $\mathbb{E}_{p,h}$ , $x_{t} \equiv h(o_{t})$ holds by definition. Notice that the same identity would be true if instead we tried to upper bound the mutual information $\mathrm{I}_{p,h}(z_{1:T}; x_{1:T} \mid a_{1:T})$ , showing that our objective is agnostic to whether we consider compressing information with respect to $x_{1:T}$ or $o_{1:T}$ . This may seem surprising, but it is a simple consequence of choosing a latent dynamics model which depends on $o_{1:T}$ only through $x_{1:T}$ .
439
+
440
+ Combining the derivations. Combining these results and writing in Lagrangian form, we arrive at the objective in eq. (4.4)
441
+
442
+ $$
443
\max_{p,q_{\mathrm{r}},q_{\mathrm{z}},h}\min_{\beta}\mathbb{E}_{p,h}\left[\sum_{t = 1}^{T}\log q_{\mathrm{r}}(r_{t}\mid z_{t})\right] + \beta \left(\mathbb{E}_{p,h}\left[\sum_{t = 0}^{T - 1}\mathrm{D}_{\mathrm{KL}}\left(p(\cdot \mid z_{t},a_{t},x_{t + 1}) \,\|\, q_{\mathrm{z}}(\cdot \mid z_{t},a_{t})\right)\right] - \epsilon\right).
444
+ $$
445
+
446
+ # B.2 Tractable objective for support constraint
447
+
448
+ We derive a tractable variant for the support matching objective in eq. (4.6):
449
+
450
+ $$
451
\min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \mathrm{D}(\tau \cdot \mathcal{P}_{\mathrm{train}} \,\|\, \mathcal{P}_{\mathrm{test}}) \quad \text{s.t.} \quad \mathbb{E}_{x \sim \mathcal{P}_{\mathrm{train}}}[\tau(x)] = 1,
452
+ $$
453
+
454
+ where $\mathcal{P}_{\mathrm{train}}$ denotes the distribution of image encodings at training time and $\mathcal{P}_{\mathrm{test}}$ denotes the distribution of image encodings at test time. $\tau$ is a reweighting function and the constraint $\mathbb{E}_{x\sim \mathcal{P}_{\mathrm{train}}}\big[\tau (x)\big] = 1$ ensures that $\tau \cdot \mathcal{P}_{\mathrm{train}}$ is a valid distribution. Letting $\mathrm{D}_{\phi}$ be any $f$ -divergence, we have
455
+
456
+ $$
457
+ \begin{aligned} & \min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \mathrm{D}_{\phi}(\tau \cdot \mathcal{P}_{\mathrm{train}} \,\|\, \mathcal{P}_{\mathrm{test}}) \\ & = \min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \int_{x} p_{\mathrm{test}}(x)\, \phi\left(\frac{p_{\mathrm{train}}(x) \cdot \tau(x)}{p_{\mathrm{test}}(x)}\right) \quad \text{(definition of $f$-divergence)} \\ & = \min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \int_{x} p_{\mathrm{test}}(x) \left\{ \max_{f(\cdot)} \left[ \frac{p_{\mathrm{train}}(x) \cdot \tau(x)}{p_{\mathrm{test}}(x)} f(x) - \phi^{*}(f(x)) \right] \right\} \quad \text{(by Fenchel conjugacy, $\phi(u) = \max_{y}[uy - \phi^{*}(y)]$)} \\ & = \min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \max_{f(\cdot)} \int_{x} p_{\mathrm{test}}(x) \left[ \frac{p_{\mathrm{train}}(x) \cdot \tau(x)}{p_{\mathrm{test}}(x)} f(x) - \phi^{*}(f(x)) \right] \quad \text{(by interchangeability of $\max$ and integration)} \\ & = \min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \max_{f(\cdot)} \mathbb{E}_{\mathcal{P}_{\mathrm{train}}}[\tau(x) \cdot f(x)] - \mathbb{E}_{\mathcal{P}_{\mathrm{test}}}[\phi^{*}(f(x))] \end{aligned}
458
+ $$
459
+
460
+ We follow [4] and use the $\chi^2$ divergence where $\phi(x) = (x - 1)^2$ and $\phi^*(y) = y + \frac{y^2}{4}$ . This gives
461
+
462
+ $$
463
+ \min _ {\tau (\cdot) \geq 0, h _ {\mathrm {t e s t}} (\cdot)} \max _ {f (\cdot)} \mathbb {E} _ {\mathcal {P} _ {\mathrm {t r a i n}}} [ \tau (x) \cdot f (x) ] - \mathbb {E} _ {\mathcal {P} _ {\mathrm {t e s t}}} \left[ f (x) + \frac {1}{4} f (x) ^ {2} \right].
464
+ $$
465
+
466
+ Incorporating the constraint as a Lagrange multiplier, we arrive at the objective in eq. (4.7).
467
+
468
+ $$
469
\min_{\tau(\cdot) \geq 0,\; h_{\mathrm{test}}(\cdot)} \max_{f(\cdot),\; \lambda} \mathbb{E}_{\mathcal{P}_{\mathrm{train}}}[\tau(x) \cdot f(x)] - \mathbb{E}_{\mathcal{P}_{\mathrm{test}}}\left[f(x) + \frac{1}{4} f(x)^{2}\right] + \lambda\left(\mathbb{E}_{\mathcal{P}_{\mathrm{train}}}[\tau(x)] - 1\right).
470
+ $$
471
+
472
+ # C Implementation Details
473
+
474
+ Model architecture We base our implementation of RePo on Dreamer [17]. We fix the image size to $64 \times 64$ and parameterize the image encoder using a 4-layer CNN with $\{32, 64, 128, 256\}$ channels, kernel size 4, stride 2, and ReLU activation. This results in an embedding size of 1024. The recurrent state space model is parametrized by a GRU operating on deterministic beliefs. Given the current belief, state (sampled from either prior or posterior), and action, the GRU recurrently predicts the next belief. We parameterize the prior $q_{\mathrm{z}}(z_{t+1} \mid z_t, a_t)$ as a Gaussian whose mean and standard deviation are predicted by passing the next belief through a 2-layer MLP. Similarly, we parametrize the posterior $p(z_{t+1} \mid z_t, a_t, x_{t+1})$ as a Gaussian whose mean and standard deviation are predicted by passing the next belief along with the next image embedding through a 2-layer MLP. We set the belief size to 200 and the state size to 30. As in Dreamer, we use both deterministic belief and stochastic state as inputs to the prediction heads, including the reward model, policy, and value function. The reward model and value function are 4-layer MLPs. The policy is a 4-layer MLP outputting a squashed Gaussian distribution. We use 200 hidden units per layer and ELU activation for all MLPs.
475
+
476
+ Table 2: Hyperparameters for evaluation tasks
477
+
478
+ <table><tr><td></td><td>β0</td><td>ε</td><td>r</td></tr><tr><td>Walker</td><td>1e-5</td><td>3</td><td>5</td></tr><tr><td>Cheetah</td><td>1e-5</td><td>3</td><td>5</td></tr><tr><td>Hopper</td><td>1e-4</td><td>1</td><td>3</td></tr><tr><td>Cartpole</td><td>1e-4</td><td>3</td><td>4</td></tr><tr><td>Maniskill</td><td>1e-4</td><td>3</td><td>4</td></tr><tr><td>TurtleBot</td><td>1e-5</td><td>3</td><td>5</td></tr></table>
479
+
480
+ **Optimization** We train the RL agent in an online setting, performing 100 training steps for every 500 environment steps (except for TurtleBot which trains every 100 environment steps). In each training step, we sample 50 trajectories of length 50 from the replay buffer and optimize the RePo objective using the prior and posterior inferred from these trajectories. We then perform behavior learning by using the posterior states as initial latent states and rolling out the policy for 15 steps in the dynamics model. To balance bias and variance, we train the value function to predict the generalized value estimate [17] with $\lambda = 0.95$ and discount factor $\gamma = 0.99$ . We optimize all components with the Adam [29] optimizer. The image encoder, recurrent state space model, and reward model share the same learning rate of 3e-4. The policy and value function use a learning rate of 8e-5. Specific to our method, we initialize the Lagrange multiplier as $\beta = \beta_0$ and set its learning rate to 1e-4. We express the KL balancing parameter $\alpha$ in terms of the ratio $r$ of prior training steps to posterior training steps, where $r$ translates to $\alpha = \frac{r}{r + 1}$ . We tune the initial Lagrange multiplier $\beta_0$ , target KL $\epsilon$ , and KL balancing ratio $r$ on our evaluation tasks and report the best hyperparameters in Table 2.
481
+
482
+ Baselines We implement Dreamer [17] and TIA [9] on top of the RePo codebase and tune their hyperparameters on the evaluation tasks. For DBC and DeepMDP, we use the official implementation along with their reported hyperparameters. We use a deterministic transition model for DBC as we find it to perform better than using a stochastic transition model.
483
+
484
+ Adaptation We parametrize the reweighting function $\tau$ as 2-layer MLP with 256 hidden units in each layer and ReLU activation. The dual function $f$ is analogous to the discriminator in a GAN architecture [14]. To prevent vanishing gradient, we parameterize $f$ using a variational discriminator bottleneck [40] with 2 hidden layers of size 256 and a bottleneck layer of size 64. Prior to training, we collect 10 calibration trajectories with corresponding observations from both the source and the target domains, using an expert policy as an approximation for an exploration policy. We initialize the test-time encoder with the weights of the training encoder and adapt it online, performing 100
485
+
486
+ adaptation steps for every 500 environment steps taken. In each adaptation step, we sample 2500 observations from the offline source buffer and online target buffer respectively, and optimize the tractable support constraint objective (4.7). In addition, we sample 2500 paired observations from the calibration data and minimize the $\ell_2$ loss between their encodings. All components are optimized using Adam [29] optimizer. We use a learning rate of 3e-4 for $h_{\mathrm{test}}$ , 5e-5 for $\tau$ , 1e-4 for $f$ , and 5e-3 for the Lagrange multiplier $\lambda$ initialized to 1e-4.
487
+
488
+ Adaptation baselines We reuse the same model architecture for all variants of our proposed method. For baselines involving a distribution matching objective, we optimize the standard GAN [14] objective to minimize the Jensen-Shannon divergence between training and test-time encoding distributions. To enforce dynamics consistency for the ILA [61] baseline, we forward the image encodings through the RSSM to get the latent states and compute dynamics violation as the KL divergence between the prior and the posterior. We minimize dynamics violation by backpropagating the gradient through the model to the encodings.
489
+
490
+ # D Environment Details
491
+
492
+ Distracted DeepMind Control To evaluate the ability of RePo to learn in dynamic environments, we use the distracted DeepMind Control Suite proposed in [63]. Specifically, we replace the static background in standard DeepMind Control environments [51] with grayscale videos from the Kinetics-400 Dataset [28]. We use a time limit of 1000 and an action repeat of 2 for all environments. We evaluate all methods with 4 random seeds.
493
+
494
+ Realistic Maniskill We evaluate the ability of RePo to learn across diverse scenarios on 3 manipulation tasks adapted from the ManiSkill 2 benchmark [15]:
495
+
496
+ - Push Cube: the goal is to push a cube to reach a position on the floor.
497
+ - Lift Cube: the goal is to lift a cube above a certain height.
498
+ - Turn Faucet: the goal is to turn the faucet to a certain angle.
499
+
500
+ To simulate real-world scenarios, we replace the default background with realistic scenes from the Habitat Matterport dataset [43]. We curate 90 different scenes and randomly load a new scene at the beginning of each episode. We use a time limit of 100 and an action repeat of 1. We evaluate all methods with 4 random seeds.
501
+
502
+ Lazy TurtleBot To evaluate RePo's resilience against spurious variations in the real world, we furnish a room to mimic a typical household setup and train a TurtleBot to reach a certain goal location from egocentric observations (Fig. 4). We introduce spurious variations by placing two TVs playing random YouTube videos along the critical paths to the goal. The robot features a discrete action space with 4 actions: move forward, rotate left, rotate right, and no-op. To minimize algorithmic change, we convert the action space to a continuous 2D action space where the discrete actions correspond to $(1,0)$ , $(0,1)$ , $(-1,0)$ , $(0,-1)$ respectively, and continuous actions are mapped back to the closest discrete actions in L2 distance. The reward is the negative distance to goal, where the robot's state is estimated using an overhead camera mounted on the ceiling. We apply a time limit of 100 and an action repeat of 1. At the start of training, we prefetch the replay buffer with 5K offline transitions collected by sampling and reaching random goals using a state-based controller. This is followed by 10K steps of pure offline training. We then train each method online for 10K environment steps, using the state-based controller for automatic resets. Due to time constraints, we evaluate each method for 2 seeds.
503
+
504
+ # E Additional Experiments
505
+
506
+ # E.1 Results on standard DMC environments
507
+
508
+ In Fig. 11, we provide additional comparisons to Dreamer on standard DeepMind Control tasks. We note that RePo is able to match Dreamer in asymptotic performance despite not being trained with a reconstruction objective. Our method slightly lags behind Dreamer in terms of sample efficiency, which is due to reward signals being inherently more sparse than pixel reconstruction signals.
509
+
510
+ ![](images/e410b8daeb78421e608a6109e5385798b7d0ab1e8f97f13b50c044cc0d05c339.jpg)
511
+ Figure 11: Comparison with Dreamer on standard DeepMind control environments. RePo matches Dreamer in asymptotic performance despite not being trained with a reconstruction objective.
512
+
513
+ ![](images/7ce34cf5aedbbc82a8971a2bb69bca8bb3c975db793f69eea2472edc7bcf43b9.jpg)
514
+
515
+ ![](images/5343497c433310c563cc57c2c51f589e16a2ce5baeae4ee457e8ef396e9630e9.jpg)
516
+
517
+ # E.2 Results on Distracted Walker Walk with scoreboard
518
+
519
+ We evaluate RePo on a variant of Distracted Walker Walk which consists of a scoreboard displaying the cumulative reward of the current episode. The goal is to investigate if RePo's reward-predictive nature causes its latent representation to collapse onto predicting the score. We present the probing visualization in Fig. 12, which shows that the learned latent reconstructs the joint positions of the agent and ignores the score. This is because RePo predicts not only the current reward but also the dynamics and future rewards. The empirical performance with the scoreboard is $872.86 \pm 43.71$ while without the scoreboard it is $868.72 \pm 53.93$ .
520
+
521
+ ![](images/a41f0580f720a1dd6d63118856b36885aaef66eaefd92722119f8715359d1050.jpg)
522
+ Figure 12: Visualization of RePo's representation on distracted DMC with scoreboard. Since RePo predicts not only the current reward but also the dynamics and future rewards, its latent representation does not collapse to predicting the score.
523
+
524
+ # E.3 Additional ablations
525
+
526
+ We perform additional ablation experiments on distracted DMC Walker Walk to analyze the effect of hyperparameters. All other hyperparameters are fixed to those reported in Table 2. Fig. 13 illustrates the effect of $\beta$ learning rate. When the learning rate is too high, the constraint is enforced immediately after training begins, which can lead to ineffective exploration. When the learning rate is too low, dual gradient descent converges slowly. We find the sweet spot to be a learning rate that relaxes the information bottleneck at the beginning of training and gradually tightens the bottleneck to discard redundant information in the representation. Fig. 14 shows the result of ablating the KL constraint value $\epsilon$ . With too small a KL target, the representation becomes degenerate and fails to capture task-relevant information, as indicated by the increase in reward loss. A large KL target, on the other hand, can result in the dynamics model being inaccurate, thereby affecting behavior learning. Finally, Fig. 15 explores the effect of KL balancing. In accordance with [19], we find that the dynamics model is generally harder to train than the representation model. Hence, a larger KL balancing parameter tends to result in better performance. However, if we train the prior too aggressively, then the posterior becomes poorly regularized and captures task-irrelevant information.
527
+
528
+ Figure 13: Ablating $\beta$ learning rate. A larger $\beta$ learning rate corresponds to faster convergence of the constraint, which can lead to ineffective exploration.
529
+ ![](images/b0c8c763b144eda80541f82149d7cd483c8b11d8af58a320560a9f7f9cafdd84.jpg)
530
+ lr=5e-3 lr=1e-4 lr=1e-5
531
+
532
+ ![](images/4b5c5911834b8474b4e962053cdf3b75119727ab42279087c5d3e64573f864ad.jpg)
533
+
534
+ ![](images/444f0b7dd39a98e8560195a20f89b894a74eec68292f10361dfca216d95ffe35.jpg)
535
+
536
+ ![](images/1c81db9c0526463f5aeb9e2c0d9956174f448458b6e4bacb06e57c441fa0b418.jpg)
537
+ ε=1 ε=3 ε=7
538
+
539
+ ![](images/23db5f222db71e28af78c9bee2c561289146c1dbc8bc19a2fddedbd5cdf1469f.jpg)
540
+
541
+ ![](images/30cedf1122ab3eba0bd2997dcca94b5918765c46eac40277ed7c087cc05c4062.jpg)
542
+
543
+ Figure 14: Ablating information bottleneck $\epsilon$ . A tighter information bottleneck generally induces more parsimonious representations and more accurate dynamics models. But too tight a bottleneck can thwart reward learning.
544
+ ![](images/98b55180860477888ad0a9110b234dd89fc49b398e39ff4851325d770efeef74.jpg)
545
+ ratio=1 ratio=5 ratio=9
546
+
547
+ ![](images/2b1f2b9ff25e7c3c2d3567a456f101c723179d8ced162d8fbfc2dd47a24780fe.jpg)
548
+ Figure 15: Ablating KL balancing ratio. The dynamics model (prior) is generally harder to train than the representation model (posterior), which suggests training the prior more frequently than the posterior, i.e. using a higher KL balancing ratio. However, training the prior too aggressively leads to poorly regularized posterior.
549
+
550
+ ![](images/63c404bfb0da7fe51d36f1f0294bda22bdf9a80c1eabf84d58e11e1498b7c788.jpg)
551
+
552
+ # E.4 An illustrative example of support constraint
553
+
554
+ We provide a didactic example to illustrate the intuition behind using a support constraint for alignment. Consider two ground truth latent distributions shown in Fig. 16. The training distribution $\mathcal{P}_{\mathrm{train}}$ is a uniform distribution spanning $[0,6]\times [0,6]$ , and the test-time distribution $\mathcal{P}_{\mathrm{test}}$ is a uniform distribution covering exactly half the support of $\mathcal{P}_{\mathrm{train}}$ . This is to simulate insufficient online exploration at test time. We construct a nonlinear emission function $f:\mathcal{X}\to \mathcal{O}:f(x) = 0.025e^{x}$ to generate the observations. For illustration purposes, we use the same emission function for both training
555
+
556
+ and testing domains. Given access to a perfect training encoder, our goal is to learn a test-time encoder that recovers the ground truth encoding function (inverse of emission function) and in turn the test-time latent distribution. We compare the support constraint objective (detailed in eq. (4.7)) with a distribution matching objective minimizing the Jensen-Shannon divergence.
557
+
558
+ First, notice in Fig. 17 that naively optimizing either objective fails to recover the ground truth test-time latent distribution. The distribution matching objective "stretches" the test-time distribution to match the training distribution, whereas the support constraint only forces the former to be within the support of the latter without any additional stipulation. A common method in unsupervised domain adaptation is to enforce dynamics consistency between adjacent states, which in our toy example can be interpreted as preserving the pairwise distance between latent states. When we add in
559
+
560
+ ![](images/b10b0ff04bd990c034b556990c4a7c7ad10e2c0b78cadc690ec8e0d65ea3c5dd.jpg)
561
+ Training Latent Distribution
562
+ Figure 16: Ground truth latent distributions in training and testing domains.
563
+
564
+ ![](images/511cec334e3c63dc36acefd27388434e1b89a15149b51ddeda41991bb0463836.jpg)
565
+ Testing Latent Distribution
566
+
567
+ ![](images/207f83cf1b2ca790900abab304d08e311f52df07d9e80a512b07bd80b21bf55e.jpg)
568
+
569
+ ![](images/dcdfd20a0e761e7d257c91b0ab39963c0df3aa70d5b1b6c2e372aef476674307.jpg)
570
+
571
+ ![](images/d3cd4555e50b04f34aa1573128a1ed39aacdc9a0483d0826cf81e9095d15b586.jpg)
572
+
573
+ ![](images/c3f9d1d6fa7f4c63e6bac42d94d148725b1b4c11181f901938ad9cba6c726421.jpg)
574
+ Figure 17: Comparison between distribution matching and support constraint. When there is insufficient exploration, the distribution matching objective conflicts with dynamics consistency and calibration objectives, whereas support constraint conforms to these objectives.
575
+
576
+ ![](images/dde83ffc4a7a54b64c91c2d591193cd60240c2d68c1f201e73b558702a82915f.jpg)
577
+
578
+ ![](images/512a47917b1114653d0821119507835428c54825112a12a3e8b24c6b5611a3d7.jpg)
579
+
580
+ this objective, a crucial difference between support constraint and distribution matching transpires: while the support constraint conforms to the dynamics consistency objective through the reweighting function and recovers the correct shape of the distribution, the distribution matching objective is inherently conflicting with dynamics consistency. Intuitively, the distribution matching objective attempts to stretch the distribution, whereas the dynamics consistency pulls it together. This leads to suboptimal results as shown in the middle column of the first row in Fig. 17. Still, neither method can recover the ground truth distribution by only using dynamics consistency, as it only preserves the relative positions of latents but not the global position. To address this, we propose to match the ground truth latent state for a small number of calibration examples. These calibration examples effectively serve as an anchor for the distribution. When the calibration samples have good coverage, we see the support matching objective successfully recovers the ground truth encoding function and latent distribution. Distribution matching, on the other hand, yields a suboptimal solution as it still conflicts with the calibration objective.
581
+
582
+ We further investigate the case where the calibration examples do not have good coverage. In Fig. 18, we see that when the calibration examples only cover a small region of the test-time distribution, the result of optimizing the support constraint is accurate only around the calibration examples. Under this circumstance, the addition of dynamics consistency proves to be quite effective. Since (1) the distribution is anchored around the calibration samples and (2) pairwise distance is preserved, the only valid solution is recovering the ground truth latent distribution. In our DMC benchmark, we do observe degradation when we replace the exploration policy with a random policy for collecting calibration samples. Yet we find optimizing for dynamics consistency through RSSM to be of little avail. We hypothesize this is due to the recurrent architecture interfering with the optimization, and we leave it for future work to investigate potential solutions.
583
+
584
+ ![](images/1f57d384beed730faccc40fc332adc2f8a431ad56aa340bb9659ce20b8ef24b6.jpg)
585
+ Figure 18: Support constraint with skewed calibration examples. When the calibration examples are skewed, the result of optimizing the support constraint is only accurate around the calibration examples. However, adding dynamics consistency effectively propagates the calibration signal to other states.
reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee660f561d210343a6113f9e1e6eb2da517a5027ed1da9ae4dce8eac465cb642
3
+ size 832187
reporesilientmodelbasedreinforcementlearningbyregularizingposteriorpredictability/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48fd978e0e2583e3c60d285ea38ec7a1f532954e92972f737d3ebb0b893eedc7
3
+ size 827153
weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/f7f37953-e3be-4d76-9b64-a0fdf5d77a23_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f67e74fbbf4d67d6c944904e7d33b93272d3480803d7ab8934a44253f9a7410
3
+ size 84496
weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/f7f37953-e3be-4d76-9b64-a0fdf5d77a23_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f44af9df11f91de1e44ff819350caecd12008a6a8d45394f371d66705e0b1b4f
3
+ size 103122
weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/f7f37953-e3be-4d76-9b64-a0fdf5d77a23_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40f5d943a7573752aa83d157524dc19f6f736010d1689eea95f65631015ca078
3
+ size 1602037
weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/full.md ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Weakly-Supervised Concealed Object Segmentation with SAM-based Pseudo Labeling and Multi-scale Feature Grouping
2
+
3
+ Chunming He $^{1,\ast}$ , Kai Li $^{2,\ast}$ , Yachao Zhang $^{1}$ , Guoxia Xu $^{3}$ , Longxiang Tang $^{1}$ , Yulun Zhang $^{4}$ , Zhenhua Guo $^{5}$ , and Xiu Li $^{1,\dagger}$
4
+
5
+ <sup>1</sup>Shenzhen International Graduate School, Tsinghua University, <sup>2</sup>NEC Laboratories America, <sup>3</sup>Nanjing University of Posts and Telecommunications, <sup>4</sup>ETH Zürich, <sup>5</sup>Tianyi Traffic Technology
6
+
7
+ # Abstract
8
+
9
+ Weakly-Supervised Concealed Object Segmentation (WSCOS) aims to segment objects well blended with surrounding environments using sparsely-annotated data for model training. It remains a challenging task since (1) it is hard to distinguish concealed objects from the background due to the intrinsic similarity and (2) the sparsely-annotated training data only provide weak supervision for model learning. In this paper, we propose a new WSCOS method to address these two challenges. To tackle the intrinsic similarity challenge, we design a multi-scale feature grouping module that first groups features at different granularities and then aggregates these grouping results. By grouping similar features together, it encourages segmentation coherence, helping obtain complete segmentation results for both single and multiple-object images. For the weak supervision challenge, we utilize the recently-proposed vision foundation model, "Segment Anything Model (SAM)", and use the provided sparse annotations as prompts to generate segmentation masks, which are used to train the model. To alleviate the impact of low-quality segmentation masks, we further propose a series of strategies, including multi-augmentation result ensemble, entropy-based pixel-level weighting, and entropy-based image-level selection. These strategies help provide more reliable supervision to train the segmentation model. We verify the effectiveness of our method on various WSCOS tasks, and experiments demonstrate that our method achieves state-of-the-art performance on these tasks. The code will be available at https://github.com/ChunmingHe/WS-SAM.
10
+
11
+ # 1 Introduction
12
+
13
+ Concealed object segmentation (COS) aims to segment objects visually blended with surrounding environments [1]. COS is a general term with different applications, e.g., camouflaged object detection [2, 3], polyp image segmentation [4, 5], transparent object detection [6, 7], etc. COS is a challenging task due to the intrinsic similarity between foreground objects and background, which makes it extremely difficult to identify discriminative clues for accurate foreground-background separation. To address this challenge, existing methods have employed approaches that mimic human vision [8-10], introduce frequency information [11, 12], or adopt joint modeling in multiple tasks [13-18].
14
+
15
+ Weakly-Supervised COS (WSCOS) studies an even more challenging yet more practical problem, involving learning a COS model without relying on pixel-wise fully-annotated training data. WSCOS greatly reduces annotation costs by only requiring a few annotated points or scribbles in the foreground
16
+
17
+ or background. However, the sparsity of annotated training data diminishes the limited discrimination capacity of the segmenter during model learning, thus further restricting segmentation performance.
18
+
19
+ In this paper, we propose a new algorithm for the challenging WSCOS task. To tackle the intrinsic similarity of foreground and background, we introduce a Multi-scale Feature Grouping (MFG) module that first evacuates discriminative cues at different granularities and then aggregates these cues to handle various concealing scenarios. By performing feature grouping, MFG essentially promotes coherence among features and thus is able to alleviate incomplete segmentation by encouraging local correlation within individual objects, while also facilitating multiple-object segmentation by seeking global coherence across multiple objects.
20
+
21
+ To address the challenge of weak supervision, we propose to leverage the recently proposed vision foundation model, Segment Anything Model (SAM), to generate dense masks by using sparse annotations as prompts, and use the generated masks as pseudo labels to train a segmenter. However, due to the intrinsic similarity between foreground objects and the background, the pseudo labels generated by SAM may not always be reliable. We propose a series of strategies to address this problem. First, we propose to generate multiple augmentation views for each image and fuse the segmentation masks produced from all views. The fused mask can highlight reliable predictions resistant to image augmentations and tend to be more accurate and complete as an effect of ensemble. Second, we propose an entropy-based weighting mechanism that assigns higher weights to predictions of pixels of high certainty. Lastly, to deal with the extreme images that SAM fails to generate reasonably correct masks, we propose an entropy-based image-level selection technique to assess the quality of the generated mask and decide whether to use the masks as pseudo labels for model training. These strategies ensure that only high-quality pseudo labels are used for training the segmenter. For ease of description, we refer to our solution of using SAM to address this task as WS-SAM.
22
+
23
+ Our contributions are summarized as follows:
24
+
25
+ (1) We propose to leverage SAM for weakly-supervised segmentation by using the provided sparse annotations as prompts to generate dense segmentation masks and train a task segmentation model. To the best of our knowledge, this is the first attempt to leverage the vision foundation model to address the weakly-supervised segmentation task.
26
+ (2) We propose a series of strategies for dealing with potentially low segmentation mask quality, including the multi-augmentation result ensemble technique, entropy-based pixel-level weighting technique, and entropy-based image-level selection technique. These techniques help provide reliable guidance to train the model and lead to improved segmentation results.
27
+ (3) We introduce a Multi-scale Feature Grouping (MFG) technique to tackle the intrinsic similarity challenge in the WSCOS task. MFG evacuates discriminative cues by performing feature grouping at different granularities. It encourages segmentation coherence, helping to obtain complete segmentation results for both single and multiple object images.
28
+ (4) We evaluate our method on various WSCOS tasks, and the experiments demonstrate that our method achieves state-of-the-art performance.
29
+
30
+ # 2 Related Works
31
+
32
+ Segment Anything Model. SAM [19] is a recently proposed vision foundation model trained on SA-1B of over 1 billion masks. Its primary objective is to segment any object in any given image without requiring any additional task-specific adaptation. Its outstanding quality in segmentation results and zero-shot generalization to new scenes make SAM a promising candidate for various computer vision tasks. However, recent studies have highlighted that SAM encounters difficulties when segmenting objects with poor visibility, such as camouflaged objects [20-22], medical polyps [23-27], and transparent glasses [28, 22]. These findings suggest that SAM still has limitations in COS tasks.
33
+
34
+ In this paper, we propose using SAM to generate dense segmentation masks from sparse annotations and introduce the first SAM-based weakly-supervised framework in COS, termed WS-SAM. To further increase the accuracy of the generated pseudo-labels, we propose a pseudo label refinement strategy. Such a strategy assigns higher weights to those reliable predictions that are resistant to various image augmentation. Therefore, WS-SAM can offer more precise and stable guidance for the learning process, ultimately boosting the segmentation performance of the segmenter.
35
+
36
+ ![](images/4736f87981ac362390c52141592a49e3f795d97515bc6bbc97adbbf561512874.jpg)
37
+ Figure 1: Framework of WS-SAM with scribble supervision. Note that the corresponding masks of the augmented images are inversely transformed so as to be consistent with the original image.
38
+
39
+ Concealed Object Segmentation. With the rapid development of deep learning, learning-based segmenters have obtained great achievements in the fully-supervised COS tasks [4, 2, 29]. PraNet [4] proposed a parallel reverse attention network to segment polyps in colonoscopy images. Drawn inspiration from biology, SINet [2] designed a predator network to discover and locate camouflaged objects. To detect transparent objects, GSDNet [29] integrated an aggregation module and a reflection refinement module. However, there is limited research on the weakly-supervised COS task. SCOD [30] introduced the first weakly-supervised COD framework, but it is only supervised with sparse annotations, which greatly restricts its discrimination capacity and inevitably inhibits segmentation performance. To address this problem, we first propose using SAM to generate precise pseudo-labels. Besides, to tackle intrinsic similarity, we introduce the multi-scale feature grouping module to evacuate discriminative cues at different granularities and thus promote feature coherence.
40
+
41
+ # 3 Methodology
42
+
43
+ Weakly-Supervised Concealed Object Segmentation (WSCOS) aims to learn a segmentation model from a sparsely-annotated training dataset $S = \{\mathbf{X}_i, \mathbf{Y}_i\}_{i=1}^S$ and test the model on a test dataset $\mathcal{T} = \{\mathbf{T}_i\}_{i=1}^T$ , where $\mathbf{X}_i$ and $\mathbf{T}_i$ denote the training and test images, respectively; $\mathbf{Y}_i$ represents the sparse annotations, which could be a few points or scribbles annotated as foreground or background.
44
+
45
+ Learning the segmentation model could be a challenging task, as concealed objects usually blend well with their surrounding environment, making it hard to distinguish foreground from background. Besides, the sparse annotations $\mathbf{Y}_i$ may not provide sufficient supervision to learn the model capable of making accurate dense predictions. To address these challenges, we first propose a strategy of leveraging the recently-proposed vision foundation model, Segment Anything Model (SAM), to generate high-quality dense masks from sparse annotations and use the dense masks as pseudo labels to train the segmentation model. In addition, we propose a Multi-scale Feature Grouping (MFG) module that groups features at different granularities, encouraging segmentation coherence and facilitating obtaining complete segmentation results for various concealing scenarios.
46
+
47
+ # 3.1 Pseudo Labeling with SAM
48
+
49
+ SAM is a recently-released vision foundation model for generic object segmentation [19]. It is trained with more than one billion segmentation masks and has shown impressive capabilities of producing precise segmentation masks for a wide range of object categories (so-called "segment anything"). Unlike some enthusiasts who brag SAM has "killed" the segment task, we find that SAM is far from reaching that level, at least for the studied concealed object segmentation task. This is first because SAM requires "prompts" that provide clues about the objects of interest to produce segmentation results. While the prompts could be in many forms, e.g., points, masks, bounding boxes, etc., they are required to be provided by humans or other external sources (e.g., other algorithms) 2. This requirement of the additional prompt inputs makes SAM unable to be (directly) used for applications where only test images are provided. In addition, we find that while SAM exhibits
50
+
51
+ ![](images/9f33991910665eac197eaf10dfb77189cb20a0bf2f01bff11e8f62a2269971fd.jpg)
52
+ Figure 2: Masks of SAM with different augmented images. We inversely transform the masks to keep consistent with the original image. It is observed that fused masks contain more accurate and complete segmentation information.
53
+
54
+ impressive performance for general scene images, it still struggles for concealed object images, due to the intrinsic similarity between foreground objects and the background.
55
+
56
+ In this paper, we introduce SAM for the Weakly-Supervised Concealed Object Segmentation (WSCOS) task. As shown in Fig. 1, we use SAM to generate segmentation masks on training images by taking the sparse annotations as prompts, and take the segmentation masks as pseudo labels to train a COS model, which will be used for the test. It is expected that the SAM-generated pseudo labels may not be reliable. We address this problem by proposing three techniques, namely, multi-augmentation result fusion, pixel-level weighting, and image-level selection.
57
+
58
+ Multi-augmentation result fusion. Given a concealed image $(\mathbf{X}_i, \mathbf{Y}_i) \in S$ , we generate $K$ augmented images $\{\mathbf{X}_i^k\}_{k=1}^K$ by applying stochastic augmentations randomly sampled from image flipping, rotation $(0^\circ, 90^\circ, 180^\circ, 270^\circ)$ , and scaling $(\times 0.5, \times 1.0, \times 2.0)$ . We send $\{\mathbf{X}_i^k\}_{k=1}^K$ to SAM by using the sparse annotations $\mathbf{Y}_i$ as prompts, and generate segmentation masks $\{\mathbf{M}_i^k\}_{k=1}^K$ , where
59
+
60
+ $$
61
+ \mathbf {M} _ {i} ^ {k} = \operatorname {S A M} \left(\mathbf {X} _ {i} ^ {k}, \mathbf {Y} _ {i}\right). \tag {1}
62
+ $$
63
+
64
+ Note that $\mathbf{M}_i^k$ has the same shape as input image $\mathbf{X}_i^k$ , which may differ in shape from $\mathbf{X}_i$ ; we perform inverse image transformation to ensure all masks have the same shape as the original image.
65
+
66
+ As different segmentation results can be obtained when feeding SAM with different prompts, we expect $\{\mathbf{M}_i^k\}_{k=1}^K$ to vary since different augmented images are used for segmentation. Fig. 2 shows some examples. We can see that while these masks vary significantly in shape, they overlap in certain regions, which are reliably predicted by SAM regardless of image transformations and usually correspond to correctly predicted foreground regions. Besides, these masks complement each other, such that some foreground regions missed by one mask can be found in other masks. Based on these observations, we propose to fuse the segmentation masks for different augmented images, as
67
+
68
+ $$
69
+ \tilde {\mathbf {M}} _ {i} = \frac {1}{K} \sum_ {k = 1} ^ {K} \mathbf {M} _ {i} ^ {k}, \tag {2}
70
+ $$
71
+
72
+ where $\tilde{\mathbf{M}}_i$ is the fused mask. We expect $\tilde{\mathbf{M}}_i$ to be more reliable than the individual masks as it is an ensemble over various augmented images.
73
+
74
+ Pixel-level weighting. The prediction reliability of different pixels may vary. To highlight those more reliable ones, we propose to use entropy to weight the predictions. We calculate the entropy of each pixel and get an entropy map as
75
+
76
+ $$
77
+ \tilde {\mathbf {E}} _ {i} = - \tilde {\mathbf {M}} _ {i} \log \tilde {\mathbf {M}} _ {i} - (1 - \tilde {\mathbf {M}} _ {i}) \log (1 - \tilde {\mathbf {M}} _ {i}). \tag {3}
78
+ $$
79
+
80
+ As the entropy map is calculated from the fused mask, it measures the prediction uncertainty of each pixel across all augmentations, as a pixel will have low entropy only if it is confidently and meanwhile consistently predicted from all augmented images. Therefore, we can use this entropy map to weight the fused mask $\tilde{\mathbf{M}}_i$ and assign higher weights to those reliable pixels.
81
+
82
+ Image-level selection. We have observed that for some highly challenging concealed images, SAM fails to produce even reasonably correct results with the sparse annotations as the prompts, with
83
+
84
+ ![](images/79b44f72ce8285a1da0271e8b075be62c58721945135887cf78a3ee1fdbf8d82.jpg)
85
+ Figure 3: Architecture of the proposed model. $\Phi_P$ denotes feature grouping with $P$ prototypes. We simplify the broadcast process in $\Phi_P$ for space limitation.
86
+
87
+ whatever types of augmented images. This fundamentally invalidates the above pixel-wise weighting strategy. To deal with this case, we further propose an image-level selection mechanism to selectively choose images for training, further striving to provide reliable supervision to train the segmenter.
88
+
89
+ Similar to using entropy to define the pixel-level prediction uncertainty, we propose two entropy-based image-level uncertainty measurements, namely, absolute uncertainty $U_{a}$ and relative uncertainty $U_{r}$ . Absolute uncertainty $U_{a}$ refers to the proportion of high-uncertainty pixels among all pixels, while relative uncertainty $U_{r}$ indicates the proportion of high-uncertainty pixels and foreground pixels with low uncertainty, which is specifically designed to accommodate small-object scenarios. We regard a pixel as a high-uncertainty pixel when its entropy is above 0.9. We define the following indicator function to decide whether to keep an image for training or not:
90
+
91
+ $$
92
+ \hat {\mathbf {M}} _ {i} = \tilde {\mathbf {M}} _ {i} \times \mathbb {1} \left[ U _ {a} < \tau_ {a} \right] \times \mathbb {1} \left[ U _ {r} < \tau_ {r} \right], \tag {4}
93
+ $$
94
+
95
+ where $\tau_{a}$ and $\tau_{r}$ are the thresholds that are set as 0.1 and 0.5 in this paper, respectively.
96
+
97
+ Applying the entropy weights $\tilde{\mathbf{E}}_i$ on the image selection indicator $\hat{\mathbf{M}}_i$ , we reach our final mask that will be used for training the segmenter as,
98
+
99
+ $$
100
+ \hat {\mathbf {Y}} _ {i} = \left(1 - \tilde {\mathbf {E}} _ {i}\right) \times \hat {\mathbf {M}} _ {i}. \tag {5}
101
+ $$
102
+
103
+ Our technique leverages SAM to generate segmentation masks and further incorporates multi-augmentation result fusion, pixel-level uncertainty weighting, and image-level uncertainty filtering, thus being able to generate reliable pseudo labels to train the segmenter.
104
+
105
+ # 3.2 Multi-scale Feature Grouping
106
+
107
+ The intrinsic similarity in concealed objects may cause incomplete segmentation and partial object localization in multi-object segmentation. Such problems could be further aggravated in weakly supervised scenarios due to the limited discriminative capacity of the segmenter. To address this issue, we propose a Multi-scale Feature Grouping (MFG) module that evacuates discriminative cues at various granularities. MFG achieves this by exploring the coherence of foreground/background regions and performing feature grouping at different levels. By encouraging feature coherence, MFG can alleviate incomplete segmentation by enhancing local correlation within individual objects and further facilitate multiple-object segmentation by seeking global coherence across multiple objects. The architecture of the proposed MFG module is illustrated in Fig. 3.
108
+
109
+ Feature grouping. Suppose $\mathbf{F} \in \mathbb{R}^{H \times W \times C}$ is the feature representation of an input image. We perform feature grouping by mapping $\mathbf{F}$ to $N$ learnable cluster prototypes $\mathbf{P} \in \mathbb{R}^{N \times C}$ . These cluster prototypes $\mathbf{P}$ are randomly initialized. We first append the learnable spatial positional embedding $\mathbf{P}_e$ to the input feature $\mathbf{F}$ and get $\mathbf{F}_p$ . Then, we linearly transform the prototypes $\mathbf{P}$ and the positioned feature $\mathbf{F}_p$ into $\mathbf{Q} \in \mathbb{R}^{N \times C}$ , $\mathbf{K} \in \mathbb{R}^{HW \times C}$ , and $\mathbf{V} \in \mathbb{R}^{HW \times C}$ :
110
+
111
+ $$
112
+ \mathbf {Q} = \mathbf {W} _ {q} \mathbf {P}, \quad \mathbf {K} = \mathbf {W} _ {k} \mathbf {F} _ {p}, \quad \mathbf {V} = \mathbf {W} _ {v} \mathbf {F} _ {p}, \tag {6}
113
+ $$
114
+
115
+ where $\mathbf{W}_q, \mathbf{W}_k, \mathbf{W}_v \in \mathbb{R}^{C \times C}$ are the learnable weights. To ensure the exclusive assignment of features to the cluster prototypes, we normalize the coefficients over all prototypes,
116
+
117
+ $$
118
+ \bar{\mathbf{A}}_{i,j} = \frac{e^{\mathbf{A}_{i,j}}}{\sum_{l} e^{\mathbf{A}_{i,l}}}, \quad \text{where} \quad \mathbf{A} = \frac{1}{\sqrt{C}} \mathbf{K}^{\top} \mathbf{Q}. \tag{7}
119
+ $$
120
+
121
+ We then calculate the integral value $\mathbf{U}$ of the input values with respect to the prototypes as
122
+
123
+ $$
124
+ \mathbf{U} = \mathbf{D}^{\top} \mathbf{V}, \quad \text{where} \quad \mathbf{D}_{i,j} = \frac{\mathbf{A}_{i,j}}{\sum_{l} \mathbf{A}_{i,l}}, \tag{8}
125
+ $$
126
+
127
+ and update the prototypes $\mathbf{P}$ by feeding $\mathbf{P}$ and $\mathbf{U}$ into a Gated Recurrent Units $GRU(\cdot)$ :
128
+
129
+ $$
130
+ \mathbf{P} = \mathrm{GRU}(\text{inputs} = \mathbf{U}, \ \text{states} = \mathbf{P}). \tag{9}
131
+ $$
132
+
133
+ By repeating Eqs. (6) - (9) for $T$ iterations, the cluster prototypes are iteratively updated and gradually strengthen the association between similar features, where $T = 3$ in this paper.
134
+
135
+ We broadcast each prototype onto a 2D grid augmented with the learnable spatial position embedding $\mathbf{P}_e$ to obtain $\{\mathbf{F}_i^{\prime}\}_{i = 1}^{N}\in \mathbb{R}^{H\times W\times C}$ , and use $1\times 1$ convolution to downsample each prototype, obtaining $\{\mathbf{F}_i^{\prime \prime}\}_{i = 1}^{N}\in \mathbb{R}^{H\times W\times C / N}$ . We concatenate those prototypes and obtain $\mathbf{F}_c\in \mathbb{R}^{H\times W\times C}$ .
136
+
137
+ For ease of future use, we denote the feature grouping process with $N$ prototypes as $\mathbf{F}_c = \Phi_N(\mathbf{F})$ .
138
+
139
+ Multi-scale feature aggregation. The number of prototypes $N$ in the above feature grouping technique controls the grouping granularity: a smaller value of $N$ facilitates the extraction of global information, while a larger value of $N$ can provide more valuable detailed information. To strike a balance, we propose to aggregate the multi-scale grouping features with different numbers of prototypes. Taking inspiration from the second-order Runge-Kutta (RK2) structure known for its superior numerical solutions compared to the traditional residual structure [31, 32], we employ RK2 to aggregate those features. Additionally, as shown in Fig. 3, we adopt a weighted gate mechanism $\alpha_{g}$ to adaptively estimate the trade-off parameter rather than using a fixed coefficient. Given the feature $\mathbf{F}$ , the adaptively aggregated feature $\hat{\mathbf{F}}$ is formulated as follows:
140
+
141
+ $$
142
+ \hat {\mathbf {F}} = \mathbf {F} + \alpha_ {g} \Phi_ {N _ {1}} (\mathbf {F}) + (1 - \alpha_ {g}) \Phi_ {N _ {2}} (\mathbf {F} + \Phi_ {N _ {1}} (\mathbf {F})), \tag {10}
143
+ $$
144
+
145
+ where $\alpha_{g} = S(\sigma \operatorname{cat}(\Phi_{N_{1}}(\mathbf{F}), \Phi_{N_{2}}(\mathbf{F} + \Phi_{N_{1}}(\mathbf{F}))) + \mu)$ . $S$ is Sigmoid. $\sigma$ and $\mu$ are the learnable parameters in $\alpha_{g}$ . $N_{1}$ and $N_{2}$ are the numbers of groups, which are empirically set as 4 and 2.
146
+
147
+ Our multi-scale feature grouping technique is inspired by the slot attention technique [33], but we differ from slot attention in the following aspects. While slot attention targets instance-level grouping in a self-supervised manner, our MFG is proposed to adaptively excavate the feature-level coherence for complete segmentation and accurate multi-object localization. To relax the segmenter and ensure generalization, we remove the auxiliary decoder used in slot attention for image reconstruction, along with the reconstruction constraint. Additionally, we employ an RK2 structure to aggregate the multiscale grouping features with different numbers of prototypes, which further facilitates the excavation of feature coherence and therefore helps improve segmentation performance.
148
+
149
+ # 3.3 Weakly-Supervised Concealed Object Segmentation
150
+
151
+ To use the proposed MFG technique for concealed object segmentation, we integrate MFG with the encoder and decoder architecture utilized in an existing camouflaged object detection model [12] to construct a novel segmenter. The model comprises a ResNet50-backed encoder $E$ that maps an input image $\mathbf{X}_i$ to a feature space as $\mathbf{F}_i = E(\mathbf{X}_i)$ . Using the obtained $\mathbf{F}_i$ , we apply MFG to perform multi-scale feature grouping, resulting in $\hat{\mathbf{F}}_i = MFG(\mathbf{F}_i)$ . Subsequently, a decoder $D$ maps $\hat{\mathbf{F}}_i$ back to the image space, generating the predicted mask $\mathbf{Y}_i' = D(\hat{\mathbf{F}}_i)$ . Fig. 3 provides a conceptual illustration of this model, and more architecture details can be found in the supplementary materials.
152
+
153
+ Following [30], we train the whole model jointly with the sparse annotations $\mathbf{Y}_i$ , and the generated segmentation mask $\hat{\mathbf{Y}}_i$ and $\tilde{\mathbf{M}}_i$ by the SAM model, as
154
+
155
+ $$
156
+ L = \frac {1}{N _ {s}} \sum_ {\left(\mathbf {X} _ {i}, \mathbf {Y} _ {i}\right) \sim \mathcal {S}} L _ {p c e} \left(\mathbf {Y} _ {i} ^ {\prime}, \mathbf {Y} _ {i}\right) + \hat {\mathbf {Y}} _ {i} L _ {c e} \left(\mathbf {Y} _ {i} ^ {\prime}, \tilde {\mathbf {M}} _ {i}\right) + \hat {\mathbf {Y}} _ {i} L _ {I o U} \left(\mathbf {Y} _ {i} ^ {\prime}, \tilde {\mathbf {M}} _ {i}\right). \tag {11}
157
+ $$
158
+
159
+ where the first term is the partial cross-entropy loss $L_{pce}$ used to ensure consistency between the prediction maps and the sparse annotations $\mathbf{Y}_i$ [34]. The second and third terms are the cross-entropy loss $L_{ce}$ and the intersection-over-union loss $L_{IoU}$ [35], both calculated against the pseudo label $\tilde{\mathbf{M}}_i$ and weighted by the image-level selection indicator $\hat{\mathbf{Y}}_i$.
160
+
161
+ # 4 Experiments
162
+
163
+ # 4.1 Experimental Setup
164
+
165
+ Implementation details. The image encoder uses ResNet50 as the backbone and is pre-trained on ImageNet [39]. The batch size is 36 and the learning rate is initialized as 0.0001, decreased by 0.1 every 80 epochs. For scribble supervision, we propose a nine-box strategy, namely constructing
166
+
167
+ Table 1: Results on COD with point supervision and scribble supervision. $\mathrm{SCOD + }$ indicates integrating SCOD with our WS-SAM framework. The best two results are in red and blue fonts.
168
+
169
+ <table><tr><td rowspan="2">Methods</td><td rowspan="2">Pub.</td><td colspan="4">CHAMELEON</td><td colspan="4">CAMO</td><td colspan="4">COD10K</td><td colspan="4">NC4K</td></tr><tr><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td></tr><tr><td colspan="18">Scribble Supervision</td></tr><tr><td>SAM [19]</td><td>—</td><td>0.207</td><td>0.595</td><td>0.647</td><td>0.635</td><td>0.160</td><td>0.597</td><td>0.639</td><td>0.643</td><td>0.093</td><td>0.673</td><td>0.737</td><td>0.730</td><td>0.118</td><td>0.675</td><td>0.723</td><td>0.717</td></tr><tr><td>SAM-S [19]</td><td>—</td><td>0.076</td><td>0.729</td><td>0.820</td><td>0.650</td><td>0.105</td><td>0.682</td><td>0.774</td><td>0.731</td><td>0.046</td><td>0.695</td><td>0.828</td><td>0.772</td><td>0.071</td><td>0.747</td><td>0.832</td><td>0.763</td></tr><tr><td>WSSA [36]</td><td>CVPR20</td><td>0.067</td><td>0.692</td><td>0.860</td><td>0.782</td><td>0.118</td><td>0.615</td><td>0.786</td><td>0.696</td><td>0.071</td><td>0.536</td><td>0.770</td><td>0.684</td><td>0.091</td><td>0.657</td><td>0.779</td><td>0.761</td></tr><tr><td>SCWS [37]</td><td>AAAI21</td><td>0.053</td><td>0.758</td><td>0.881</td><td>0.792</td><td>0.102</td><td>0.658</td><td>0.795</td><td>0.713</td><td>0.055</td><td>0.602</td><td>0.805</td><td>0.710</td><td>0.073</td><td>0.723</td><td>0.814</td><td>0.784</td></tr><tr><td>TEL [38]</td><td>CVPR22</td><td>0.073</td><td>0.708</td><td>0.827</td><td>0.785</td><td>0.104</td><td>0.681</td><td>0.797</td><td>0.717</td><td>0.057</td><td>0.633</td><td>0.826</td><td>0.724</td><td>0.075</td><td>0.754</td><td>0.832</td><td>0.782</td></tr><tr><td>SCOD 
[30]</td><td>AAAI23</td><td>0.046</td><td>0.791</td><td>0.897</td><td>0.818</td><td>0.092</td><td>0.709</td><td>0.815</td><td>0.735</td><td>0.049</td><td>0.637</td><td>0.832</td><td>0.733</td><td>0.064</td><td>0.751</td><td>0.853</td><td>0.779</td></tr><tr><td>SCOD+</td><td>—</td><td>0.046</td><td>0.797</td><td>0.900</td><td>0.820</td><td>0.090</td><td>0.716</td><td>0.818</td><td>0.741</td><td>0.047</td><td>0.650</td><td>0.845</td><td>0.742</td><td>0.060</td><td>0.766</td><td>0.862</td><td>0.785</td></tr><tr><td>Ours</td><td>—</td><td>0.046</td><td>0.777</td><td>0.897</td><td>0.824</td><td>0.092</td><td>0.742</td><td>0.818</td><td>0.759</td><td>0.038</td><td>0.719</td><td>0.878</td><td>0.803</td><td>0.052</td><td>0.802</td><td>0.886</td><td>0.829</td></tr><tr><td colspan="18">Point Supervision</td></tr><tr><td>SAM [19]</td><td>—</td><td>0.207</td><td>0.595</td><td>0.647</td><td>0.635</td><td>0.160</td><td>0.597</td><td>0.639</td><td>0.643</td><td>0.093</td><td>0.673</td><td>0.737</td><td>0.723</td><td>0.118</td><td>0.675</td><td>0.723</td><td>0.717</td></tr><tr><td>SAM-P [19]</td><td>—</td><td>0.101</td><td>0.696</td><td>0.745</td><td>0.697</td><td>0.123</td><td>0.649</td><td>0.693</td><td>0.677</td><td>0.069</td><td>0.694</td><td>0.796</td><td>0.765</td><td>0.082</td><td>0.728</td><td>0.786</td><td>0.776</td></tr><tr><td>WSSA [36]</td><td>CVPR20</td><td>0.105</td><td>0.660</td><td>0.712</td><td>0.711</td><td>0.148</td><td>0.607</td><td>0.652</td><td>0.649</td><td>0.087</td><td>0.509</td><td>0.733</td><td>0.642</td><td>0.104</td><td>0.688</td><td>0.756</td><td>0.743</td></tr><tr><td>SCWS [37]</td><td>AAAI21</td><td>0.097</td><td>0.684</td><td>0.739</td><td>0.714</td><td>0.142</td><td>0.624</td><td>0.672</td><td>0.687</td><td>0.082</td><td>0.593</td><td>0.777</td><td>0.738</td><td>0.098</td><td>0.695</td><td>0.767</td><td>0.754</td></tr><tr><td>TEL 
[38]</td><td>CVPR22</td><td>0.094</td><td>0.712</td><td>0.751</td><td>0.746</td><td>0.133</td><td>0.662</td><td>0.674</td><td>0.645</td><td>0.063</td><td>0.623</td><td>0.803</td><td>0.727</td><td>0.085</td><td>0.725</td><td>0.795</td><td>0.766</td></tr><tr><td>SCOD [30]</td><td>AAAI23</td><td>0.092</td><td>0.688</td><td>0.746</td><td>0.725</td><td>0.137</td><td>0.629</td><td>0.688</td><td>0.663</td><td>0.060</td><td>0.607</td><td>0.802</td><td>0.711</td><td>0.080</td><td>0.744</td><td>0.796</td><td>0.758</td></tr><tr><td>SCOD+</td><td>—</td><td>0.089</td><td>0.704</td><td>0.757</td><td>0.731</td><td>0.129</td><td>0.642</td><td>0.693</td><td>0.666</td><td>0.058</td><td>0.618</td><td>0.812</td><td>0.719</td><td>0.075</td><td>0.767</td><td>0.825</td><td>0.771</td></tr><tr><td>Ours</td><td>—</td><td>0.056</td><td>0.767</td><td>0.868</td><td>0.805</td><td>0.102</td><td>0.703</td><td>0.757</td><td>0.718</td><td>0.039</td><td>0.698</td><td>0.856</td><td>0.790</td><td>0.057</td><td>0.801</td><td>0.859</td><td>0.813</td></tr></table>
170
+
171
+ Table 2: Results for PIS and TOD with point supervision.
172
+
173
+ <table><tr><td rowspan="3">Methods</td><td colspan="11">Polyp Image Segmentation (PIS)</td><td colspan="6">Transparent Object Detection (TOD)</td><td></td><td></td><td></td></tr><tr><td colspan="4">CVC-ColonDB</td><td colspan="4">ETIS</td><td colspan="3">Kvasir</td><td colspan="3">GDD</td><td colspan="3">GSD</td><td></td><td></td><td></td></tr><tr><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td><td>M↓</td><td>Fβ↑</td><td>Eφ↑</td><td>Sα↑</td></tr><tr><td>SAM [19]</td><td>0.479</td><td>0.343</td><td>0.419</td><td>0.427</td><td>0.429</td><td>0.439</td><td>0.512</td><td>0.503</td><td>0.320</td><td>0.545</td><td>0.564</td><td>0.582</td><td>0.245</td><td>0.512</td><td>0.530</td><td>0.551</td><td>0.266</td><td>0.473</td><td>0.501</td><td>0.514</td></tr><tr><td>SAM-P [19]</td><td>0.194</td><td>0.587</td><td>0.664</td><td>0.671</td><td>0.144</td><td>0.625</td><td>0.719</td><td>0.715</td><td>0.108</td><td>0.793</td><td>0.811</td><td>0.802</td><td>0.164</td><td>0.668</td><td>0.715</td><td>0.625</td><td>0.177</td><td>0.687</td><td>0.730</td><td>0.668</td></tr><tr><td>WSSA [36]</td><td>0.127</td><td>0.645</td><td>0.732</td><td>0.713</td><td>0.123</td><td>0.647</td><td>0.733</td><td>0.762</td><td>0.082</td><td>0.822</td><td>0.852</td><td>0.828</td><td>0.173</td><td>0.652</td><td>0.710</td><td>0.616</td><td>0.185</td><td>0.661</td><td>0.712</td><td>0.650</td></tr><tr><td>SCWS [37]</td><td>0.082</td><td>0.674</td><td>0.758</td><td>0.787</td><td>0.085</td><td>0.646</td><td>0.768</td><td>0.731</td><td>0.078</td><td>0.837</td><td>0.860</td><td>0.831</td><td>0.170</td><td>0.631</td><td>0.702</td><td>0.613</td><td>0.172</td><td>0.706</td><td>0.738</td><td>0.673</td></tr><tr><td>TEL 
[38]</td><td>0.089</td><td>0.669</td><td>0.743</td><td>0.761</td><td>0.083</td><td>0.639</td><td>0.776</td><td>0.726</td><td>0.091</td><td>0.810</td><td>0.826</td><td>0.804</td><td>0.230</td><td>0.640</td><td>0.586</td><td>0.536</td><td>0.275</td><td>0.571</td><td>0.501</td><td>0.495</td></tr><tr><td>SCOD [30]</td><td>0.077</td><td>0.691</td><td>0.795</td><td>0.802</td><td>0.071</td><td>0.664</td><td>0.802</td><td>0.766</td><td>0.071</td><td>0.853</td><td>0.877</td><td>0.836</td><td>0.146</td><td>0.801</td><td>0.778</td><td>0.723</td><td>0.154</td><td>0.743</td><td>0.751</td><td>0.710</td></tr><tr><td>SCOD+</td><td>0.074</td><td>0.702</td><td>0.806</td><td>0.803</td><td>0.066</td><td>0.670</td><td>0.811</td><td>0.769</td><td>0.068</td><td>0.860</td><td>0.880</td><td>0.836</td><td>0.129</td><td>0.818</td><td>0.796</td><td>0.732</td><td>0.145</td><td>0.761</td><td>0.765</td><td>0.720</td></tr><tr><td>Ours</td><td>0.043</td><td>0.721</td><td>0.839</td><td>0.816</td><td>0.037</td><td>0.694</td><td>0.849</td><td>0.797</td><td>0.046</td><td>0.878</td><td>0.917</td><td>0.877</td><td>0.078</td><td>0.858</td><td>0.863</td><td>0.775</td><td>0.089</td><td>0.839</td><td>0.841</td><td>0.764</td></tr></table>
174
+
175
+ the minimum outer wrapping rectangle of the foreground/background scribble and dividing it into a nine-box grid, to sample one point in each box and send them to SAM for segmentation mask generation. Following [2], all images are resized as $352 \times 352$ in both the training and testing phases. For SAM [19], we adopt the ViT-H SAM model to generate segmentation masks. We implement our method with PyTorch and run experiments on two RTX3090 GPUs.
176
+
177
+ Baselines. We explore SAM [19] for the WSCOS task by generating segmentation masks with sparse annotations as prompts and using the segmentation masks to train a COS segmenter. However, a more straightforward way to explore SAM for this task is to use the sparse annotation to fine-tune SAM and then directly apply SAM for the test. To verify the advantages of our method over this direct way, we construct two baseline methods, SAM-S and SAM-P, which fine-tune the mask decoder of SAM with scribble and point supervisions, respectively, by the partial cross-entropy loss. We will show the results of these two baselines in our comparative evaluations. For reference, we also report the results of the vanilla SAM. When applying SAM and its variants, SAM-S and SAM-P, on test images, we use the automatic prompt generation strategy and report the results with the highest IoU scores.
178
+
179
+ Metrics. Following existing methods [1, 2], we use four common metrics for evaluation, including mean absolute error $(M)$ , adaptive F-measure $(F_{\beta})$ [40], mean E-measure $(E_{\phi})$ [41], and structure measure $(S_{\alpha})$ [42]. Smaller $M$ , or larger $F_{\beta}$ , $E_{\phi}$ , $S_{\alpha}$ means better segmentation performance.
180
+
181
+ # 4.2 Comparative Evaluation
182
+
183
+ We perform evaluations on the following COS tasks, namely, Camouflaged Object Detection, Polyp Image Segmentation (PIS), and Transparent Object Detection (TOD). For all the tasks, we evaluate the performance with point annotations. We follow the previous weakly-supervised segmentation method [43] and randomly select two points (one from the foreground and one from the background) from the training masks as the point annotations. For COD, we additionally evaluate the performance using scribble annotations, using the scribble data provided in [43].
184
+
185
+ Camouflaged object detection. Four datasets are used for experiments, i.e., CHAMELEON [44], CAMO [45], COD10K [1], and NC4K [16]. Table 1 shows that our method reaches the best performance over all competing methods and baselines. Notably, while SAM has shown impressive
186
+
187
+ Table 3: Ablations for WS-SAM.
188
+
189
+ <table><tr><td>Baseline</td><td>MAF</td><td>PLW</td><td>ILS</td><td>M↓</td><td>\( {F}_{\beta } \uparrow \)</td><td>\( {E}_{\phi } \uparrow \)</td><td>\( {S}_{\alpha } \uparrow \)</td></tr><tr><td>✓</td><td></td><td></td><td></td><td>0.052</td><td>0.674</td><td>0.838</td><td>0.737</td></tr><tr><td>✓</td><td>✓</td><td></td><td></td><td>0.047</td><td>0.689</td><td>0.853</td><td>0.772</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>0.044</td><td>0.697</td><td>0.866</td><td>0.793</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>0.038</td><td>0.719</td><td>0.878</td><td>0.803</td></tr></table>
190
+
191
+ Table 4: Ablations for MFG.
192
+
193
+ <table><tr><td>Metrics</td><td>w/o MFG</td><td>FG-&gt;SA</td><td>w/o multiscale</td><td>WGM-&gt;FC</td><td>w/ MFG</td></tr><tr><td>M ↓</td><td>0.044</td><td>0.038</td><td>0.040</td><td>0.039</td><td>0.038</td></tr><tr><td>Fβ ↑</td><td>0.684</td><td>0.708</td><td>0.702</td><td>0.710</td><td>0.719</td></tr><tr><td>Eφ ↑</td><td>0.857</td><td>0.868</td><td>0.858</td><td>0.871</td><td>0.878</td></tr><tr><td>Sα ↑</td><td>0.780</td><td>0.797</td><td>0.783</td><td>0.792</td><td>0.803</td></tr></table>
194
+
195
+ Table 5: Results of MFG with full supervision.
196
+
197
+ <table><tr><td>Metrics</td><td>Baseline</td><td>SegMaR [17]</td><td>PreyNet [8]</td><td>FGANet [15]</td><td>Ours</td></tr><tr><td>M↓</td><td>0.035</td><td>0.035</td><td>0.034</td><td>0.032</td><td>0.032</td></tr><tr><td>Fβ↑</td><td>0.688</td><td>0.699</td><td>0.715</td><td>0.708</td><td>0.706</td></tr><tr><td>Eφ↑</td><td>0.879</td><td>0.890</td><td>0.894</td><td>0.894</td><td>0.897</td></tr><tr><td>Sα↑</td><td>0.812</td><td>0.813</td><td>0.813</td><td>0.803</td><td>0.813</td></tr></table>
198
+
199
+ Table 6: Results on multi-object images.
200
+
201
+ <table><tr><td>Metrics</td><td>SCWS [37]</td><td>TEL [38]</td><td>SCOD [30]</td><td>Ours</td></tr><tr><td>M↓</td><td>0.094</td><td>0.101</td><td>0.084</td><td>0.070</td></tr><tr><td>Fβ↑</td><td>0.378</td><td>0.350</td><td>0.381</td><td>0.452</td></tr><tr><td>Eφ↑</td><td>0.740</td><td>0.726</td><td>0.718</td><td>0.772</td></tr><tr><td>Sα↑</td><td>0.625</td><td>0.617</td><td>0.643</td><td>0.687</td></tr></table>
202
+
203
+ performance for natural scene images, its performance on the challenging COD task is far from the existing methods particularly designed for this task. We do see performance gains after finetuning SAM with point (SAM-P) and scribble (SAM-S) supervision, but the results are still far below our method. This substantiates the superiority of our way of leveraging SAM to generate segmentation masks with sparse annotations and use the segmentation masks to train the segmenter. To verify that our performance improvement over the existing WSCOS methods does not merely come from the usage of SAM, we integrate the most recent WSCOS method, SCOD [30], into our WS-SAM framework to also leverage the additional mask supervision. This results in the method, "SCOD+". We can see that our method still shows better performance, further verifying our advantages for this task.
204
+
205
+ Polyp image segmentation. Three widely-used Polyp datasets are selected, namely CVC-ColonDB [46], ETIS [47], and Kvasir [48]. Table 2 shows that our method significantly surpasses the second-best method, SCOD, with point supervision. SAM and SAM-P do not perform well on this task, further substantiating their weakness on this challenging segmentation task. While empowering SCOD with the proposed WS-SAM framework indeed improves the performance, the results are still lower than our method. This again verifies our benefit in handling challenging segmentation tasks.
206
+
207
+ Transparent object detection. Two datasets, GDD [6] and GSD [29], are used for evaluation. As shown in Table 2, our method surpasses all baseline methods and existing methods for this task as well. This shows strong robustness and generalizability of our proposed method.
208
+
209
+ # 4.3 Ablation Study
210
+
211
+ Our method includes two main components, the SAM-based weakly-supervised mask generation framework, WS-SAM, and the multi-scale feature grouping (MFG) module. We conduct ablation studies about these two components on $COD10K$ of the COD task with scribble supervision.
212
+
213
+ Ablation study for WS-SAM. We establish a baseline by using SAM to generate only one segmentation mask from one training image without augmentations for model training. On top of this baseline, we add the multi-augmentation fusion (MAF), pixel-level weighting (PLW), and image-level selection (ILS) techniques. Table 3 shows that adding these components helps improve the performance, thus demonstrating their effectiveness.
214
+
215
+ Ablation study for MFG. We examine the effect of MFG by completely removing the MFG module, substituting the proposed feature grouping (FG) with slot attention (SA) [33], removing the multiscale strategy, and substituting the weighted gate mechanism (WGM) with fixed coefficient (FC). Table 4 shows that our designs reach better performance than the alternative ones.
216
+
217
+ # 4.4 Further Analysis
218
+
219
+ MFG for the fully-supervised setting. The proposed MFG module is designed to excavate discriminative cues from concealed images. We have demonstrated its effectiveness with sparse annotations for the weakly-supervised setting. However, it is expected to also work in the fully-supervised setting. To verify this, we conduct experiments for the standard fully-supervised COD task. Table 5 shows the results on the $COD10K$ dataset. We can see that MFG indeed helps improve the performance of the baseline model, to a level comparable with state-of-the-art methods.
220
+
221
+ Performance on multi-object images. The proposed MFG module excavates discriminative cues by performing feature grouping at different granularities, which facilitates discovering multiple objects
222
+
223
+ Table 7: Parameter analysis on $K, {\tau }_{a},{\tau }_{r},T$ ,and $\left( {{N}_{1},{N}_{2}}\right)$ .
224
+
225
+ <table><tr><td rowspan="2">Metrics</td><td colspan="4">K</td><td colspan="4">τa</td><td colspan="4">τr</td><td colspan="4">T</td><td colspan="4">(N1,N2)</td></tr><tr><td>1</td><td>6</td><td>12</td><td>18</td><td>0.05</td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td><td>1</td><td>2</td><td>3</td><td>4</td><td>(2,4)</td><td>(2,8)</td><td>(4,8)</td><td>(2,4,8)</td></tr><tr><td>M↓</td><td>0.052</td><td>0.042</td><td>0.038</td><td>0.039</td><td>0.037</td><td>0.038</td><td>0.038</td><td>0.040</td><td>0.038</td><td>0.038</td><td>0.039</td><td>0.040</td><td>0.039</td><td>0.039</td><td>0.038</td><td>0.038</td><td>0.038</td><td>0.038</td><td>0.039</td><td>0.038</td></tr><tr><td>Fβ↑</td><td>0.674</td><td>0.697</td><td>0.719</td><td>0.718</td><td>0.706</td><td>0.719</td><td>0.716</td><td>0.704</td><td>0.723</td><td>0.719</td><td>0.715</td><td>0.700</td><td>0.706</td><td>0.715</td><td>0.719</td><td>0.720</td><td>0.719</td><td>0.714</td><td>0.711</td><td>0.721</td></tr><tr><td>Eφ↑</td><td>0.838</td><td>0.857</td><td>0.878</td><td>0.878</td><td>0.868</td><td>0.878</td><td>0.876</td><td>0.865</td><td>0.866</td><td>0.878</td><td>0.874</td><td>0.851</td><td>0.862</td><td>0.872</td><td>0.878</td><td>0.876</td><td>0.878</td><td>0.875</td><td>0.873</td><td>0.878</td></tr><tr><td>Sα↑</td><td>0.737</td><td>0.776</td><td>0.803</td><td>0.800</td><td>0.795</td><td>0.803</td><td>0.805</td><td>0.793</td><td>0.792</td><td>0.803</td><td>0.789</td><td>0.781</td><td>0.794</td><td>0.800</td><td>0.803</td><td>0.805</td><td>0.803</td><td>0.802</td><td>0.799</td><td>0.802</td></tr></table>
226
+
227
+ ![](images/39bec782ddf0a5997850b530f97bf5037d929c1c957d924be84dafc758053802.jpg)
228
+
229
+ ![](images/81baf42fa0ece477bb91f170f3610583a3650f81588991ddc6619c97a2333ffd.jpg)
230
+ (b) GT
231
+
232
+ ![](images/b395582c9840edf59e25b3c0761c4cc99489bcae828d50b4625c36a221a9e535.jpg)
233
+ (c) Ours
234
+ Figure 4: Visualized results for COD tasks.
235
+
236
+ ![](images/92765bb20e5025b16b3810d35b2f1fa1019e24ca1d6c62fffb9855eaf12eeda0.jpg)
237
+ (d) SCOD
238
+
239
+ ![](images/c010c3b656f9833c404a6dbc5b798d5a7de6f7541c82b6f3f5ee34c480e64204.jpg)
240
+ (e) TEL
241
+
242
+ ![](images/7090c9accabd201105b4a4ba0b93d6fb068e49147de36ad65e3bb7e647c314d4.jpg)
243
+ (f) SCWS
244
+
245
+ ![](images/3606e478e368b8a0898eebcaed1ba3e2670b1f3a5f376da8b2244d137c4c0dba.jpg)
246
+ (g) WSSA
247
+
248
+ ![](images/747c026e522593814cee60be06361c4054505b7628b73ed35916d3045e5a7d71.jpg)
249
+ (h) SAM-P
250
+
251
+ in images. To verify this, we evaluate the performance on the 186 images with more than one object from $COD10K$ . Table 6 shows that MFG achieves the best performance, surpassing the second-best method (SCOD) by $10.7\%$ . This gap is larger than that with all test images, where the gap is $5.8\%$ .
252
+
253
+ Randomness of point supervision. We follow the existing point-supervision segmentation methods and randomly select points from ground truth masks as the point annotation.
254
+
255
+ To study the variance of the random selection, we repeat the random selection 5 times and calculate the mean and standard deviation of the results. Fig. 5 shows that our method reaches the best results while having the smallest deviation.
256
+
257
+ Number of augmented views $K$ . Table 7 shows that more augmented views help improve performance in the beginning, but the effect turns weaker when further increasing it.
258
+
259
+ Hyperparameters in image-level selection. Table 7 shows that it is best to set the absolute uncertainty threshold $\tau_{a} = 0.1$ and the relative uncertainty threshold $\tau_{r} = 0.5$ , and our method is not sensitive to these two thresholds within a reasonable range.
260
+
261
+ ![](images/f52de29f1d353a52feef9987cd67dbf3d7fde92877108e62677641309166dcc7.jpg)
262
+ Figure 5: Five runs results with varied point annotations.
263
+
264
+ Hyperparameters in MFG. Table 7 shows that MFG achieves the best results when the iteration number $T$ is set as 3, and the groups and scales setting is set as $(N_{1}, N_{2}) = (2, 4)$ . Notice that when adopting $(2, 4, 8)$ , RK2 is replaced with the third-order RK structure, resulting in extra computational burden with limited benefits. Hence, we select the RK2 structure with $(N_{1}, N_{2}) = (2, 4)$ .
265
+
266
+ Result visualization. Fig. 4 shows the prediction maps with point supervision. We can see that our method produces more complete results than existing methods and localizes multiple objects more comprehensively. More visualization results can be found in the supplementary materials.
267
+
268
+ # 5 Conclusions
269
+
270
+ This paper proposes a new WSCOS method that includes two key components. The first one is the WS-SAM framework that generates segmentation masks with the recently proposed vision foundation model, SAM, and proposes multi-augmentation result fusion, pixel-level uncertainty weighting, and image-level uncertainty filtration to get reliable pseudo labels to train a segmentation model. The second is the MFG module that leverages the extracted clues for additional nuanced discrimination information. MFG improves feature coherence from a grouping aspect, allowing for alleviating incomplete segmentation and better multiple-object segmentation. Experiments on multiple WSCOS tasks confirm the superiority of our method over the baseline and existing methods.
271
+
272
+ Acknowledgements: This research is partly supported by the National Key R&D Program of China (Grants No. 2020AAA0108302 & 2020AAA0108303), and Shenzhen Science and Technology Project (Grant No. JCYJ20200109143041798) & Shenzhen Stable Supporting Program (WDZC20200820200655001) & Shenzhen Key Laboratory of next generation interactive media innovative technology (Grant No. ZDSYS20210623092001004). The authors express their appreciation to Dr. Fengyang Xiao for her insightful comments, improving the quality of this paper.
273
+
274
+ # References
275
+
276
+ [1] Deng-Ping Fan, Ge-Peng Ji, Ming-Ming Cheng, and Ling Shao. Concealed object detection. IEEE Trans. Pattern Anal. Mach. Intell., 2021. 1, 7
277
+ [2] Deng-Ping Fan, Ge-Peng Ji, Guolei Sun, Ming-Ming Cheng, Jianbing Shen, and Ling Shao. Camouflaged object detection. In CVPR, pages 2777-2787, 2020. 1, 3, 7
278
+ [3] Chunming He, Kai Li, Yachao Zhang, Yulun Zhang, Zhenhua Guo, Xiu Li, Martin Danelljan, and Fisher Yu. Strategic preys make acute predators: Enhancing camouflaged object detectors by generating camouflaged objects. arXiv preprint arXiv:2308.03166, 2023. 1
279
+ [4] Deng-Ping Fan, Ge-Peng Ji, Tao Zhou, Geng Chen, Huazhu Fu, Jianbing Shen, and Ling Shao. Pranet: Parallel reverse attention network for polyp segmentation. In MICCAI, pages 263-273. Springer, 2020. 1, 3
280
+ [5] Ruifei Zhang, Guanbin Li, Zhen Li, Shuguang Cui, Dahong Qian, and Yizhou Yu. Adaptive context selection for polyp segmentation. In MICCAI, pages 253-262. Springer, 2020. 1
281
+ [6] Haiyang Mei, Xin Yang, Yang Wang, Yuanyuan Liu, Shengfeng He, Qiang Zhang, Xiaopeng Wei, and Rynson WH Lau. Don't hit me! glass detection in real-world scenes. In CVPR, pages 3687-3696, 2020. 1, 8
282
+ [7] Enze Xie, Wenjia Wang, Wenhai Wang, Mingyu Ding, Chunhua Shen, and Ping Luo. Segmenting transparent objects in the wild. In ECCV. Springer, 2020. 1
283
+ [8] Miao Zhang, Shuang Xu, Yongri Piao, Dongxiang Shi, Shusen Lin, and Huchuan Lu. Preynet: Preying on camouflaged objects. In ACM MM, pages 5323-5332, 2022. 1, 8
284
+ [9] Haiyang Mei, Ge-Peng Ji, Ziqi Wei, Xin Yang, Xiaopeng Wei, and Deng-Ping Fan. Camouflaged object segmentation with distraction mining. In CVPR, pages 8772-8781, 2021.
285
+ [10] Youwei Pang, Xiaqi Zhao, Tian-Zhu Xiang, Lihe Zhang, and Huchuan Lu. Zoom in and out: A mixed-scale triplet network for camouflaged object detection. In CVPR, pages 2160-2170, 2022. 1
286
+ [11] Yijie Zhong, Bo Li, Lv Tang, Senyun Kuang, Shuang Wu, and Shouhong Ding. Detecting camouflaged object in frequency domain. In CVPR, pages 4504-4513, 2022. 1
287
+ [12] Chunming He, Kai Li, Yachao Zhang, Longxiang Tang, Yulun Zhang, Zhenhua Guo, and Xiu Li. Camouflaged object detection with feature decomposition and edge reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22046-22055, 2023. 1, 6
288
+ [13] Qiang Zhai, Xin Li, Fan Yang, Chenglizhao Chen, Hong Cheng, and Deng-Ping Fan. Mutual graph learning for camouflaged object detection. In CVPR, pages 12997-13007, 2021. 1
289
+ [14] Yujia Sun, Shuo Wang, Chenglizhao Chen, and Tian-Zhu Xiang. Boundary-guided camouflaged object detection. arXiv preprint arXiv:2207.00794, 2022.
290
+ [15] Wei Zhai, Yang Cao, Jing Zhang, and Zheng-Jun Zha. Exploring figure-ground assignment mechanism in perceptual organization. In NIPS, volume 35, 2022. 8
291
+ [16] Yunqiu Lv, Jing Zhang, Yuchao Dai, Aixuan Li, Bowen Liu, Nick Barnes, and Deng-Ping Fan. Simultaneously localize, segment and rank the camouflaged objects. In CVPR, pages 11591-11601, 2021.
292
+ [17] Qi Jia, Shuilian Yao, Yu Liu, Xin Fan, Risheng Liu, and Zhongxuan Luo. Segment, magnify and reiterate: Detecting camouflaged objects the hard way. In CVPR, pages 4713-4722, 2022. 8
293
+ [18] Hongwei Zhu, Peng Li, Haoran Xie, Xuefeng Yan, Dong Liang, Dapeng Chen, Mingqiang Wei, and Jing Qin. I can find you! boundary-guided separated attention network for camouflaged object detection. In AAAI, volume 36, pages 3608-3616, 2022. 1
294
+ [19] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2, 3, 7
295
+ [20] Ge-Peng Ji, Deng-Ping Fan, Peng Xu, Ming-Ming Cheng, Bowen Zhou, and Luc Van Gool. Sam struggles in concealed scenes—empirical study on "segment anything". arXiv preprint arXiv:2304.06022, 2023. 2
296
+
297
+ [21] Lv Tang, Haoke Xiao, and Bo Li. Can sam segment anything? when sam meets camouflaged object detection. arXiv preprint arXiv:2304.04709, 2023.
298
+ [22] Wei Ji, Jingjing Li, Qi Bi, Wenbo Li, and Li Cheng. Segment anything is not always perfect: An investigation of sam on different real-world applications. arXiv preprint arXiv:2304.05750, 2023. 2
299
+ [23] Maciej A Mazurowski, Haoyu Dong, Hanxue Gu, Jichen Yang, Nicholas Konz, and Yixin Zhang. Segment anything model for medical image analysis: an experimental study. arXiv preprint arXiv:2304.10517, 2023. 2
300
+ [24] Dongjie Cheng, Ziyuan Qin, Zekun Jiang, Shaoting Zhang, Qicheng Lao, and Kang Li. Sam on medical images: A comprehensive study on three prompt modes. arXiv preprint arXiv:2305.00035, 2023.
301
+ [25] Christian Mattjie, Luis Vinicius de Moura, Rafaela Cappelari Ravazio, Lucas Silveira Kupssinski, Otávio Parraga, Marcelo Mussi Delucis, and Rodrigo Coelho Barros. Exploring the zero-shot capabilities of the segment anything model (sam) in 2d medical imaging: A comprehensive evaluation and practical guideline. arXiv preprint arXiv:2305.00109, 2023.
302
+ [26] Tao Zhou, Yizhe Zhang, Yi Zhou, Ye Wu, and Chen Gong. Can sam segment polyps? arXiv preprint arXiv:2304.07583, 2023.
303
+ [27] Chunming He, Kai Li, Guoxia Xu, Jiangpeng Yan, Longxiang Tang, Yulun Zhang, Xiu Li, and Yaowei Wang. Hqg-net: Unpaired medical image enhancement with high-quality guidance. arXiv preprint arXiv:2307.07829, 2023. 2
304
+ [28] Dongsheng Han, Chaoning Zhang, Yu Qiao, Maryam Qamar, Yuna Jung, SeungKyu Lee, Sung-Ho Bae, and Choong Seon Hong. Segment anything model (sam) meets glass: Mirror and transparent objects cannot be easily detected. arXiv preprint arXiv:2305.00278, 2023. 2
305
+ [29] Jiaying Lin, Zebang He, and Rynson WH Lau. Rich context aggregation with reflection prior for glass surface detection. In CVPR, pages 13415-13424, 2021. 3, 8
306
+ [30] Ruozhen He, Qihua Dong, Jiaying Lin, and Rynson WH Lau. Weakly-supervised camouflaged object detection with scribble annotations. AAAI, 2023. 3, 6, 7, 8
307
+ [31] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 6
308
+ [32] Chunming He, Kai Li, Guoxia Xu, Yulun Zhang, Runze Hu, Zhenhua Guo, and Xiu Li. Degradation-resistant unfolding network for heterogeneous image fusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12611-12621, 2023. 6
309
+ [33] Francesco Locatello, Dirk Weissenborn, Thomas Unterthiner, Aravindh Mahendran, Georg Heigold, Jakob Uszkoreit, Alexey Dosovitskiy, and Thomas Kipf. Object-centric learning with slot attention. NIPS, 33: 11525-11538, 2020. 6, 8
310
+ [34] Longxiang Tang, Kai Li, Chunming He, Yulun Zhang, and Xiu Li. Source-free domain adaptive fundus image segmentation with class-balanced mean teacher. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 684-694. Springer, 2023. 6
311
+ [35] Longxiang Tang, Kai Li, Chunming He, Yulun Zhang, and Xiu Li. Consistency regularization for generalizable source-free domain adaptation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4323-4333, 2023. 6
312
+ [36] Jing Zhang, Xin Yu, Aixuan Li, Peipei Song, Bowen Liu, and Yuchao Dai. Weakly-supervised salient object detection via scribble annotations. In CVPR, pages 12546-12555, 2020. 7
313
+ [37] Siyue Yu, Bingfeng Zhang, Jimin Xiao, and Eng Gee Lim. Structure-consistent weakly supervised salient object detection with local saliency coherence. In AAAI, volume 35, pages 3234-3242, 2021. 7, 8
314
+ [38] Zhiyuan Liang, Tiancai Wang, Xiangyu Zhang, Jian Sun, and Jianbing Shen. Tree energy loss: Towards sparsely annotated semantic segmentation. In CVPR, pages 16907-16916, 2022. 7, 8
315
+ [39] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248–255. IEEE, 2009. 6
316
+ [40] Ran Margolin, Lihi Zelnik-Manor, and Ayellet Tal. How to evaluate foreground maps? In CVPR, pages 248-255, 2014. 7
317
+
318
+ [41] Deng-Ping Fan, Ge-Peng Ji, Xuebin Qin, and Ming-Ming Cheng. Cognitive vision inspired object segmentation metric and loss function. Scientia Sinica Informationis, 6(6), 2021. 7
319
+ [42] Deng-Ping Fan, Ming-Ming Cheng, Yun Liu, Tao Li, and Ali Borji. Structure-measure: A new way to evaluate foreground maps. In ICCV, pages 4548-4557, 2017. 7
320
+ [43] Shuyong Gao, Wei Zhang, Yan Wang, Qianyu Guo, Chenglong Zhang, Yangji He, and Wenqiang Zhang. Weakly-supervised salient object detection using point supervision. In AAAI, volume 36, pages 670-678, 2022. 7
321
+ [44] Przemysław Skurowski, Hassan Abdulameer, J Błaszczyk, Tomasz Depta, Adam Kornacki, and P Koziel. Animal camouflage analysis: Chameleon database. Unpublished manuscript, 2(6):7, 2018. 7
322
+ [45] Trung-Nghia Le, Tam V Nguyen, Zhongliang Nie, Minh-Triet Tran, and Akihiro Sugimoto. Anabranch network for camouflaged object segmentation. Comput. Vis. Image Underst., 184:45-56, 2019. 7
323
+ [46] Nima Tajbakhsh, Suryakanth R Gurudu, and Jianming Liang. Automated polyp detection in colonoscopy videos using shape and context information. IEEE Trans. Med. Imaging, 35(2):630-644, 2015. 8
324
+ [47] Juan Silva, Aymeric Histace, Olivier Romain, Xavier Dray, and Bertrand Granado. Toward embedded detection of polyps in wce images for early diagnosis of colorectal cancer. Int. J. Comput. Assist. Radiol. Surg., 9:283-293, 2014. 8
325
+ [48] Debesh Jha, Pia H Smedsrud, Michael A Riegler, Pål Halvorsen, Thomas de Lange, Dag Johansen, and Håvard D Johansen. Kvasir-seg: A segmented polyp dataset. In MMM, pages 451-462. Springer, 2020. 8
weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc015b760c36929bdccf47d109379ddd7cae0de8a8372e086db18c6075bf2e0a
3
+ size 607210
weaklysupervisedconcealedobjectsegmentationwithsambasedpseudolabelingandmultiscalefeaturegrouping/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:842892d0170893a92e519311085f4653663283209229fefe174681761d4a434b
3
+ size 423271
weightedroccurveincostspaceextendingauctocostsensitivelearning/f607b8a6-a90d-4725-8e04-9ef5dbbf4297_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:858894b1829ca3f547f5721f083d9ad42d9f6a82fbbc0237fae012568762fb65
3
+ size 85131
weightedroccurveincostspaceextendingauctocostsensitivelearning/f607b8a6-a90d-4725-8e04-9ef5dbbf4297_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:497c13beac206923e9d3aecc3cdc166380e61f337b29c6d2bf6aacacd412b7bc
3
+ size 105152
weightedroccurveincostspaceextendingauctocostsensitivelearning/f607b8a6-a90d-4725-8e04-9ef5dbbf4297_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1fdcc05e1308c759620a127a913aa15366204dd3c3596dbb9825606d3ab3708
3
+ size 4011215
weightedroccurveincostspaceextendingauctocostsensitivelearning/full.md ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Weighted ROC Curve in Cost Space: Extending AUC to Cost-Sensitive Learning
2
+
3
+ Huiyang Shao $^{1,2}$ Qianqian Xu $^{1*}$ Zhiyong Yang $^{2}$ Peisong Wen $^{1,2}$ Peifeng Gao $^{2}$ Qingming Huang $^{2,1,3*}$ $^{1}$ Key Lab. of Intelligent Information Processing, Institute of Computing Tech., CAS
4
+ $^{2}$ School of Computer Science and Tech., University of Chinese Academy of Sciences
5
+ $^{3}$ BDKM, University of Chinese Academy of Sciences
6
+ shaohuiyang21@mails.ucas.ac.cn xuqianqian@ict.ac.cn
7
+ wenpeisong20z@ict.ac.cn gaopeifeng21@mails.ucas.ac.cn
8
+ yangzhiyong21@ucas.ac.cn qmhuang@ucas.ac.cn
9
+
10
+ # Abstract
11
+
12
+ In this paper, we aim to tackle flexible cost requirements for long-tail datasets, where we need to construct a (1) cost-sensitive and (2) class-distribution robust learning framework. The misclassification cost and the area under the ROC curve (AUC) are popular metrics for (1) and (2), respectively. However, limited by their formulations, models trained with AUC are not well-suited for cost-sensitive decision problems, and models trained with fixed costs are sensitive to the class distribution shift. To address this issue, we present a new setting where costs are treated like a dataset to deal with arbitrarily unknown cost distributions. Moreover, we propose a novel weighted version of AUC where the cost distribution can be integrated into its calculation through decision thresholds. To formulate this setting, we propose a novel bilevel paradigm to bridge weighted AUC (WAUC) and cost. The inner-level problem approximates the optimal threshold from sampling costs, and the outer-level problem minimizes the WAUC loss over the optimal threshold distribution. To optimize this bilevel paradigm, we employ a stochastic optimization algorithm (SACCL) which enjoys the same convergence rate $(O(\epsilon^{-4}))$ as SGD. Finally, experimental results show that our algorithm performs better than existing cost-sensitive learning methods and two-stage AUC decision approaches.
13
+
14
+ # 1 Introduction
15
+
16
+ Receiver Operating Characteristics (ROC) is a popular tool to describe the trade-off between the True Positive Rate (TPR) and False Positive Rate (FPR) of a scoring function. AUC is defined by the area under the ROC curve [17, 18]. This metric naturally measures the average classification performance under different thresholds and is widely used (e.g., disease prediction [19], and anomaly detection [29]). Compared with accuracy, AUC is insensitive to the threshold and cost [7], making it be a popular metric for long-tail learning [32] and achieve remarkable success [24, 44, 26].
17
+
18
+ Similar to AUC optimization, cost-sensitive learning is a common data mining method [10, 2, 4]. The main goal is to incorporate the misclassification costs in the model, which is more compatible with realistic scenarios (e.g., the cost of misdiagnosing a disease as healthy is greater than the counterexample). Over the past two decades, researchers have pointed out that the ROC curve can be transferred to cost space by utilizing a threshold choice method, this is equivalent to computing the area under the convex hull of the ROC curve [21]. In this way, AUC can be seen as the performance of the model with a uniform cost distribution [16]. However, AUC considers all situations, which can not focus more on hard samples, Partial AUC (PAUC) is proposed as an extension of AUC with
19
+
20
+ Table 1: Comparison with existing classification settings. Cost distribution represents the cost condition of each setting.
21
+
22
+ <table><tr><td>Different setting</td><td>Formulation</td><td>Attr.1</td><td>Attr.2</td><td>Cost distribution</td></tr><tr><td>Cost learning</td><td>Ex[c·π·p(1|x) + (1-c)·(1-π)·p(0|x)]</td><td>×</td><td>×</td><td>f(c)c0δ(c=c0)c</td></tr><tr><td>AUC/PAUC</td><td>Eτ[TPR(τ)FPR&#x27;(τ)] τ~U(a,b)</td><td>√</td><td>×</td><td>f(c)U(a,b)c</td></tr><tr><td>WAUC</td><td>Eτ[TPR(τ)FPR&#x27;(τ)W(τ)] W(τ) ~ beta(a,b)</td><td>√</td><td>×</td><td>f(c)beta(a,b)c</td></tr><tr><td>Our method</td><td>Eτ~τ* [TPR(τ)FPR&#x27;(τ)] τ* ∈ arg minτLCost</td><td>√</td><td>√</td><td>f(c)Sampled cost distributionc</td></tr></table>
23
+
24
+ truncated uniform cost distribution [31]. Recently, some studies extend PAUC using parameterized cost distributions and propose WAUC to fit real-world applications [16, 30].
25
+
26
+ Whether we use AUC or cost learning, our main purpose is to train models with these attributes: Attr.1: The trained model can be robust to class distribution shift in the test without class prior.
27
+
28
+ Attr.2: The trained model can be robust to cost distribution in the test without cost prior.
29
+
30
+ However, to the best of our knowledge, there are few methods can train a model to have both of these attributes. According to Tab. 1, both AUC-related methods and cost-sensitive learning require a strong prior knowledge of the cost distribution; (1) The cost learning mainly considers a specified cost $c$ and class imbalanced radio $\pi$ . Models trained under this method are sensitive to class distribution, which does not apply to the scenario where test data distribution with offset. (2) AUC (PAUC) assumes that the cost distribution belongs to (truncated) uniform cost distribution $U(a,b)$ . Models trained with them will have poor performance when the true cost distribution is not uniform [16]. (3) WAUC considers optimizing models based on more complex forms of cost distribution, such as $beta(a,b)$ . However, we can not obtain the cost prior in real problem scenarios, e.g., financial market prediction [14]. Considering the weakness comes from the existing settings, we will explore the following question in this paper:
31
+
32
+ Can we bridge AUC and complicated cost distributions to train a robust model for desired cost-sensitive and arbitrarily class-imbalanced decision scenarios?
33
+
34
+ To answer this question, we propose a view that, in some real applications [14], the cost, like the instance data, is not available prior but can be obtained by sampling. Therefore, we choose to sample desired cost to approximate the true cost distribution. Different from previous settings, ours is closer to real world, the main process can be divided into three parts:
35
+
36
+ Step.1 Cost Sampling: Firstly, we sample some desired costs to construct the empirical cost set.
37
+
38
+ Step.2 Data Sampling: Next, we sample some instance data to construct the empirical dataset.
39
+
40
+ Step.3 Build Formulation: Finally, we construct the appropriate formulation to maximize the performance in different desired costs and ensure model is robust to distribution shift.
41
+
42
+ It is natural for us to ask the question: Can we use the existing methods to realize this process? It's clear the answer is no. For AUC-related methods, they can not perform Step.1, and for cost-sensitive learning, they fail to achieve robust distribution shift and multiple costs in Step.3 (as shown in Fig. 1 (orange line). Hence, we propose a novel bilevel formulation combining the advantages of WAUC and cost learning. The inner-level process calculates the optimal threshold from sampling costs, and the outer-level process minimizes the WAUC loss over the optimal threshold distribution. The method can help the model improve robustness to class distribution in cost-sensitive decision problems. The main process is shown in Fig. 1 (green line). We summarize our contributions below:
43
+
44
+ - We propose a setting that focuses on the robustness of the model to the class distribution and cost distribution simultaneously. This setting treats cost as data that can be sampled, not as prior information, which is closer to the real-world cost-sensitive scenario.
45
+
46
+ ![](images/8f0199eb65c13676ec791b41ed698e29e63bc09726107da406b6143e54461b63.jpg)
47
+ Figure 1: The comparison of our proposed setting with the previous setting. The orange line represents the previous cost-sensitive learning approach, and the green line represents our method.
48
+
49
+ - We present a bilevel paradigm where the inner cost function is an inner constraint of outer WAUC optimization. For sake of optimization, we reformulate this paradigm into a nonconvex-strongly convex bilevel form. Moreover, we employ a stochastic optimization algorithm for WAUC (SACCL), which can solve this problem efficiently.
50
+ - We conduct extensive experiments on multiple imbalanced cost-sensitive classification tasks. The experimental results speak to the effectiveness of our proposed methods.
51
+
52
+ # 2 Observation and Motivation
53
+
54
+ In Tab. 1, we compare the existing methods with ours from different views. However, the table alone does not provide an intuitive illustration. In this section, we will analyze the disadvantages of existing settings and explain our motivation. We train the model with all methods on a Cifar-10-Long-Tail training set under different imbalance ratios and cost distributions. We visualize the feature representation (the last layer's output of the trained model) on test data by t-SNE. The blue points represent negative samples predicted by the optimal threshold, and the orange points represent positive samples. The smaller the overlap between them, the better the performance. From Fig. 2, we can make the following remarks: (1) According to Fig. 2 (a), AUC is robust to changes in the imbalance ratio but is completely inapplicable to complicated cost distributions. (2) According to Fig. 2 (b), cost learning can process different cost distributions, but is sensitive to imbalance ratios.
55
+
56
+ ![](images/d09279ce62015a91bbf07e2bb4f7d74460713d40fdaab201f9ccb4c78ed02ace.jpg)
57
+
58
+ ![](images/7ac8a6ba083d412cb5dc30eeccf4bf248e438eae33aac8a0adc8a906fe4cddca.jpg)
59
+
60
+ ![](images/557e227e7bf8808460f141862f220cb892024a73fc2c508e4b5cd8630c18f845.jpg)
61
+
62
+ ![](images/edb93a8c007cefe5b95084dbdb3434c4ca71c0bc7aa62da6f52ccfbb0831e6d1.jpg)
63
+
64
+ ![](images/1ba465f29cdc5bc23e189f516391377514e87400a47cdabc11ba8638243105e9.jpg)
65
+
66
+ ![](images/b383e1b30e5b82cf01b64ed777133cca2b84fd901aeae58e85fb5fd5526e9cfd.jpg)
67
+
68
+ ![](images/51c484ff938de2782b25832205f9f117332ba08d95b2e7aff93987ad8ab275e3.jpg)
69
(a) t-SNE result of representation (AUC)
70
+
71
+ ![](images/ac4cbe2a684d1ac3eb6a9e04c67b839af4285d3f11ecfa4187ca997b0db08112.jpg)
72
+ Figure 2: The feature representations comparison among different methods. (a) AUC optimization. (b) Cost learning. (c) Ours result. We solve the optimal threshold with the $\hat{\mathcal{L}}_{COST}$ (defined in Sec. 3). $\pi$ denotes the probability of positive class, $c$ denotes the cost ratio of misclassification ( $U$ denotes Uniform, $N$ denotes Normal). For example, $\pi = 0.5$ , $c \sim N$ means model tested on dataset which has imbalanced ratio $\pi$ and cost set sampled from $N$ .
73
+
74
+ ![](images/c382f96fa88e86001fca3026bdc83c6ce1d2ac302cdf5b46f560dc6124a741a6.jpg)
75
(b) t-SNE result of representation (COST)
76
+
77
+ ![](images/1f30b7e979c59444945e474f8ae993416092308d19b7c0e3d40c8e283aa60da5.jpg)
78
+
79
+ ![](images/1327c555b7e922e2455307578f6141747983e7681f3d383c4e6573a2b52f7a8f.jpg)
80
(c) t-SNE result of representation (Ours)
81
+
82
+ ![](images/93b3fda4c6666022811976a866da4ed4694d1619b56a66a0e65c3ad2400103bc.jpg)
83
+
84
+ Hence, our motivation is to propose a new approach to solve the problems in AUC and Cost-sensitive learning. As shown in Fig. 2 (c), our proposed method can have better learning results in various complex cost distributions and imbalance ratios. That means our method can overcome the shortcomings of traditional AUC and cost-learning, which perfectly fits the proposed setting.
85
+
86
+ # 3 Preliminaries
87
+
88
+ Notations. In this section, we give definitions and preliminaries about AUC. We denote $(\pmb{x},y)$ be an instance, where $\pmb{x}$ is drawn from feature space $\mathcal{X} \subseteq \mathbb{R}^d$ ( $d$ is feature number) and $y$ is drawn from
89
+
90
+ label space $\mathcal{V} = \{0,1\}$ . Let $\mathcal{D}_{\mathcal{P}}(\mathcal{D}_{\mathcal{N}}\text{resp.})$ be positive (negative resp.) instance distribution. Let $\pmb{x}^{+}\sim \mathcal{D}_{\mathcal{P}}(\pmb{x}^{-}\sim \mathcal{D}_{\mathcal{N}}\text{resp.})$ be positive (negative resp.) instance. We denote $S_{+} = \{(x_{i}^{+},y_{i})\}_{i = 1}^{n_{+}}$ ( $S_{-} = \{(x_{j}^{-},y_{j})\}_{j = 1}^{n_{-}}$ resp.) as a set of training data drawn from $\mathcal{D}_{\mathcal{P}}(\mathcal{D}_{\mathcal{N}}\text{resp.})$ , where $n_+(n_-)$ denotes the instance number of $S_{+}$ ( $S_{-}\text{resp.}$ ). Let $\mathbb{I}_{(\cdot)}$ be the indicator function, which returns 1 when the condition is true and 0 otherwise. In this paper, we focus on the deep neural network scoring function $s(\pmb {w},\pmb {x}):X\mapsto [0,1]$ , where parameterized by $\pmb{w}$ on an input $\pmb{x}$ .
91
+
92
+ AUC & WAUC. For specified threshold $\tau$ , the TPR of a classifier derived from $s(\boldsymbol{w}, \boldsymbol{x})$ measures the likelihood that it accurately predicts a positive instance when getting a random positive instance from $\mathcal{D}_{\mathcal{P}}$ . Formally, we have:
93
+
94
+ $$
95
+ \left(\operatorname {P o p .}\right) \operatorname {T P R} _ {s} (\tau) = \mathbb {P} _ {\boldsymbol {x} ^ {+} \sim \mathcal {D} _ {\mathcal {P}}} [ s (\boldsymbol {w}, \boldsymbol {x} ^ {+}) > \tau ] \quad \left(\operatorname {E m p .}\right) \widehat {\operatorname {T P R}} _ {s} (\tau) = \frac {1}{n _ {+}} \sum_ {i = 1} ^ {n _ {+}} \mathbb {I} _ {s (\boldsymbol {w}, \boldsymbol {x} _ {i} ^ {+}) > \tau}. \tag {1}
96
+ $$
97
+
98
+ In a similar spirit, the classifier's FPR on threshold $\tau$ refers to the probability that it predicts positive when it gets a negative instance from $\mathcal{D}_{\mathcal{N}}$ .
99
+
100
+ $$
101
+ \left(\operatorname {P o p .}\right) \operatorname {F P R} _ {s} (\tau) = \mathbb {P} _ {\boldsymbol {x} ^ {-} \sim \mathcal {D} _ {\mathcal {N}}} [ s (\boldsymbol {w}, \boldsymbol {x} ^ {-}) > \tau ] \quad \left(\operatorname {E m p .}\right) \widehat {\operatorname {F P R}} _ {s} (\tau) = \frac {1}{n _ {-}} \sum_ {j = 1} ^ {n _ {-}} \mathbb {I} _ {s (\boldsymbol {w}, \boldsymbol {x} _ {j} ^ {-}) > \tau}. \tag {2}
102
+ $$
103
+
104
+ AUC measures a scoring function's trade-off between TPR and FPR under uniform thresholds. Denote $\tau$ drawn from the distribution $\mathcal{D}_{\tau}$ , WAUC utilizes the threshold distribution explicitly based on AUC. $\mathrm{FPR}_s^\prime$ denotes the probability density function of $s(\boldsymbol {w},\boldsymbol{x}^{-})$
105
+
106
+ $$
107
+ \mathrm {A U C} = \int_ {- \infty} ^ {\infty} \mathrm {T P R} _ {s} (\tau) \mathrm {F P R} _ {s} ^ {\prime} (\tau) d \tau \tag {3a}
108
+ $$
109
+
110
+ $$
111
+ \mathrm {W A U C} = \int_ {- \infty} ^ {\infty} \mathrm {T P R} _ {s} (\tau) \mathrm {F P R} _ {s} ^ {\prime} (\tau) p (\tau) d \tau , \tag {3b}
112
+ $$
113
+
114
+ Cost function [2]. In some real application scenarios, we need to consider the misclassification cost. We denote $c_{(\cdot)}$ as misclassification cost for class $(\cdot)$ , cost $c$ drawn from $\mathcal{D}_c$ . Since we could not obtain the cost distribution $\mathcal{D}_c$ , we sample empirical set $S_c = \{c_l\}_{l=1}^{n_c}$ , $n_c$ denotes the sample number of cost $c$ . Given a scoring function $s$ and parameter $w$ , the cost function $\mathcal{L}_{COST}$ is (the empirical version of cost function, $\widehat{\mathcal{L}}_{COST}$ contains the empirical forms of TPR and FPR):
115
+
116
+ $$
117
+ \mathcal {L} _ {C O S T} (\boldsymbol {w}, c, \tau^ {*} (c)) = c \cdot \pi \cdot \left(1 - \operatorname {T P R} _ {s} \left(\tau^ {*} (c)\right)\right) + (1 - c) \cdot (1 - \pi) \cdot \operatorname {F P R} _ {s} \left(\tau^ {*} (c)\right), \tag {4}
118
+ $$
119
+
120
+ where $\pi = n_{+} / (n_{+} + n_{-})$ , $c = c_{+} / (c_{+} + c_{-})$ and $\tau^{*}(c)$ is optimal threshold for score function $s$ under specified $c$ [21], the sample number $n_{\tau} = n_{c}$ .
121
+
122
+ # 4 Problem Formulation
123
+
124
+ In this section, we introduce how to link the ROC curve to the cost space. First, we reformulate Eq.(3) into expectation:
125
+
126
+ $$
127
+ \mathrm {A U C} = \underset {\tau \sim U} {\mathbb {E}} \left[ \mathrm {T P R} _ {s} (\tau) \cdot \mathrm {F P R} _ {s} ^ {\prime} (\tau) \right] \tag {5a}
128
+ $$
129
+
130
+ $$
131
+ \operatorname {W A U C} = \underset {\tau \sim \mathcal {D} _ {\tau}} {\mathbb {E}} \left[ \mathrm {T P R} _ {s} (\tau) \cdot \mathrm {F P R} _ {s} ^ {\prime} (\tau) \right]. \tag {5b}
132
+ $$
133
+
134
+ If threshold $\tau$ is drawn from the uniform distribution $U$ , WAUC will degrade to the standard AUC formulation. However, AUC only describes the global mean performance under all possible costs. If we want to extend AUC to cost-sensitive problems, lifting the restriction on the uniform distribution of $\tau$ is a good solution. Hence, we relax the restriction on $\mathcal{D}_{\tau}$ so that it may belong to a complicated distribution (e.g., normal distribution, exponential distribution). Then we can extend AUC to WAUC. However, using WAUC raises another question: how do we get $\mathcal{D}_{\tau}$ ? We find that $\tau^{*}(c)$ is one of the parameters of $\mathcal{L}_{COST}(\boldsymbol{w}, c, \tau^{*}(c))$ . A natural idea is to use $\mathcal{L}_{COST}$ to solve for the optimal $\tau^{*}(c)$ and to combine the $\tau^{*}(c)$ solved for at different $c$ to obtain $\mathcal{D}_{\tau}$ .
135
+
136
+ $$
137
+ \tau^ {*} (c) = \arg \min _ {\tau} \mathcal {L} _ {C O S T} (\boldsymbol {w}, \tau , c) = c \cdot \pi \cdot \left(1 - \operatorname {T P R} _ {s} (\tau)\right) + (1 - c) \cdot (1 - \pi) \cdot \operatorname {F P R} _ {s} (\tau), \tag {6}
138
+ $$
139
+
140
+ If we couple Eq.(5b) and Eq.(6) together so that WAUC can enjoy the optimal threshold distribution in $\mathcal{L}_{COST}$ , then we can break the barrier between the ROC curve and the cost space. With the help of the threshold as a bridge, we can extend the AUC metric to achieve WAUC cost-sensitive learning. Then we give the problem formulation (intuitively, from the result of Fig. 2 (c), (OP0) satisfies both Attr.1 and Attr.2 simultaneously):
141
+
142
+ $$
143
+ \begin{array}{l} (O P 0) \quad (\text {o u t e r .}) \quad \mathrm {W A U C} = \underset {\tau \sim \tau^ {*}} {\mathbb {E}} \left[ \mathrm {T P R} _ {s} (\tau) \cdot \mathrm {F P R} _ {s} ^ {\prime} (\tau) \right] \\ (\text {i n n e r .}) \quad \tau^ {*} = \left\{\tau^ {*} (c) = \arg \min _ {\tau} \mathcal {L} _ {C O S T} (\boldsymbol {w}, \tau , c) | c \sim \mathcal {D} _ {c} \right\} \end{array} \tag {7}
144
+ $$
145
+
146
+ Nevertheless, there are still three main challenges in WAUC cost-sensitive learning:
147
+
148
+ (1) Given the scoring function $s$ and negative dataset $S_{-}$ , how to estimate $\mathrm{FPR}_s^\prime (\tau)$ in WAUC?
149
+ (2) The inner problem is nonconvex, which is hard to give a theoretical convergence guarantee.
150
+ (3) How to design a formulation that can bridge WAUC and $\mathcal{L}_{COST}$ so that WAUC can be optimized over the cost distribution of the desired problem scenario?
151
+
152
+ We will address the challenge (1) in Sec. 5.1, challenge (2) in Sec. 5.2 and challenge (3) in Sec. 5.3.
153
+
154
+ # 5 Methodology
155
+
156
+ # 5.1 The Estimation of False Positive Rate
157
+
158
+ For challenge (1), we choose kernel density estimation (KDE) to estimate $\mathrm{FPR}_s^\prime (\tau)$ and denote it as $K(x)$ (please see the definition in Sec. C.1). Then we can address the density estimation problem. However, Eq.(5b) still contains the non-differentiable and non-smooth term $\mathbb{I}_{(\cdot)}$ , which is hard to optimize. Hence, we propose the following smooth and differentiable WAUC estimator to approximate Eq.(5b).
159
+
160
Definition 5.1. Let $K(x)$ be a statistics kernel with bandwidth $m$ and $S_{-}^{\boldsymbol{w}} = \{s(\boldsymbol{w},\boldsymbol{x}_{j}^{-})\}_{j = 1}^{n_{-}}$ . With Lemma 5.2, we have the approximate estimator and loss function for WAUC:
161
+
162
+ $$
163
+ \widehat {\mathrm {W A U C}} = \int_ {- \infty} ^ {\infty} \operatorname {T P R} _ {s} (\tau) \mathcal {K} \left(S _ {-} ^ {\boldsymbol {w}}, \tau\right) p (\tau) d \tau , \quad \widehat {\mathcal {L}} _ {\mathrm {W A U C}} (\boldsymbol {w}, \tau) = \frac {1}{n _ {\tau}} \sum_ {l = 1} ^ {n _ {\tau}} \hat {h} (\boldsymbol {w}, \tau_ {l}) \tag {8}
164
+ $$
165
+
166
+ where $\tau = \{\tau_{l}\}_{l = 1}^{n_{\tau}}$ and the point loss $\hat{h}$ is defined by
167
+
168
+ $$
169
+ \hat {h} (\boldsymbol {w}, \tau) = 1 - \frac {1}{n _ {+} n _ {-}} \sum_ {i = 1} ^ {n _ {+}} \sum_ {j = 1} ^ {n _ {-}} \sigma \left(s \left(\boldsymbol {w}, \boldsymbol {x} _ {i} ^ {+}\right) - \tau\right) \cdot K \left(\left(s \left(\boldsymbol {w}, \boldsymbol {x} _ {j} ^ {-}\right) - \tau\right) / m\right) / m. \tag {9}
170
+ $$
171
+
172
+ $\sigma(x) = 1 / (1 + \exp(-\beta x))$ , $\beta$ is smooth parameter and we have $\sigma(x) \stackrel{\beta \to \infty}{\longrightarrow} \mathbb{I}_x$ .
173
+
174
+ Lemma 5.2. Given a scoring function $s$ , if $\tau$ is known, when the number of instances is large enough, $\widehat{\mathrm{WAUC}}$ almost surely converges to WAUC.
175
+
176
+ $$
177
+ \lim _ {n _ {-} \rightarrow \infty} | \widehat {\mathrm {W A U C}} - \mathrm {W A U C} | \xrightarrow {a . s .} 0. \tag {10}
178
+ $$
179
+
180
+ With KDE consistency [41], when the negative sample size is large enough, Lemma 5.2 provides theoretical approximation guarantees for our proposed WAUC estimator in Def. 5.1.
181
+
182
+ # 5.2 The Estimation of Threshold Weighting
183
+
184
+ For challenge (2), a natural idea is to use $\widehat{\mathcal{L}}_{COST}$ to solve for the optimal threshold set $\hat{\tau}^*$ when given the cost set $S_{c}$ and the scoring function $s$ . Then we can use the optimal threshold set $\hat{\tau}^*$ to calculate $\widehat{\mathrm{WAUC}}$ . Firstly, we define the solution for $\hat{\tau}^*$ be
185
+
186
+ $$
187
+ \hat {\boldsymbol {\tau}} ^ {*} = \left\{\hat {\tau} ^ {*} (c) | \hat {\tau} ^ {*} (c) \in \arg \min _ {\tau} \widehat {\mathcal {L}} _ {C O S T} (\boldsymbol {w}, c), c \in S _ {c} \right\}. \tag {11}
188
+ $$
189
+
190
+ However, it's noticed that the arg $\min_{\tau} \widehat{\mathcal{L}}_{COST}$ in Eq.(11) is non-convex. As we analyzed before, $\pmb{\tau}^{*}$ is the inner constraint of WAUC. To the best of our knowledge, there are few studies on optimizing two coupled non-convex problems simultaneously with theoretical convergence guarantees. Most studies on coupled optimization assume that the inner problems have good properties, such as strong convexity. Hence, we propose the approximated convex formulation of the inner problem for $\hat{\pmb{\tau}}^{*}$ .
191
+
192
+ Theorem 5.3. When we set $\kappa, M$ are large positive numbers and $M'^2 < M^2 \frac{6\kappa^2 e^{3\kappa}}{(e^\kappa + 1)^6}$ , then we have the approximated convex formulation for $\hat{\mathcal{L}}_{COST}$
193
+
194
+ $$
195
+ \begin{array}{l} \min _ {\tau , \boldsymbol {P} \in \mathbb {R} ^ {n _ {+}}, \boldsymbol {N} \in \mathbb {R} ^ {n _ {-}}} \widehat {\mathcal {L}} _ {e q} (\boldsymbol {w}, \tau , c) := c \cdot \pi \cdot \left(1 - \frac {1}{n _ {+}} \sum_ {i = 1} ^ {n _ {+}} P _ {i}\right) + (1 - c) \cdot (1 - \pi) \cdot \left(\frac {1}{n _ {-}} \sum_ {j = 1} ^ {n _ {-}} N _ {j}\right) \\ + \frac {1}{n _ {+}} \sum_ {i = 1} ^ {n _ {+}} M ^ {\prime} \psi \left(s \left(\boldsymbol {w}, \boldsymbol {x} _ {i} ^ {+}\right) - \tau\right) - P _ {i} \left(s \left(\boldsymbol {w}, \boldsymbol {x} _ {i} ^ {+}\right) - \tau\right)) + M \psi \left(P _ {i} - 1\right) + M \psi (\tau - 1) \tag {12} \\ + \frac {1}{n _ {-}} \sum_ {j = 1} ^ {n _ {-}} M ^ {\prime} \psi (s (\boldsymbol {w}, \boldsymbol {x} _ {j} ^ {-}) - \tau) - N _ {j} (s (\boldsymbol {w}, \boldsymbol {x} _ {j} ^ {-}) - \tau)) + M \psi (N _ {j} - 1) 0 \leq \tau , P _ {i}, N _ {j} \\ \end{array}
196
+ $$
197
+
198
+ where $\psi(x) = \log(1 + \exp(\kappa x)) / \kappa$ . $\widehat{\mathcal{L}}_{eq}$ in Eq.(12) is $\mu_g$ -strongly convex w.r.t. $\pmb{\tau}$ . Eq.(12) has the same solution as $\min_{\tau} \widehat{\mathcal{L}}_{COST}$ when the parameters satisfy the conditions of the penalty.
199
+
200
+ Thm. 5.3 provides an optimization method with good properties. Eq. (12) adopts the penalty function to convert inequality constraints into a part of the objective function. When these inequality constraints are not satisfied, the objective function will increase to infinity. Otherwise, we will get $P_{i} = \mathbb{I}[s(\boldsymbol{w}, x_{i}^{+}) > \tau]$ and $N_{j} = \mathbb{I}[s(\boldsymbol{w}, x_{j}^{-}) > \tau]$ , and then we will get the same formulation as $\widehat{\mathcal{L}}_{COST}$ . When the parameters meet the requirements, Eq.(12) has the same solution as $\widehat{\mathcal{L}}_{COST}$ . We give the proof of Thm. 5.3 and the definition of $\mu_g$ in Sec. C.4. Moreover, we give the analysis of the approximation error between $\widehat{\mathcal{L}}_{COST}$ and Thm. 5.3 in Sec. B.7.
201
+
202
+ # 5.3 Bilevel Optimization for WAUC learning
203
+
204
+ After answering questions in challenge (1) and (2), we have solved most of the problems in WAUC cost-sensitive learning. However, there remains a challenge (3) in optimization: How do we design learning paradigms to solve the coupled optimization problem of WAUC and $\mathcal{L}_{COST}$ ? In recent years, bilevel optimization has achieved remarkable success. This approach can combine two related optimization problems to form a coupled optimization formulation. Hence, with Prop. 5.1 and Thm.5.3, we propose a bilevel paradigm to formulate this coupled optimization problem.
205
+
206
+ $$
207
+ \begin{array}{l} \left(O P 1\right) \quad \text {(o u t e r .)} \quad \min _ {\boldsymbol {w}} \hat {F} (\boldsymbol {w}) := \hat {f} (\boldsymbol {w}, \boldsymbol {\tau} ^ {*}) := \widehat {\mathcal {L}} _ {\mathrm {W A U C}} (\boldsymbol {w}, \hat {\boldsymbol {\tau}} ^ {*}) \\ \text {(i n n e r .)} \quad \hat {\boldsymbol {\tau}} ^ {*} = \underset {\boldsymbol {\tau}, \boldsymbol {P} _ {a}, \boldsymbol {N} _ {a}} {\arg \min } \hat {g} (\boldsymbol {w}, \boldsymbol {\tau}) := \frac {1}{n _ {\tau}} \sum_ {l = 1} ^ {n _ {\tau}} \widehat {\mathcal {L}} _ {e q} (\boldsymbol {w}, \tau_ {l}, c _ {l}), \tag {13} \\ \end{array}
208
+ $$
209
+
210
+ where $P_{a} \in \mathbb{R}^{n_{\tau} \times n_{+}}$ and $N_{a} \in \mathbb{R}^{n_{\tau} \times n_{-}}$ . (OP1) describes a bilevel optimization formulation for WAUC cost-sensitive learning, where the inner-level provides a threshold optimization process, and the outer-level minimizes the WAUC loss over the optimal threshold distribution. Moreover, this formulation is consistent with the mainstream bilevel optimization problem (outer-level is smooth and non-convex, inner-level is convex and smooth), which enjoys a faster convergence rate.
211
+
212
+ # 6 Optimization Algorithm
213
+
214
+ In this section, we focus on optimizing (OP1) in an end-to-end manner. Hence, we propose a stochastic algorithm for WAUC cost-sensitive learning shown in Alg. 1, which is referred to as SAACL.
215
+
216
+ # 6.1 Main Idea of SAACL
217
+
218
+ We provide some intuitive explanations of our algorithm. At each iteration $k$ , SAACL alternates between the inner-level gradient update on $\tau$ and the outer-level gradient update on $w$ . During
219
+
220
+ Algorithm 1 Stochastic Algorithm for WAUC Cost-sensitive Learning
221
+
222
+ Input: training data $S_{+}$ and $S_{-}$ , iteration numbers $K$ and $T$ , batch size $B$ .
223
+
224
+ Initialize: parameters $\pmb{w}_0\in \mathbb{R}^n$ $\pmb {\tau}_0\in \mathbb{R}^{n_\tau}$ , stepsizes $\alpha_{k},\beta_{k}$
225
+
226
+ for $k = 0$ to $K$ do
227
+
228
+ set $\pmb{\tau}_{k,0} = \pmb{\tau}_k$
229
+
230
+ for $t = 0$ to $T$ do
231
+
232
+ draw $\mathcal{B}_t = \{(x_b,y_b)\}_{b = 1}^B$ from $S_{+}$ and $S_{-}$ uniformly.
233
+
234
+ $\forall c_{l}\in S_{c},\tau_{k,t + 1}^{l} = \tau_{k,t}^{l} - \beta_{k}\nabla_{\pmb{\tau}}\hat{g} (\pmb {w}_{k},c_{l};\mathcal{B}_{t}).$
235
+
236
+ end for
237
+
238
+ set $\pmb{\tau}_{k + 1} = \pmb{\tau}_{k,T}$
239
+
240
+ draw $\mathcal{B}_k = \{(x_b,y_b)\}_{b = 1}^B$ from $S_{+}$ and $S_{-}$ uniformly.
241
+
242
+ $\pmb{w}_{k + 1} = \pmb{w}_k - \alpha_k[\nabla_{\pmb{w}}\hat{f} (\pmb {w}_k,\pmb{\tau}_{k + 1};\mathcal{B}_k) - \nabla_{\pmb{w}\tau}^2\hat{g} (\pmb {w}_k,\pmb{\tau}_{k + 1};\mathcal{B}_k)\cdot$
243
+
244
+ $\left[\frac{N}{L_{g,1}}\prod_{n = 1}^{N^{\prime}}\left(I - \frac{1}{L_{g,1}}\nabla_{\pmb{\tau}\pmb{\tau}}^{2}\hat{g} (\pmb {w},\pmb{\tau}_{k + 1};\mathcal{B}_{k})^{-1}\right)\right]\nabla_{\pmb{\tau}}\hat{f} (\pmb {w}_k,\pmb {\tau}_{k + 1};\mathcal{B}_k)$
245
+
246
+ end for
247
+
248
+ iteration $k$ , we update $\pmb{\tau}_{k,t}$ with standard SGD $T$ steps to ensure that $\pmb{\tau}_{k+1}$ is as optimal as possible. After updating inner-level variables, we perform outer-level optimization with $\pmb{\tau}_{k+1}$ as the parameter to update $\pmb{w}_k$ . Notice that $T$ will not take a large value to ensure the validity of the coupling update of $\pmb{\tau}$ and $\pmb{w}$ . Let $\alpha_k$ and $\beta_k$ be stepsizes of $\pmb{w}$ and $\pmb{\tau}$ that have the same decrease rate as SGD. We denote $n$ be the number of elements in deep neural network parameters $\pmb{w}$ .
249
+
250
+ # 6.2 Convergence Analysis of SAACL
251
+
252
+ In this subsection, we present the convergence analysis for SAACL. We give some Lipschitz continuity assumptions that are common in bilevel optimization problems [11, 28].
253
+
254
+ Assumption 6.1. (Lipschitz continuity) Assume that $f$ , $\nabla f$ , and $\nabla g$ are respectively $L_{f,0}, L_{f,1}$ , $L_{g,1}$ -Lipschitz continuous.
255
+
256
+ Assumption 6.2. (Bounded stochastic derivatives) The variance of stochastic derivatives $\nabla f(\boldsymbol{w},\tau;\mathcal{B})$ and $\nabla g(\boldsymbol{w},\tau;\mathcal{B})$ are bounded by $\sigma_{f,1}^2$ , $\sigma_{g,1}^2$ , respectively.
257
+
258
+ Based on Assumption 6.1 and Assumption 6.2, following [5], Thm. 6.3 indicates that we can optimize (OP1) with the same convergence rate as the traditional SGD algorithm.
259
+
260
+ Theorem 6.3. Suppose Assumption 6.1 and 6.2 hold. We define
261
+
262
+ $$
263
+ \bar {\alpha} _ {1} = \frac {1}{2 L _ {F} + 4 L _ {f} L _ {y} + 2 L _ {f} L _ {y x} / \left(L _ {y} \eta\right)}, \quad \bar {\alpha} _ {2} = \frac {1 6 T \mu L _ {g , 1}}{\left(\mu + L _ {g , 1}\right) ^ {2} \left(8 L _ {f} L _ {y} + 2 \eta L _ {y x} \tilde {C} _ {f} ^ {2} \bar {\alpha} _ {1}\right)}, \tag {14}
264
+ $$
265
+
266
+ where $\eta = L_F / L_y$ , $L_F$ , $L_f$ , $L_y$ and $\tilde{C}_f^2$ come from Lem. 2 and Lem. 4 in [5]. We select the following stepsize as
267
+
268
+ $$
269
+ \alpha_ {k} = \min \left\{\bar {\alpha} _ {1}, \bar {\alpha} _ {2}, \frac {1}{\sqrt {K}} \right\} \quad \beta_ {k} = \frac {8 L _ {f} L _ {y} + 2 \eta L _ {y x} \tilde {C} _ {f} ^ {2} \bar {\alpha} _ {1}}{4 T \mu} \alpha_ {k} \tag {15}
270
+ $$
271
+
272
+ For any $T\geq 1$ , the iteration sequence $\{\pmb {w}_k\}_{k = 1}^K$ and $\{\pmb {\tau}_k\}_{k = 1}^K$ generated by Algorithm 1 satisfy
273
+
274
+ $$
275
+ \frac {1}{K} \sum_ {k = 0} ^ {K - 1} \mathbb {E} \left[ \| \nabla F (\boldsymbol {w} _ {k}) \| ^ {2} \right] \leq \gamma \left(\frac {3 M \kappa e ^ {\kappa} / (e ^ {\kappa} + 1) ^ {2} + L _ {g , 1}}{2 4 M \kappa e ^ {\kappa} / (e ^ {\kappa} + 1) ^ {2} L _ {g , 1}}\right) ^ {2} \frac {1}{T \sqrt {K}} + O \left(\frac {1}{\sqrt {K}}\right). \tag {16}
276
+ $$
277
+
278
+ where $\gamma = 2\alpha \sigma_{g,1}^{2}\frac{L_{f}}{L_{y}}\left(1 + 5L_{f}L_{y}\bar{\alpha}_{1} + \frac{\eta L_{yx}\tilde{C}_{f}^{2}}{4}\bar{\alpha}_{1}^{2}\right) \Big/ \left(8L_{f}L_{y} + 2\eta L_{yx}\tilde{C}_{f}^{2}\bar{\alpha}_{1}\right)^{2}$ .
279
+
280
+ Remark 6.4. When $\kappa$ and $M$ are large enough positive numbers, according to Eq. (16), Alg. 1 is still guaranteed to find an $\epsilon$ -stationary point within $O(\epsilon^{-4})$ iterations ( $\epsilon$ is the error tolerance).
281
+
282
+ # 7 Experiments
283
+
284
+ In this section, we conduct a series of experiments for WAUC cost-sensitive learning on common long-tail benchmark datasets. Due to space limitations, please refer to Sec. B for the details of our experiments. The source code is available in supplemental materials.
285
+
286
+ # 7.1 Dataset Details
287
+
288
+ We use three datasets: Binary CIFAR-10-Long-Tail Dataset [23], Binary CIFAR-100-Long-Tail Dataset [23], and Jane Street Market Prediction [14]. Binary CIFAR-10-Long-Tail Dataset and Binary CIFAR-100-Long-Tail Dataset are common datasets in long-tail learning, and we construct their cost distributions. Jane Street Market Prediction is data from real cost-sensitive learning application scenarios. For all datasets, we divide them into the training set, validation set, and test set with a proportion 0.7:0.15:0.15. All image data is normalized to ensure a more stable training process.
289
+
290
+ Table 2: Performance comparisons on benchmark datasets with different metrics. The first and second best results are highlighted with bold text and underline, respectively.
291
+
292
+ <table><tr><td rowspan="2">dataset</td><td rowspan="2">type</td><td rowspan="2">methods</td><td colspan="2">Subset1</td><td colspan="2">Subset2</td><td colspan="2">Subset3</td><td colspan="3">AUC↑</td></tr><tr><td>WAUC↑</td><td>\( \widehat{\mathcal{L}}_{COST} \)↓</td><td>WAUC↑</td><td>\( \widehat{\mathcal{L}}_{COST} \)↓</td><td>WAUC↑</td><td>\( \widehat{\mathcal{L}}_{COST} \)↓</td><td>Subset1</td><td>Subset2</td><td>Subset3</td></tr><tr><td rowspan="12">CIFAR-10-LT</td><td rowspan="10">Competitors</td><td>BCE</td><td>0.525</td><td>0.027</td><td>0.533</td><td>0.015</td><td>0.318</td><td>0.029</td><td>0.822</td><td>0.960</td><td>0.870</td></tr><tr><td>ExAUC</td><td>0.516</td><td>0.029</td><td>0.518</td><td>0.013</td><td>0.366</td><td>0.028</td><td>0.845</td><td>0.963</td><td>0.858</td></tr><tr><td>SqAUC</td><td>0.407</td><td>0.028</td><td>0.548</td><td>0.012</td><td>0.327</td><td>0.031</td><td>0.811</td><td>0.933</td><td>0.867</td></tr><tr><td>NWAUC</td><td>0.565</td><td>0.030</td><td>0.574</td><td>0.017</td><td>0.396</td><td>0.027</td><td>0.786</td><td>0.885</td><td>0.827</td></tr><tr><td>PAUC-exp</td><td>0.549</td><td>0.029</td><td>0.508</td><td>0.015</td><td>0.354</td><td>0.028</td><td>0.650</td><td>0.801</td><td>0.736</td></tr><tr><td>PAUC-poly</td><td>0.526</td><td>0.028</td><td>0.470</td><td>0.015</td><td>0.354</td><td>0.029</td><td>0.661</td><td>0.812</td><td>0.742</td></tr><tr><td>PAUCI</td><td>0.516</td><td>0.027</td><td>0.520</td><td>0.015</td><td>0.382</td><td>0.028</td><td>0.704</td><td>0.847</td><td>0.734</td></tr><tr><td>CS-hinge</td><td>0.566</td><td>0.026</td><td>0.633</td><td>0.010</td><td>0.377</td><td>0.022</td><td>0.675</td><td>0.782</td><td>0.762</td></tr><tr><td>AdaCOS</td><td>0.576</td><td>0.025</td><td>0.559</td><td>0.014</td><td>0.391</td><td>0.023</td><td>0.758</td><td>0.873</td><td>0.742</td></tr><tr><td>ECL</td><td>0.589</td><td>0.026</td><td>0.561</td><td>0.014</td><td>0.388</td><td>0.020</td><td>0.694</td><td>0.918</td><td>0.762</td></t
r><tr><td rowspan="2">Our method</td><td>WAUC-Gau</td><td>0.679</td><td>0.024</td><td>0.660</td><td>0.012</td><td>0.467</td><td>0.015</td><td>0.787</td><td>0.934</td><td>0.843</td></tr><tr><td>WAUC-Log</td><td>0.653</td><td>0.023</td><td>0.674</td><td>0.011</td><td>0.468</td><td>0.014</td><td>0.820</td><td>0.958</td><td>0.869</td></tr><tr><td rowspan="12">CIFAR-100-LT</td><td rowspan="10">Competitors</td><td>BCE</td><td>0.556</td><td>0.022</td><td>0.463</td><td>0.012</td><td>0.512</td><td>0.019</td><td>0.912</td><td>0.957</td><td>0.806</td></tr><tr><td>ExAUC</td><td>0.522</td><td>0.019</td><td>0.502</td><td>0.011</td><td>0.506</td><td>0.017</td><td>0.933</td><td>0.967</td><td>0.833</td></tr><tr><td>SqAUC</td><td>0.483</td><td>0.024</td><td>0.367</td><td>0.015</td><td>0.474</td><td>0.018</td><td>0.889</td><td>0.955</td><td>0.855</td></tr><tr><td>NWAUC</td><td>0.654</td><td>0.025</td><td>0.511</td><td>0.016</td><td>0.631</td><td>0.019</td><td>0.867</td><td>0.925</td><td>0.807</td></tr><tr><td>PAUC-exp</td><td>0.464</td><td>0.020</td><td>0.282</td><td>0.014</td><td>0.469</td><td>0.016</td><td>0.826</td><td>0.811</td><td>0.787</td></tr><tr><td>PAUC-poly</td><td>0.461</td><td>0.022</td><td>0.262</td><td>0.017</td><td>0.473</td><td>0.017</td><td>0.828</td><td>0.887</td><td>0.791</td></tr><tr><td>PAUCI</td><td>0.549</td><td>0.018</td><td>0.439</td><td>0.016</td><td>0.514</td><td>0.018</td><td>0.812</td><td>0.843</td><td>0.822</td></tr><tr><td>CS-hinge</td><td>0.523</td><td>0.017</td><td>0.457</td><td>0.010</td><td>0.515</td><td>0.014</td><td>0.734</td><td>0.910</td><td>0.716</td></tr><tr><td>AdaCOS</td><td>0.590</td><td>0.018</td><td>0.474</td><td>0.011</td><td>0.587</td><td>0.016</td><td>0.769</td><td>0.919</td><td>0.727</td></tr><tr><td>ECL</td><td>0.583</td><td>0.017</td><td>0.497</td><td>0.009</td><td>0.595</td><td>0.015</td><td>0.863</td><td>0.939</td><td>0.794</td></tr><tr><td rowspan="2">Our 
method</td><td>WAUC-Gau</td><td>0.745</td><td>0.015</td><td>0.589</td><td>0.005</td><td>0.728</td><td>0.013</td><td>0.842</td><td>0.928</td><td>0.745</td></tr><tr><td>WAUC-Log</td><td>0.719</td><td>0.012</td><td>0.560</td><td>0.003</td><td>0.745</td><td>0.010</td><td>0.906</td><td>0.960</td><td>0.875</td></tr></table>
293
+
294
+ Table 3: Performance comparisons on benchmark datasets in a real-world cost-sensitive problem. Profit represents the money earned by the model over the entire trading period.
295
+
296
+ <table><tr><td>Methods</td><td>BCE</td><td>ExAUC</td><td>SqAUC</td><td>NWAUC</td><td>PAUC-exp</td><td>PAUC-poly</td><td>PAUCI</td><td>CS-hinge</td><td>AdaCOS</td><td>ECL</td><td>WAUC-Gau</td><td>WAUC-Log</td></tr><tr><td>VAUC †</td><td>0.5427</td><td>0.594 ± .003</td><td>0.508 ± .005</td><td>0.562 ± .002</td><td>0.576 ± .004</td><td>0.481 ± .005</td><td>0.529 ± .006</td><td>0.592 ± .002</td><td>0.6527 ± .005</td><td>0.625 ± .004</td><td>0.698 ± .002</td><td>0.675 ± .001</td></tr><tr><td>LcostT ↓</td><td>0.254 ± .004</td><td>0.269 ± .005</td><td>0.246 ± .003</td><td>0.251 ± .002</td><td>0.270 ± .004</td><td>0.246 ± .004</td><td>0.243 ± .001</td><td>0.237 ± .006</td><td>0.229 ± .004</td><td>0.226 ± .007</td><td>0.209 ± .003</td><td>0.213 ± .002</td></tr><tr><td>AUC ↑</td><td>0.528 ± .005</td><td>0.539 ± .004</td><td>0.526 ± .005</td><td>0.520 ± .004</td><td>0.529 ± .002</td><td>0.519 ± .005</td><td>0.510 ± .003</td><td>0.522 ± .005</td><td>0.5246 ± .002</td><td>0.530 ± .003</td><td>0.526 ± .002</td><td>0.5235 ± .003</td></tr><tr><td>Profit ↑</td><td>4955 ± 20.14</td><td>5468 ± 17.90</td><td>5183 ± 30.91</td><td>5395 ± 22.48</td><td>5418 ± 14.06</td><td>4862 ± 28.04</td><td>4963 ± 15.09</td><td>5583 ± 30.05</td><td>5839 ± 34.92</td><td>5764 ± 25.09</td><td>6526 ± 15.98</td><td>6308 ± 16.09</td></tr></table>
297
+
298
+ # 7.2 Overall Performance
299
+
300
+ In Tab. 2 and Tab. 3, we collect all the methods' performance on test sets of three types of datasets. For cost distribution of $c$ , we sample some data from a normal distribution $\mathcal{N}(0.5,1)$ to construct a dataset $S_{c}$ (we clip all data to [0,1]). We also conduct numerous experiments for other types of distribution of $c$ , and please see Appendix B for the details. From the results, we make the following observations:
301
+
302
+ (1) For the WAUC and $\widehat{\mathcal{L}}_{COST}$ metrics, our proposed algorithm achieves superior performance on most benchmark datasets compared to other methods. This demonstrates that our proposed WAUC cost-sensitive learning can extend the ROC curve into the cost space. Models trained with our proposed bilevel optimization formulation can enjoy high WAUC and cost-related metrics.
303
+ (2) AUC and cost-related metrics are inconsistent. From the high-performing heatmap of Tab. 2, it can be noticed that $\widehat{\mathcal{L}}_{COST}$ and $\widehat{\mathbf{AUC}}$ have two completely different highlight regions. This indicates that the assumption of uniform distribution of AUC does not match the realistic scenario.
304
+ (3) We also find that AUC-related and traditional classification algorithms do not perform well in cost-sensitive problems. This means that if we first train the model with a classification algorithm and subsequently use the cost function to solve for the optimal decision threshold, this two-stage decision method does not work well. Meanwhile, an algorithm that can learn from scratch has better scalability. Therefore, designing one-stage algorithms for WAUC cost-sensitive learning is necessary.
305
+
306
+ # 7.3 Sensitivity Analysis
307
+
308
+ In this subsection, we show the sensitivity of $\beta, T$ , and bandwidth on test data.
309
+
310
+ Effect of $\beta$ . In Fig. 3 (a) and (d), we observe that for both WAUC and cost metrics, when $\beta$ closes to 7, the model will have the largest performance improvement and the lowest variance. This can be explained in two ways: (1) When the $\beta$ is too small, the error between the $\sigma(x)$ and the 0-1 loss function is large, resulting in a large approximation error between the WAUC and the WAUC. (2) When the $\beta$ is too large, the gradient also tends to be 0. Therefore, choosing a beta value that trades off the approximation error and the gradient is essential.
311
+
312
+ ![](images/625fbe32e73ff09152506ed3beb029f4c189eda9a82679586ac72e7ecd28410b.jpg)
313
+ (a) The effect of $\beta$ on WAUC
314
+
315
+ ![](images/d152d53300c64c6fc81d0adf94878ddf0380ad44ca55bfd1e7da86cda4d9badb.jpg)
316
+ (b) The effect of $T$ on WAUC
317
+
318
+ ![](images/4ed64cf950ddf3d7ba90a326d3f7c997980eaf01d16e11d5a3e3d0b109076cbf.jpg)
319
+ (c) The effect of $m$ on WAUC
320
+
321
+ ![](images/d91bf0c36fca48a6a498f55247c8d4ae88e6cd6b899b1a2bfe8d61e7610169fe.jpg)
322
+ (d) The effect of $\beta$ on cost
323
+
324
+ ![](images/75b7e2868e9e1f69686bb1b2603be2c8d47f9d37d76d98d5211d4ae439ec7f61.jpg)
325
+ (e) The effect of $T$ on cost
326
+
327
+ ![](images/c694000c590a10eea8d18c7700304eec705cb9821800e5a50f3a877c7fc23ce7.jpg)
328
+ (f) The effect of $m$ on cost
329
+ Figure 3: Sensitivity analysis on test data where WAUC and cost for WAUC-Gau with respect to $\beta$ , $T$ , and bandwidth. The other two variables are fixed for each box in the plots, and the scattered points along the box show the variation.
330
+
331
+ Effect of $T$ . As we mentioned in the Section 6.1, choosing a smaller $T$ can effectively improve the performance of the model. However, as shown in Fig. 3(e), a larger $T$ value can reduce the variance. Hence, as set in our experiments, $T = 3$ is a good choice to ensure the average performance and variance of the model.
332
+
333
+ Effect of $m$ . From Fig. 3(c), we find that the kernel's bandwidth strongly influences the model's performance. The model's bandwidth and performance are almost proportional; the closer the bandwidth is to [0.4, 0.5], the better the effect; otherwise, the effect is worse. This indicates that our proposed method is sensitive to the bandwidth parameter, which also compounds the bandwidth characteristics in the KDE method.
334
+
335
+ # 8 Conclusion
336
+
337
+ This paper focuses on extending the traditional AUC metric to associate with misclassification costs. Restricted by the assumption of cost distribution, existing settings could not describe the model's performance in the complicated cost-sensitive scenario. To address this problem, we propose a novel setting that treats the cost as sampled data. We employ the WAUC metric and propose a novel estimator to approximate it. With the help of threshold weighting, we establish the correspondence between WAUC and the cost function. To describe this connection, we present a bilevel optimization formulation to couple them, where the inner-level problem provides a threshold optimization process, and the outer-level minimizes the WAUC loss based on the inner thresholds. This paradigm ensures that the WAUC can always be optimized at the optimal threshold value based on the complicated cost distribution in reality. Moreover, we propose a stochastic algorithm to optimize this formulation. We prove that our algorithm enjoys the same convergence rate as standard SGD. Finally, numerous
338
+
339
+ experiments have shown that our method can extend AUC to cost-sensitive scenarios with significant performance.
340
+
341
+ # Acknowledgements
342
+
343
+ This work was supported in part by the National Key R&D Program of China under Grant 2018AAA0102000, in part by National Natural Science Foundation of China: 62236008, U21B2038, U2001202, 61931008, 62122075, 61976202, and 62206264, in part by the Fundamental Research Funds for the Central Universities, in part by Youth Innovation Promotion Association CAS, in part by the Strategic Priority Research Program of Chinese Academy of Sciences (Grant No. XDB28000000) and in part by the Innovation Funding of ICT, CAS under Grant No. E000000.
344
+
345
+ # References
346
+
347
+ [1] J A, Hanley, B J, and McNeil. The meaning and use of the area under a receiver operating characteristic (roc) curve. Radiology, 1982.
348
+ [2] Daniel Andrade and Yuzuru Okajima. Efficient bayes risk estimation for cost-sensitive classification. In The 22nd international conference on artificial intelligence and statistics, pages 3372-3381. PMLR, 2019.
349
+ [3] Jerome Bracken and James T McGill. Mathematical programs with optimization problems in the constraints. Operations Research, 21(1):37-44, 1973.
350
+ [4] Nontawat Charoenphakdee, Zhenghang Cui, Yivan Zhang, and Masashi Sugiyama. Classification with rejection based on cost-sensitive classification. In International Conference on Machine Learning, pages 1507-1517. PMLR, 2021.
351
+ [5] Tianyi Chen, Yuejiao Sun, and Wotao Yin. Tighter analysis of alternating stochastic gradient method for stochastic nested problems. arXiv preprint arXiv:2106.13781, 2021.
352
+ [6] Benoit Colson, Patrice Marcotte, and Gilles Savard. An overview of bilevel optimization. Annals of operations research, 153(1):235-256, 2007.
353
+ [7] Corinna Cortes and Mehryar Mohri. AUC optimization vs. error rate minimization. In Advances in Neural Information Processing Systems, pages 313-320, 2003.
354
+ [8] Yin Cui, Menglin Jia, Tsung-Yi Lin, Yang Song, and Serge Belongie. Class-balanced loss based on effective number of samples. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9268-9277, 2019.
355
+ [9] Tom Fawcett. An introduction to roc analysis. Pattern recognition letters, 27(8):861-874, 2006.
356
+ [10] Alberto Fernández, Salvador García, Mikel Galar, Ronaldo C Prati, Bartosz Krawczyk, Francisco Herrera, Alberto Fernández, Salvador García, Mikel Galar, Ronaldo C Prati, et al. Cost-sensitive learning. Learning from Imbalanced Data Sets, pages 63-78, 2018.
357
+ [11] Luca Franceschi, Paolo Frasconi, Saverio Salzo, Riccardo Grazzi, and Massimiliano Pontil. Bilevel programming for hyperparameter optimization and meta-learning. In International Conference on Machine Learning, pages 1568-1577. PMLR, 2018.
358
+ [12] Yoav Freund, Raj Iyer, Robert E Schapire, and Yoram Singer. An efficient boosting algorithm for combining preferences. Journal of Machine Learning Research, 4:933-969, 2003.
359
+ [13] Thore Graepel, Klaus Obermayer, et al. Large margin rank boundaries for ordinal regression. In Advances in Large Margin Classifiers, pages 115-132. 2000.
360
+ [14] Jane Street Group. Jane street market prediction. https://www.kaggle.com/competitions/jane-street-market-prediction/overview, 2021.
361
+ [15] Zhishuai Guo, Mingrui Liu, Zhuoning Yuan, Li Shen, Wei Liu, and Tianbao Yang. Communication-efficient distributed stochastic auc maximization with deep neural networks. In International Conference on Machine Learning, pages 3864-3874, 2020.
362
+
363
+ [16] David J Hand. Measuring classifier performance: a coherent alternative to the area under the roc curve. Machine learning, 77(1):103-123, 2009.
364
+ [17] James A Hanley and Barbara J McNeil. The meaning and use of the area under a receiver operating characteristic (roc) curve. Radiology, 143(1):29-36, 1982.
365
+ [18] James A Hanley and Barbara J McNeil. A method of comparing the areas under receiver operating characteristic curves derived from the same cases. Radiology, 148(3):839-843, 1983.
366
+ [19] Huaying Hao, Huazhu Fu, Yanwu Xu, Jianlong Yang, Fei Li, Xiulan Zhang, Jiang Liu, and Yitian Zhao. Open-narrow-synechiae anterior chamber angle classification in as-oct sequences. arXiv preprint arXiv:2006.05367, 2020.
367
+ [20] José Hernández-Orallo, Peter Flach, and César Ferri Ramírez. A unified view of performance metrics: Translating threshold choice into expected classification loss. Journal of Machine Learning Research, 13:2813–2869, 2012.
368
+ [21] José Hernández-Orallo, Peter Flach, and César Ferri. Roc curves in cost space. Machine learning, 93(1):71-91, 2013.
369
+ [22] Thorsten Joachims. A support vector method for multivariate performance measures. In International Conference on Machine Learning, pages 377-384, 2005.
370
+ [23] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
371
+ [24] Yunwen Lei and Yiming Ying. Stochastic proximal auc maximization. JMLP, 22(61):1-45, 2021.
372
+ [25] Jialiang Li and Jason P Fine. Weighted area under the receiver operating characteristic curve and its application to gene selection. Journal of the Royal Statistical Society: Series C (Applied Statistics), 59(4):673-692, 2010.
373
+ [26] Mingrui Liu, Xiaoxuan Zhang, Zaiyi Chen, Xiaoyu Wang, and Tianbao Yang. Fast stochastic auc maximization with $o(1/n)$ -convergence rate. In ICML, pages 3189-3197. PMLR, 2018.
374
+ [27] Mingrui Liu, Zhuoning Yuan, Yiming Ying, and Tianbao Yang. Stochastic AUC maximization with deep neural networks. In International Conference on Learning Representations, 2020.
375
+ [28] Risheng Liu, Pan Mu, Xiaoming Yuan, Shangzhi Zeng, and Jin Zhang. A generic first-order algorithmic framework for bi-level programming beyond lower-level singleton. In International Conference on Machine Learning, pages 6305-6315. PMLR, 2020.
376
+ [29] Wen Liu, Weixin Luo, Dongze Lian, and Shenghua Gao. Future frame prediction for anomaly detection-a new baseline. In CVPR, pages 6536-6545, 2018.
377
+ [30] Andreas Maurer and Massimiliano Pontil. Estimating weighted areas under the roc curve. Advances in Neural Information Processing Systems, 33:7733-7742, 2020.
378
+ [31] Donna Katzman McClish. Analyzing a portion of the roc curve. Medical decision making, 9(3): 190-195, 1989.
379
+ [32] Charles E Metz. Basic principles of roc analysis. In Seminars in nuclear medicine, volume 8, pages 283-298. Elsevier, 1978.
380
+ [33] Harikrishna Narasimhan and Shivani Agarwal. Support vector algorithms for optimizing the partial area under the roc curve. Neural Computation, 29(7):1919-1963, 2017.
381
+ [34] Sakrapee Paisitkriangkrai, Chunhua Shen, and Anton Van Den Hengel. Efficient pedestrian detection by directly optimizing the partial area under the roc curve. In Proceedings of the IEEE international conference on computer vision, pages 1057-1064, 2013.
382
+ [35] Margaret Sullivan Pepe and Mary Lou Thompson. Combining diagnostic test results to increase accuracy. Biostatistics, 1(2):123-140, 2000.
383
+
384
+ [36] Alain Rakotomamonjy. Support vector machines and area under roc curve. PSI-INSA de Rouen: Technical Report, 2004.
385
+ [37] Shoham Sabach and Shimrit Shtern. A first order method for solving convex bilevel optimization problems. SIAM Journal on Optimization, 27(2):640-660, 2017.
386
+ [38] HuiYang Shao, Qianqian Xu, Zhiyong Yang, Shilong Bao, and Qingming Huang. Asymptotically unbiased instance-wise regularized partial auc optimization: Theory and algorithm. In Advances in Neural Information Processing Systems, 2022.
387
+ [39] Luis N Vicente and Paul H Calamai. Bilevel and multilevel programming: A bibliography review. Journal of Global optimization, 5(3):291-306, 1994.
388
+ [40] Sam Wieand, Mitchell H Gail, Barry R James, and Kang L James. A family of nonparametric statistics for comparing diagnostic markers with paired or unpaired data. Biometrika, 76(3): 585-592, 1989.
389
+ [41] Dominik Wied and Rafael Weibach. Consistency of the kernel density estimator: a survey. Statistical Papers, 53(1):1-21, 2012.
390
+ [42] Lian Yan, Robert H Dodier, Michael Mozer, and Richard H Wolniewicz. Optimizing classifier performance via an approximation to the wilcoxon-mann-whitney statistic. In International Conference on Machine Learning, pages 848-855, 2003.
391
+ [43] Tianbao Yang and Yiming Ying. Auc maximization in the era of big data and ai: A survey. arXiv preprint arXiv:2203.15046, 2022.
392
+ [44] Zhenhuan Yang, Wei Shen, Yiming Ying, and Xiaoming Yuan. Stochastic auc optimization with general loss. CPAA, 19(8), 2020.
393
+ [45] Zhiyong Yang, Qianqian Xu, Shilong Bao, Yuan He, Xiaochun Cao, and Qingming Huang. When all we need is a piece of the pie: A generic framework for optimizing two-way partial auc. In International Conference on Machine Learning, pages 11820-11829, 2021.
394
+ [46] Yao Yao, Qihang Lin, and Tianbao Yang. Large-scale optimization of partial auc in a range of false positive rates. arXiv preprint arXiv:2203.01505, 2022.
395
+ [47] Yiming Ying, Longyin Wen, and Siwei Lyu. Stochastic online auc maximization. Advances in Neural Information Processing Systems, 29:451-459, 2016.
396
+ [48] Zhuoning Yuan, Zhishuai Guo, Nitesh Chawla, and Tianbao Yang. Compositional training for end-to-end deep auc maximization. In International Conference on Learning Representations, 2021.
397
+ [49] Adriano Z Zambom and Dias Ronaldo. A review of kernel density estimation with applications to econometrics. International Econometric Review, 5(1):20-42, 2013.
398
+ [50] Dixian Zhu, Gang Li, Bokun Wang, Xiaodong Wu, and Tianbao Yang. When auc meets dro: Optimizing partial auc for deep learning with non-convex convergence guarantee. arXiv preprint arXiv:2203.00176, 2022.
weightedroccurveincostspaceextendingauctocostsensitivelearning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:533e3d0fda7932f8cceca06285950dfc2cc3dc37ce909c6b63967a7344b4188e
3
+ size 560874
weightedroccurveincostspaceextendingauctocostsensitivelearning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d9cc7d9639aa6ca8a92087916a06237e246c443cf54889fc16fc76d27a73bfb
3
+ size 535087
weitzmansruleforpandorasboxwithcorrelations/a7f0b3be-a11f-4a26-ba19-8c7b79febafe_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c572c71fe689ca53575fce96c26f5632de554f5a12d24b2fe7386182649b937b
3
+ size 136259
weitzmansruleforpandorasboxwithcorrelations/a7f0b3be-a11f-4a26-ba19-8c7b79febafe_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1004bb95c2d31d031e5d69709d5f571366926c4c8da9209d92b5f6dd7d593383
3
+ size 162958
weitzmansruleforpandorasboxwithcorrelations/a7f0b3be-a11f-4a26-ba19-8c7b79febafe_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d59d1adff7484ab28f72c39730e56c74688bfeee00478017d973e3037214cbcb
3
+ size 485724
weitzmansruleforpandorasboxwithcorrelations/full.md ADDED
@@ -0,0 +1,665 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Weitzman's Rule for Pandora's Box with Correlations
2
+
3
+ Evangelia Gergatsouli
4
+
5
+ University of Wisconsin-Madison
6
+
7
+ evagers@cs.wisc.edu
8
+
9
+ Christos Tzamos
10
+
11
+ University of Wisconsin-Madison
12
+
13
+ & University of Athens
14
+
15
+ tzamos@wisc.edu
16
+
17
+ # Abstract
18
+
19
+ PANDORA'S BOX is a central problem in decision making under uncertainty that can model various real life scenarios. In this problem we are given $n$ boxes, each with a fixed opening cost, and an unknown value drawn from a known distribution, only revealed if we pay the opening cost. Our goal is to find a strategy for opening boxes to minimize the sum of the value selected and the opening cost paid.
20
+
21
+ In this work we revisit PANDORA's BOX when the value distributions are correlated, first studied in Chawla et al. [2020]. We show that the optimal algorithm for the independent case, given by Weitzman's rule, directly works for the correlated case. In fact, our algorithm results in significantly improved approximation guarantees compared to the previous work, while also being substantially simpler. We also show how to implement the rule given only sample access to the correlated distribution of values. Specifically, we find that a number of samples that is polynomial in the number of boxes is sufficient for the algorithm to work.
22
+
23
+ # 1 Introduction
24
+
25
+ In various minimization problems where uncertainty exists in the input, we are allowed to obtain information to remove this uncertainty by paying an extra price. Our goal is to sequentially decide which piece of information to acquire next, in order to minimize the sum of the search cost and the value of the option we chose.
26
+
27
+ This family of problems is naturally modeled by PANDORA'S BOX, first formulated by Weitzman [1979] in an economics setting, with multiple applications in consumer search, housing markets and job search (see [McCall and McCall, 2007] for more applications). In this problem we are given $n$ boxes, each containing a value drawn from a known distribution and each having a fixed known opening cost. We can only see the exact value realized in a box if we open it and pay the opening cost. Our goal is to minimize the sum of the value we select and the opening costs of the boxes we opened.
28
+
29
+ In the original work of Weitzman, an optimal solution was proposed when the distributions on the values of the boxes were independent [Weitzman, 1979]. This algorithm was based on calculating a reservation value $(\sigma)$ for each box, and then choosing the box with the lowest reservation value to open at every step. Independence, however, is an unrealistic assumption in real life; in a housing market neighboring houses' prices are affected the same way, or in a job search setting, candidates might share qualifications that affect them similarly. Wanting to tackle a more realistic setting, Chawla et al. [2020] first studied the problem where the distributions are correlated, and designed an algorithm giving a constant approximation guarantee. This algorithm is quite involved: it requires solving an LP to convert the PANDORA's BOX instance to a MIN SUM SET COVER one, and then solving this instance to obtain an ordering of opening the boxes. Finally, it reduces the problem of deciding when to stop to an online algorithm question corresponding to SKI-RENTAL.
30
+
31
+ # 1.1 Our Contribution
32
+
33
+ In this work we revisit PANDORA's BOX with correlations, and provide simpler, learnable algorithms with better approximation guarantees, that directly generalize Weitzman's reservation values. More specifically, our results are the following.
34
+
35
+ - Generalizing: we first show how the original reservation values given by Weitzman [1979] can be generalized to work in correlated distributions, thus allowing us to use a version of their initial greedy algorithm.
36
+ - Better approximation: we give two different variants of our main algorithm, that each uses different updates on the distribution $\mathcal{D}$ after every step.
37
+
38
+ 1. Variant 1: partial updates. We condition on the algorithm not having stopped yet.
39
+ 2. Variant 2: full updates. We condition on the exact value $v$ revealed in the box opened.
40
+
41
+ Both variants improve the approximation given by Chawla et al. [2020] from 9.22 to 4.428 for Variant 1 and to 5.828 for Variant 2. It is worth noting that our result for Variant 1 is almost tight, since the best possible approximation factor we can obtain is 4, implied by Feige [1998]. We include more details on the lower bound in Section A.4 of the Appendix.
42
+
43
+ - Simplicity: our algorithms are greedy and only rely on the generalized version of the reservation value, while the algorithms in previous work rely on solving a linear program, and reducing first to MIN SUM SET COVER then to SKI-RENTAL, making them not straightforward to implement. A 9.22 approximation was also given in Gergatsouli and Tzamos [2022], which followed the same approach but bypassed the need to reduce to MIN SUM SET COVER by directly rounding the linear program via randomized rounding.
44
+ - Learnability: we show how given sample access to the correlated distribution $\mathcal{D}$ we are able to still maintain the approximation guarantees. Specifically, for Variant 1 only $\mathrm{poly}(n,1 / \varepsilon ,\log (1 / \delta))$ samples are enough to obtain $4.428 + \varepsilon$ approximation with probability at least $1 - \delta$ . Variant 2 is however impossible to learn.
45
+
46
+ Our analysis is enabled by drawing similarities from PANDORA's BOX to MIN SUM SET COVER, which corresponds to the special case of when the values inside the boxes are 0 or $\infty$ . For MIN SUM SET COVER a simple greedy algorithm was shown to achieve the optimal 4-approximation [Feige et al., 2002]. Surprisingly, Weitzman's algorithm can be seen as a direct generalization of that algorithm. Our analysis follows the histogram method introduced in Feige et al. [2002], for bounding the approximation ratio. However, we significantly generalize it to handle values in the boxes and work with tree-histograms required to handle the case with full-updates.
47
+
48
+ # 1.2 Related Work
49
+
50
+ Since Weitzman's initial work [Weitzman, 1979] on PANDORA's BOX there has been a renewed interest in studying this problem in various settings. Specifically Doval [2018], Beyhaghi and Kleinberg [2019], Beyhaghi and Cai [2023a], Fu et al. [2023] study PANDORA's BOX when we can select a box without paying for it (non-obligatory inspection), in Boodaghians et al. [2020] there are tree or line constraints on the order in which the boxes can be opened. In Chawla et al. [2020, 2021] the distributions on the values inside the boxes are correlated and the goal is to minimize the search and value cost, while finally in Bechtel et al. [2022] the task of searching over boxes is delegated by an agent to a principal, while the agent makes the final choice. The recent work of Chawla et al. [2020] is the first one that explores the correlated distributions variant and gives the first approximation guarantees. The recent survey by Beyhaghi and Cai [2023b] summarizes the recent work on Pandora's Box and its variants.
51
+
52
+ This problem can be seen as being part of the "price of information" literature [Charikar et al., 2000, Gupta and Kumar, 2001, Chen et al., 2015b,a], where we can remove part of the uncertainty of the problem at hand by paying a price. In this line of work, more recent papers study the structure of approximately optimal rules for combinatorial problems [Goel et al., 2006, Gupta and Nagarajan, 2013, Adamczyk et al., 2016, Gupta et al., 2016, 2017, Singla, 2018, Gupta et al., 2019].
53
+
54
+ For the special case of MIN SUM SET COVER, since the original work of Feige et al. [2002], there have been many follow-ups and generalizations where every set has a requirement of how many elements
55
+
56
+ contained in it we need to choose [Azar et al., 2009, Bansal et al., 2010, Azar and Gamzu, 2011, Skutella and Williamson, 2011, Im et al., 2014].
57
+
58
+ Note also that multiple results on problems related to Pandora's box have been published in ML-related conferences, as this is a problem that encompasses both algorithmic and learning aspects (e.g. Esfandiari et al. [2019], Gergatsouli and Tzamos [2022], Bhaskara et al. [2020], Cesa-Bianchi et al. [2021], Guo et al. [2021]).
59
+
60
+ # 2 Preliminaries
61
+
62
+ In PANDORA's BOX $(\mathcal{PB})$ we are given a set of $n$ boxes $\mathcal{B}$ , each with a known opening cost $c_{b} \in \mathbb{R}^{+}$ and a distribution $\mathcal{D}$ over a vector of unknown values $\boldsymbol{v} = (v_{1}, \ldots, v_{n}) \in \mathbb{R}_{+}^{n}$ inside the boxes. Each box $b \in \mathcal{B}$ , once it is opened, reveals the value $v_{b}$ . The algorithm can open boxes sequentially, by paying the opening cost each time, and observe the value instantiated inside the box. The goal of the algorithm is to choose a box of small value, while spending as little cost as possible "opening" boxes. Formally, denoting by $\mathcal{O} \subseteq \mathcal{B}$ the set of opened boxes, we want to minimize
63
+
64
+ $$
65
+ \mathbb {E} _ {v \sim \mathcal {D}} \left[ \min _ {b \in \mathcal {O}} v _ {b} + \sum_ {b \in \mathcal {O}} c _ {b} \right].
66
+ $$
67
+
68
+ A strategy for PANDORA's BOX is an algorithm that in every step decides which is the next box to open and when to stop. We measure the performance of our algorithm using the competitive (or approximation) ratio; a strategy $\mathcal{A}$ is $\alpha$ -approximation if $\mathbb{E}[\mathcal{A}] \leq \alpha \mathrm{OPT}$ , where $\mathrm{OPT}$ is the optimal online algorithm<sup>1</sup>
69
+
70
+ A strategy can pick any open box to select at any time. To model this, we assume without loss of generality that after a box is opened the opening cost becomes 0, allowing us to select the value without opening it again. In its full generality, a strategy can make decisions based on every box opened and value seen so far. We call this the Fully-Adaptive (FA) strategy.
71
+
72
+ Different Benchmarks. As it was initially observed in Chawla et al. [2020], optimizing over the class of fully-adaptive strategies is intractable, therefore we consider the simpler benchmark of partially-adaptive (PA) strategies. In this case, the algorithm has to fix the opening order of the boxes, while the stopping rule can arbitrarily depend on the values revealed.
73
+
74
+ # 2.1 Weitzman's Algorithm
75
+
76
+ When the distributions of values in the boxes are independent, Weitzman [1979] described a greedy algorithm that is also the optimal strategy. In this algorithm, we first calculate an index for every box $b$ , called reservation value $\sigma_{b}$ , defined as the value that satisfies the following equation
77
+
78
+ $$
79
+ \mathbb {E} _ {\boldsymbol {v} \sim \mathcal {D}} \left[ \left(\sigma_ {b} - v _ {b}\right) ^ {+} \right] = c _ {b}, \tag {1}
80
+ $$
81
+
82
+ where $(a - b)^{+} = \max(0, a - b)$ . Then, the boxes are ordered by increasing $\sigma_{b}$ and opened until the minimum value revealed is less than the next box in the order. Observe that this is a partially-adaptive strategy.
83
+
84
+ # 3 Competing with the Partially-Adaptive
85
+
86
+ We begin by showing how Weitzman's algorithm can be extended to correlated distributions. Our algorithm calculates a reservation value $\sigma$ for every box at each step, and opens the box $b \in \mathcal{B}$ with the minimum $\sigma_{b}$ . We stop if the value is less than the reservation value calculated, and proceed by making this box free; we can re-open it at no cost, to obtain the value just realized at any later point. The formal statement is shown in Algorithm 1.
87
+
88
+ We give two different variants based on the type of update we do after every step on the distribution $\mathcal{D}$ . In the case of partial updates, we only condition on $V_{b} > \sigma_{b}$ , which is equivalent to the algorithm
89
+
90
+ not having stopped. On the other hand, for full updates we condition on the exact value that was instantiated in the box opened. Theorem 3.1 gives the approximation guarantees for both versions of this algorithm.
91
+
92
+ Algorithm 1: Weitzman's algorithm, for correlated $\mathcal{D}$
93
Input: Boxes with costs $c_{i}\in \mathbb{R}$ , distribution over scenarios $\mathcal{D}$
94
+ 1 An unknown vector of values $v\sim \mathcal{D}$ is drawn
95
+ 2 repeat
96
+ 3 Calculate $\sigma_{b}$ for each box $b\in \mathcal{B}$ by solving: $\mathbb{E}_{\boldsymbol {v}\sim \mathcal{D}}[(\sigma_b - v_b)^+ ] = c_b.$
97
+ 4 Open box $b = \mathrm{argmin}_{b\in \mathcal{B}}\sigma_{b}$
98
5 Stop if the observed value $V_{b} = v_{b}\leq \sigma_{b}$
99
+ 6 $c_{b}\gets 0 / /$ Box is always open now or can be reopened
100
+ 7 Update the prior distribution - Variant 1: $\mathcal{D}\gets \mathcal{D}|_{V_b > \sigma_b}$ (partial updates) - Variant 2: $\mathcal{D}\gets \mathcal{D}|_{V_b = v_b}$ (full updates)
101
+ 8 until termination;
102
+
103
+ Theorem 3.1. Algorithm 1 is a 4.428-approximation for Variant 1 and 5.828-approximation for Variant 2 of PANDORA'S BOX against the partially-adaptive optimal.
104
+
105
+ Proof. We separately show the two components of this theorem in Theorems 3.2 and 3.3.
106
+
107
+ ![](images/b2d7996fac7f4182fa54112c7fa755193aae08269926577b3c553d6173b1f0ed.jpg)
108
+
109
+ Observe that for independent distributions this algorithm is exactly the same as Weitzman's [Weitzman, 1979], since the product prior $\mathcal{D}$ remains the same, regardless of the values realized. Therefore, the calculation of the reservation values does not change in every round, and it suffices to calculate them only once at the beginning.
110
+
111
+ Scenarios To proceed with the analysis of Theorem 3.1, we assume that $\mathcal{D}$ is supported on a collection of $m$ vectors, $(\pmb{v}^s)_{s\in S}$ , which we call scenarios, and sometimes abuse notation to say that a scenario is sampled from the distribution $\mathcal{D}$ . We assume that all scenarios have equal probability. The general case with unequal probabilities follows by creating more copies of the higher probability scenarios until the distribution is uniform.
112
+
113
+ A scenario is covered when the algorithm decides to stop and choose a value from the opened boxes. For a specific scenario $s \in S$ we denote by $c(s)$ the total opening cost paid by an algorithm before this scenario is covered and by $v(s)$ the value chosen for this scenario.
114
+
115
+ Reservation Values To analyze Theorem 3.1, we introduce a new way of defining the reservation values of the boxes that is equivalent to (1). For a box $b$ , we have that
116
+
117
+ $$
118
+ \sigma_ {b} = \min _ {A \subseteq S} \frac {c _ {b} + \sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ] v _ {b} ^ {s}}{\sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ]}
119
+ $$
120
+
121
+ The equivalence to (1), follows since $\sigma_{b}$ is defined as the root of the expression
122
+
123
+ $$
124
+ \begin{array}{l} \mathbb {E} _ {s \sim \mathcal {D}} \left[ \left(\sigma_ {b} - v _ {b} ^ {s}\right) ^ {+} \right] - c _ {b} = \sum_ {s \in \mathcal {S}} \mathbf {P r} _ {\mathcal {D}} [ s ] \left(\sigma_ {b} - v _ {b} ^ {s}\right) ^ {+} - c _ {b} \\ = \max _ {A \subseteq S} \sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ] (\sigma_ {b} - v _ {b} ^ {s}) - c _ {b}. \\ \end{array}
125
+ $$
126
+
127
+ If we divide the above expression by any positive number, the result will not be affected since we require the root of the equation; $\sigma_{b}$ being the root is equivalent to $\sigma_{b}$ being the root of the numerator.
128
+
129
+ Thus, dividing by $\sum_{s\in A}\mathbf{Pr}_D[s]$ we get that $\sigma_{b}$ is also the root of
130
+
131
+ $$
132
+ \max _ {A \subseteq S} \frac {\sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ] \left(\sigma_ {b} - v _ {b} ^ {s}\right) - c _ {b}}{\sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ]} = \sigma_ {b} - \min _ {A \subseteq S} \frac {c _ {b} + \sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ] v _ {b} ^ {s}}{\sum_ {s \in A} \mathbf {P r} _ {\mathcal {D}} [ s ]}. \tag {2}
133
+ $$
134
+
135
+ This, gives our formula for computing $\sigma_{b}$ , which we can further simplify using our assumption that all scenarios have equal probability. In this case, $\mathbf{Pr}_{\mathcal{D}}[s] = 1 / |S|$ which implies that
136
+
137
+ $$
138
+ \sigma_ {b} = \min _ {A \subseteq S} \frac {c _ {b} | \mathcal {S} | + \sum_ {s \in A} v _ {b} ^ {s}}{| A |}. \tag {3}
139
+ $$
140
+
141
+ # 3.1 Conditioning on $V_{b} > \sigma_{b}$
142
+
143
+ We start by describing the simpler variant of our algorithm where after opening each box we update the distribution by conditioning on the event $V_{b} > \sigma_{b}$ . This algorithm is partially adaptive, since the order for each scenario does not depend on the actual value that is realized every time. At every step the algorithm will either stop or continue opening boxes conditioned on the event "We have not stopped yet" which does not differentiate among the surviving scenarios.
144
+
145
+ Theorem 3.2. Algorithm 1 is a 4.428-approximation for PANDORA's BOX against the partially-adaptive optimal, when conditioning on $V_{b} > \sigma_{b}$ .
146
+
147
+ In this section we show a simpler proof for Theorem 3.2 that gives a $3 + 2\sqrt{2} \approx 5.828$ -approximation. The full proof for the 4.428-approximation is given in section A.2 of the Appendix. Using the equivalent definition of the reservation value (Equation (3)) we can rewrite Algorithm 1 as follows.
148
+
149
+ Algorithm 2: Weitzman's rule for Partial Updates
150
+ Input: Boxes with costs $c_{i}\in \mathbb{R}$ , set of scenarios S.
151
+ 1 $t\gets 0$
152
+ 2 $R_0\gets S$ the set of scenarios still uncovered
153
+ 3 while $R_{t}\neq \emptyset$ do
154
+ 4 Let $\sigma_t\gets \min_{b\in \mathcal{B},A\subseteq R_t}\frac{c_b|R_t| + \sum_{s\in A}v_b^s}{|A|}$
155
+ 5 Let $b_{t}$ and $A_{t}$ be the box and the set of scenarios that achieve the minimum
156
+ 6 Open box $b_{t}$ and pay $c_{bt}$
157
+ 7 Stop and choose the value $v_{b_t}$ at box $b_{t}$ if it is less than $\sigma_t$ (see also Fact 3.2.1)
158
+ 8 Set $c_{b_t}\gets 0$
159
+ 9 $R_{t}\leftarrow R_{t}\backslash A_{t}$
160
+ 10 $t\gets t + 1$
161
+ 11 end
162
+
163
+ Structure of the solution. An important property to note is that by the equivalent definition of the reservation value (3) the set of scenarios that stop at each step are the ones that give a value at most $\sigma$ for the box opened, as we formally state in the following fact.
164
+
165
+ Fact 3.2.1. The value at box $b_{t}$ is less than $\sigma_{t}$ if and only if $s \in A_{t}$ .
166
+
167
+ In equation (2) the set $A_{t}$ that maximizes the expression contains all the scenarios with value at most $\sigma_{b}$ for the box $b$ . Therefore, the scenarios in $A_{t}$ are exactly the scenarios covered at each step $t$ of the algorithm, and can be removed from consideration.
168
+
169
+ Before showing our result, observe that this algorithm is partially adaptive; the order of the boxes does not depend on the scenario realized. This holds since we only condition on "not having stopped" (i.e. $\mathcal{D}_{V_b > \sigma_b}$ ) and therefore each scenario either stops or uses the same updated prior as all other surviving scenarios to calculate the next reservation values. If we were to draw our solution, it would look like a line, (see also Figure 2 in Appendix A.2), which as we observe in Section 3.2 differs from Variant 2.
170
+
171
+ Moving on to show the proof, we first start by giving a bound on the cost of the algorithm. The cost can be broken down into opening cost plus the value obtained. Since at any time $t$ , all remaining
172
+
173
+ scenarios $R_{t}$ pay the opening cost $c_{b_t}$ , we have that the total opening cost is $\sum_{t}c_{b_t}|R_t|$ . Moreover, the chosen value is given as $\sum_{t}\sum_{s\in A_{t}}v_{b_{t}}^{s}$ . Overall, we have that
174
+
175
+ $$
176
+ \mathrm {A L G} = \sum_ {t} \left(c _ {b _ {t}} | R _ {t} | + \sum_ {s \in A _ {t}} v _ {b _ {t}} ^ {s}\right) = \sum_ {t} | A _ {t} | \frac {c _ {b _ {t}} | R _ {t} | + \sum_ {s \in A _ {t}} v _ {b _ {t}} ^ {s}}{| A _ {t} |} = \sum_ {t} | A _ {t} | \sigma_ {t}.
177
+ $$
178
+
179
+ Defining $\sigma_{s}$ to be the reservation value of scenario $s$ at the time it is covered, i.e. when $s\in A_t$ , we get $\mathrm{ALG} = \sum_{s\in S}\sigma_s$ . We follow a histogram analysis similar to the proof of Theorem 4 in Feige et al. [2004] for MIN SUM SET COVER and construct the following histograms.
180
+
181
+ - The $\mathrm{OPT}_o$ histogram: put the scenarios on the x-axis on increasing opening cost order $c_s^{\mathrm{OPT}}$ according to OPT, the height of each scenario is the opening cost it paid.
182
+ - The $\mathrm{OPT}_v$ histogram: put the scenarios on the x-axis on increasing covering value order $v_s^{\mathrm{OPT}}$ according to OPT, the height of each scenario is the value with which it was covered.
183
+ - The ALG histogram: put scenarios on the x-axis in the order the algorithm covers them. The height of each scenario is $\sigma_{s}$ . Observe that the area of the ALG histogram is exactly the cost of the algorithm.
184
+
185
+ Proof of Theorem 3.2. Initially, observe that the algorithm will eventually stop; every time we open a box we cover at least one scenario (since the minimum computed in line 4 cannot be $\infty$ while scenarios are left uncovered).
186
+
187
+ To show the approximation factor, we scale the histograms as follows: $\mathrm{OPT}_o$ is scaled horizontally by $1 / \alpha_{o}$ and vertically by $1 / (\beta \cdot \gamma)$ , and $\mathrm{OPT}_v$ is scaled by $1 / \alpha_v$ horizontally, for some constants $\alpha_o,\alpha_v,\gamma ,\beta \in (0,1)$ to be determined later. We align the ALG histogram with $\mathrm{OPT}_v$ and $\mathrm{OPT}_o$ so that all of them have the same right-hand side. Observe that the optimal opening cost is the area below the histogram $\mathrm{OPT}_o$ and has increased by a factor of $1 / (\beta \cdot \gamma \cdot \alpha_o)$ , and similarly the area below $\mathrm{OPT}_v$ has increased by a factor of $1 / \alpha_v$ as a result of the scaling.
188
+
189
+ To conclude the proof it suffices to show that any point in the ALG histogram is inside the sum of the rescaled $\mathrm{OPT}_v$ and $\mathrm{OPT}_o$ histograms. Consider any point $p$ in the ALG histogram, and let $s$ be its corresponding scenario and $t$ be the time this scenario is covered. We have that the height of the ALG histogram is
190
+
191
+ $$
192
+ \sigma_ {s} = \frac {c _ {b _ {t}} \left| R _ {t} \right| + \sum_ {s \in A _ {t}} v _ {b _ {t}} ^ {s}}{\left| A _ {t} \right|} \leq \frac {c _ {b} \left| R _ {t} \right| + \sum_ {s \in A} v _ {b} ^ {s}}{\left| A \right|} \tag {4}
193
+ $$
194
+
195
+ where the last inequality holds for all $A \subseteq R_t$ and any $b \in \mathcal{B}$ .
196
+
197
+ Denote by $c^*$ the opening cost such that $\gamma |R_t|$ of the scenarios in $R_t$ have opening cost less than $c^*$ , and by $R_{\mathrm{low}} = \{s \in R_t : c_s^{\mathrm{OPT}} \leq c^*\}$ the set of these scenarios. Similarly denote by $v^*$ the value of scenarios in $R_{\mathrm{low}}$ such that $\beta |R_{\mathrm{low}}|$ of the scenarios have value less than $v^*$ and by $L = \{s \in R_{\mathrm{low}} : v_s^{\mathrm{OPT}} \leq v^*\}$ these scenarios. This split is shown in Figure 1, and the constants $\beta, \gamma \in (0,1)$ will be determined at the end of the proof.
198
+
199
+ ![](images/dcbcaab2a9704247f8ceb690ce4fff20d65d70b65764ec3f8aa6e166d8a5e9f3.jpg)
200
+ Figure 1: Split of scenarios in $R_{t}$ .
201
+
202
+ Let $B_{L}$ be the set of boxes that the optimal solution uses to cover the scenarios in $L$ . Let $L_{b} \subseteq L \subseteq R_{t}$ be the subset of scenarios in $L$ that choose the value at box $b$ in OPT. Using inequality (4) with
203
+
204
+ $b \in B_L$ and $A = L_b$ , we obtain $\sigma_s|L_b| \leq c_b|R_t| + \sum_{s \in L_b} v_s^{\mathrm{OPT}}$ , and by summing up the inequalities for all $b \in B_L$ we get
205
+
206
+ $$
207
+ \sigma_ {s} \leq \frac {\left| R _ {t} \right| \sum_ {b \in B _ {L}} c _ {b} + \sum_ {s \in L} v _ {s} ^ {\mathrm {O P T}}}{| L |} \leq \frac {\left| R _ {t} \right| c ^ {*} + \sum_ {s \in L} v _ {s} ^ {\mathrm {O P T}}}{| L |} \leq \frac {c ^ {*}}{\beta \cdot \gamma} + \frac {\sum_ {s \in L} v _ {s} ^ {\mathrm {O P T}}}{| L |} \tag {5}
208
+ $$
209
+
210
+ where for the second inequality we used that the cost for covering the scenarios in $L$ is at most $c^*$ by construction, and in the last inequality that $|L| = \beta \cdot \gamma \cdot |R_t|$ . We consider each term above separately, to show that the point $p$ is within the histograms.
211
+
212
+ Bounding the opening cost. By the construction of $c^*$ , the point in the $\mathrm{OPT}_o$ histogram that has cost at least $c^*$ is at distance at least $(1 - \gamma)|R_t|$ from the right hand side. This means that in the rescaled histogram, the point that has cost at least $c^* / (\beta \cdot \gamma)$ is at distance at least $(1 - \gamma)|R_t| / \alpha_o$ from the right hand side.
213
+
214
+ On the other hand, in the ALG histogram the distance of $p$ from the right edge of the histogram is at most $|R_{t}|$ , therefore for the point $p$ to be inside the OPT $_o$ histogram we require
215
+
216
+ $$
217
+ \alpha_ {o} \leq 1 - \gamma . \tag {6}
218
+ $$
219
+
220
+ Observe that throughout the proof we did not use the fact that we change the opening cost to 0, therefore the bound on our cost works even if we re-pay the boxes that are reopened.
221
+
222
+ The fact that the opening cost becomes 0 is not directly used in the analysis (i.e. inequalities (4) and (5)). Our analysis gives an upper bound on the cost of the algorithm, even if the algorithm never changes the cost of an opened box to 0. That is the reason in (4) and (5) the cost appears unchanged but the analysis still works for the algorithm since we just want an upper bound (and if we changed the cost to 0 this would only lower the cost of the algorithm).
223
+
224
+ Bounding the values cost. By the construction of $v^{*}$ , the point in the OPT $_{v}$ histogram that has value $v^{*}$ is at distance at least $|R_{t}| (1 - \beta)\gamma$ from the right hand side. This means that in the rescaled histogram, the point that has value at least $v^{*}$ is at distance at least $(1 - \beta)\gamma |R_{t}| / \alpha_{v}$ from the right hand side.
225
+
226
+ On the other hand, in the ALG histogram the distance of $p$ from the right edge of the histogram is at most $|R_t|$ , therefore for the point $p$ to be inside the OPT $_v$ histogram we require
227
+
228
+ $$
229
+ \alpha_ {v} \leq (1 - \beta) \gamma . \tag {7}
230
+ $$
231
+
232
+ We optimize the constants $\alpha_{o},\alpha_{v},\beta ,\gamma$ by ensuring that inequalities (6) and (7) hold. We set $\alpha_{o} = 1 - \gamma$ and $\alpha_{v} = (1 - \beta)\gamma$ , and obtain that $\mathrm{ALG}\leq \mathrm{OPT}_o / (\beta \cdot \gamma \cdot (1 - \gamma)) + \mathrm{OPT}_v / ((1 - \beta)\gamma)$ . Requiring the two coefficients to be equal we get $\beta = 1 / (2 - \gamma)$ ; the resulting approximation factor is minimized for $\beta = 1 / \sqrt{2}$ and $\gamma = 2 - \sqrt{2}$ , giving a value of $3 + 2\sqrt{2}$ .
233
+
234
+ # 3.2 Conditioning on $V_{b} = v$
235
+
236
+ In this section we switch gears to our second variant of Algorithm 1, where in each step we update the prior $\mathcal{D}$ conditioning on the event $V_{b} = v$ . We state our result in Theorem 3.3. In this case, the conditioning on $\mathcal{D}$ implies that the algorithm at every step removes the scenarios that are inconsistent with the value realized. In order to understand better the differences of the two variants and their conditioning we included an example and a discussion in section A.1 of the Appendix.
237
+
238
+ Theorem 3.3. Algorithm 1 is a $3 + 2\sqrt{2} \approx 5.828$ -approximation for PANDORA's BOX against the partially-adaptive optimal, when conditioning on $V_{b} = v$ .
239
+
240
+ The main challenge was that the algorithm's solution is now a tree with respect to scenarios instead of a line as in the case of $\mathcal{D}|_{V_b > \sigma_b}$ . Specifically, in the $D|_{V_b > \sigma_b}$ variant at every step all scenarios that had $V_b \leq \sigma_b$ were covered and removed from consideration. However in the $D|_{V_b = v}$ variant the remaining scenarios are split into different cases, based on the realization of $V$ , as shown in the example of Figure 4, which is deferred to Section A.3 of the Appendix due to space constraints.
241
+
242
+ This results into the ALG histogram not being well defined, since there is no unique order of covering the scenarios. We overcome this by generalizing the histogram approach to trees.
243
+
244
+ Proof of Theorem 3.3. The proof follows similar steps to that of Theorem 3.2, thus we only highlight the differences. The algorithm is presented below; the only change is line 9, where we remove the scenarios that are inconsistent with the value revealed, which also leads to our solution branching out for different scenarios and forming a tree.
245
+
246
+ Algorithm 3: Weitzman's rule for Full Updates
247
+ Input: Boxes with costs $c_{i}\in \mathbb{R}$ , set of scenarios S.
248
+ 1 Define a root node $u$ corresponding to the set S
249
+ 2 $R_{u}\gets S$ the set of scenarios still uncovered
250
+ 3 while $R_{u}\neq \emptyset$ do
251
+ 4 Let $\sigma_u\gets \min_{b\in \mathcal{B},A\subseteq R_u}\frac{c_b|R_u| + \sum_{s\in A}v_b^s}{|A|}$
252
+ 5 Let $b_{u}$ and $A_{u}$ be the box and the set of scenarios that achieve the minimum
253
+ 6 Open box $b_{u}$ paying $c_{b_u}$ and observe value $v$
254
+ 7 Stop and choose the value at box $b_{u}$ if it is less than $\sigma_{u}$ : this holds iff $s\in A_u$
255
+ 8 Set $c_{b_u}\gets 0$
256
+ 9 Let $u^{\prime}$ be a vertex corresponding to the set of consistent scenarios with $R_{u^{\prime}}\triangleq R_{u}\backslash (A_{u}\cup \{s\in R_{u}:v_{b_{u}}^{s}\neq v\}) / /$ Remove inconsistent scenarios
257
+ 10 Set $u\gets u^{\prime}$
258
+ 11 end
259
+
260
+ Bounding the opening cost Consider the tree $\mathcal{T}$ of ALG where at every node $u$ a set $A_u$ of scenarios is covered. We associate this tree with node weights, where at every node $u$ , we assign $|A_u|$ weights $(\sigma_u,\dots,\sigma_u)$ . Denote, the weighted tree by $\mathcal{T}_{\mathrm{ALG}}$ . As before, the total cost of ALG is equal to the sum of the weights of the tree.
261
+
262
+ We now consider two alternative ways of assigning weights to the nodes, forming trees $\mathcal{T}_{\mathrm{OPT}_o}$ , $\mathcal{T}_{\mathrm{OPT}_v}$ using the following process.
263
+
264
+ - $\mathcal{T}_{\mathrm{OPT}_o}$ . At every node $u$ we create a vector of weights $w_{u}^{\mathrm{OPT}_{o}} = (c_{s}^{\mathrm{OPT}})_{s \in A_{u}}$ where each $c_{s}^{\mathrm{OPT}}$ is the opening cost that scenario $s \in A_{u}$ has in the optimal solution.
265
+ - $\mathcal{T}_{\mathrm{OPT}_v}$ . At every node $u$ we create a vector of weights $\boldsymbol{w}_u^{\mathrm{OPT}_v} = (v_s^{\mathrm{OPT}})_{s \in A_u}$ where each $v_s^{\mathrm{OPT}}$ is the value the optimal uses to cover scenario $s \in A_u$ .
266
+
267
+ We denote by $\mathrm{cost}(\mathcal{T})$ the sum of all weights over the nodes of a weighted tree $\mathcal{T}$ . We have that $\mathrm{cost}(\mathcal{T}_{\mathrm{ALG}})$ is equal to the total cost of ALG, while $\mathrm{cost}(\mathcal{T}_{\mathrm{OPT}_o})$ and $\mathrm{cost}(\mathcal{T}_{\mathrm{OPT}_v})$ are equal to the optimal opening cost $\mathrm{OPT}_o$ and the optimal value $\mathrm{OPT}_v$ respectively. Intuitively, the weighted trees correspond to the histograms in the previous analysis of Theorem 3.2.
268
+
269
+ We want to relate the cost of ALG, to that of $\mathcal{T}_{\mathrm{OPT}_o}$ and $\mathcal{T}_{\mathrm{OPT}_v}$ . To do this, we define an operation similar to histogram scaling, which replaces the weights of every node $u$ in a tree with the top $\rho$ -percentile of the weights in the subtree rooted at $u$ . As the following lemma shows, this changes the cost of a tree by a bounded multiplicative factor.
270
+
271
+ Lemma 3.3.1. Let $\mathcal{T}$ be a tree with a vector of weights $w_{u}$ at each node $u\in \mathcal{T}$ , and let $\mathcal{T}^{(\rho)}$ be the tree we get when we substitute the weights of every node with the top $\rho$ -percentile of all the weights in the subtree of $\mathcal{T}$ rooted at $u$ . Then
272
+
273
+ $$
274
\rho \cdot \mathrm{cost}(\mathcal{T}^{(\rho)}) \leq \mathrm{cost}(\mathcal{T}).
275
+ $$
276
+
277
+ We defer the proof of Lemma 3.3.1 to Section A.3 of the Appendix. To complete the proof of Theorem 3.3, and bound $\operatorname{cost}(\mathcal{T}_{\mathrm{ALG}})$ , we show as before that the weights at every node $u$ are bounded by the weights of $\mathcal{T}_{\mathrm{OPT}_o}^{(1 - \gamma)}$ scaled by $\frac{1}{\beta\gamma}$ plus the weights of $\mathcal{T}_{\mathrm{OPT}_v}^{((1 - \beta)\gamma)}$ , for the constants $\beta, \gamma \in (0,1)$ chosen in the proof of Theorem 3.2. This implies that
278
+
279
+ $$
280
\operatorname{cost}\left(\mathcal{T}_{\mathrm{ALG}}\right) \leq \frac{1}{\beta\gamma} \operatorname{cost}\left(\mathcal{T}_{\mathrm{OPT}_o}^{(1-\gamma)}\right) + \operatorname{cost}\left(\mathcal{T}_{\mathrm{OPT}_v}^{((1-\beta)\gamma)}\right)
281
+ $$
282
+
283
+ $$
284
\leq \frac{1}{\beta\gamma(1-\gamma)} \operatorname{cost}\left(\mathcal{T}_{\mathrm{OPT}_o}\right) + \frac{1}{(1-\beta)\gamma} \operatorname{cost}\left(\mathcal{T}_{\mathrm{OPT}_v}\right)
285
+ $$
286
+
287
+ which gives $\mathrm{ALG} \leq 5.828$ OPT for the choice of $\beta$ and $\gamma$ . The details of the proof are similar to the one of Theorem 3.1, and are deferred to section A.3 of the Appendix.
288
+
289
+ ![](images/b5aae6e9ac5547835b8238fbbb99c853459db0a027371d1ba8bce91cf39db9b0.jpg)
290
+
291
+ Note on the approximation factors. Observe that Variant 2, where we condition on $V_{b} = v$ has a worse approximation factor than Variant 1 where we only condition on $V_{b} > \sigma_{b}$ . Intuitively someone might expect that with more information the approximation factor will improve. However, it is challenging to argue about this formally. It is also plausible that such monotonicity may not hold as more information might lead the greedy algorithm to make wrong decisions. Instead of making any such claims, we analyze this case directly by showing that our proof approach extends to the full update variant with a generalization of the histogram method to work on trees. Our technique for improving the approximation for the partial updates variant could not be generalized however and thus we only obtain the worse approximation guarantee.
292
+
293
+ # 4 Learning from Samples
294
+
295
+ In this section we show that our algorithm also works when we are only given sample access to the correlated distribution $\mathcal{D}$ .
296
+
297
+ We will mainly focus on the first variant with partial updates $\mathcal{D}|_{V > v}$ . The second variant with full Bayesian updates $\mathcal{D}|_{V = v}$ requires full knowledge of the underlying distribution and can only work with sample access if one can learn the full distribution. To see this consider for example an instance where the values are drawn uniformly from $[0,1]^d$ . No matter how many samples one draws, it is impossible to know the conditional distribution $\mathcal{D}|_{V = v}$ after opening the first box for fresh samples $v$ , and the Bayesian update is not well defined<sup>4</sup>.
298
+
299
+ Variant 1 does not face this problem and can be learned from samples if the costs of the boxes are polynomially bounded by $n$, i.e. if there is a constant $c > 0$ such that for all $b \in \mathcal{B}$, $c_b \in [1, n^c]$. If the weights are unbounded, it is impossible to get a good approximation with few samples. To see this consider the following instance. Box 1 has cost $1 / H \to 0$, while every other box has cost $H$ for a very large $H > 0$. Now consider a distribution where with probability $1 - \frac{1}{H} \to 1$, the value in the first box is 0, and with probability $1 / H$ is $+\infty$. In this case, with a small number of samples we never observe any scenario where $v_1 \neq 0$ and believe the overall cost is near 0. However, the true cost is at least $H \cdot 1 / H = 1$, and is determined by how the order of boxes is chosen when the scenario has $v_1 \neq 0$. Without any such samples it is impossible to pick a good order.
300
+
301
+ Therefore, we proceed to analyze Variant 1 with $\mathcal{D}|_{V > \sigma}$ in the case when the box costs are similar. We show that polynomial, in the number of boxes, samples suffice to obtain an approximately-optimal algorithm, as we formally state in the following theorem. We present the case where all boxes have cost 1 but the case where the costs are polynomially bounded easily follows.
302
+
303
+ Theorem 4.1. Consider an instance of Pandora's Box with opening costs equal to 1. For any given parameters $\varepsilon, \delta > 0$ , using $m = \text{poly}(n, 1 / \varepsilon, \log(1 / \delta))$ samples from $\mathcal{D}$ , Algorithm 1 (Variant 1) obtains a $4.428 + \varepsilon$ approximation policy against the partially-adaptive optimal, with probability at least $1 - \delta$ .
304
+
305
+ To prove the theorem, we first note that variant 1 of Algorithm 1 takes a surprisingly simple form, which we call a threshold policy. It can be described by a permutation $\pi$ of visiting the boxes and a vector of thresholds $\pmb{\tau}$ that indicate when to stop. The threshold for every box corresponds to the reservation value the first time the box is opened. To analyze the sample complexity of Algorithm 1, we study a broader class of algorithms parameterized by a permutation and vector of thresholds given in Algorithm 4.
306
+
307
+ Our goal now is to show that polynomially many samples from the distribution $\mathcal{D}$ suffice to learn good parameters for Algorithm 4. We first show a Lemma that bounds the cost of the algorithm calculated in the empirical $\hat{\mathcal{D}}$ instead of the original $\mathcal{D}$ (Lemma 4.1.1), and a Lemma 4.1.2 that shows how capping the reservation values by $n / \varepsilon$ can also be done with negligible cost.
308
+
309
+ Algorithm 4: General format of PANDORA's BOX algorithm.
310
+ Input: Set of boxes, permutation $\pi$ , vector of thresholds $\pmb {\tau}\in \mathbb{R}^n$
311
+ 1 best $\leftarrow \infty$
312
+ 2 foreach $i\in [n]$ do
313
+ 3 if best $>\tau_{i}$ then
314
+ 4 Open box $\pi_i$ , see value $v_{i}$
315
+ 5 best $\leftarrow \min (\text{best},v_{i})$
316
+ 6 else
317
+ 7 Accept best
318
+ 8 end
319
+
320
+ Lemma 4.1.1. Let $\varepsilon, \delta > 0$ and let $\hat{\mathcal{D}}$ be the empirical distribution obtained from poly $(n, 1/\varepsilon, \log(1/\delta))$ samples from $\mathcal{D}$ . Then, with probability $1 - \delta$ , it holds that
321
+
322
+ $$
323
\left| \mathbb{E}_{\hat{\mathcal{D}}} \left[ \mathrm{ALG}(\pi, \tau) - \min_{b \in \mathcal{B}} v_b \right] - \mathbb{E}_{\mathcal{D}} \left[ \mathrm{ALG}(\pi, \tau) - \min_{b \in \mathcal{B}} v_b \right] \right| \leq \varepsilon
324
+ $$
325
+
326
+ for any permutation $\pi$ and any vector of thresholds $\pmb{\tau} \in \left[0, \frac{n}{\varepsilon}\right]^n$ .
327
+
328
+ We defer the proof of Lemmas 4.1.1, 4.1.2 and that of Theorem 4.1 to Section A.5 of the Appendix.
329
+
330
+ Lemma 4.1.2. Let $\mathcal{D}$ be any distribution of values. Let $\varepsilon >0$ and consider a permutation $\pi$ and thresholds $\pmb{\tau}$ . Moreover, let $\tau^{\prime}$ be the thresholds capped to $n / \varepsilon$ , i.e. setting $\tau_b^\prime = \min \{\tau_b,n / \varepsilon \}$ for all boxes $b$ . Then,
331
+
332
+ $$
333
\mathbb{E}_{v \sim \mathcal{D}} \left[ \mathrm{ALG}\left(\pi, \tau^{\prime}\right) \right] \leq (1 + \varepsilon) \, \mathbb{E}_{v \sim \mathcal{D}} \left[ \mathrm{ALG}\left(\pi, \tau\right) \right].
334
+ $$
335
+
336
+ Note on Continuous vs Discrete Distributions. The results of Section 4 apply for general distributions (discrete or continuous) and show that the partial updates variant leads to a good approximation when run on the empirical distribution obtained with just polynomially many samples. In contrast, the full updates variant requires a complete description of the distribution. However, as the approximation factor does not depend on the support size, it can also apply even for continuous distributions with arbitrarily large support by taking a limit over a very fine discretization.
337
+
338
+ # 5 Conclusion
339
+
340
+ We present a summary of our results with a comparison to previous work on Table 1. Our main contribution was to improve the approximation factor for Pandora's Box with correlations given by Chawla et al. [2020], while also greatly simplifying their approach. Our algorithm also directly extends the independent case algorithm, giving us a unified way to solve this problem. An interesting open question is to try and improve their results for more complex combinatorial constraints, like selecting $k$ boxes (instead of one) or for selecting a basis of size $k$ , when the boxes are part of a matroid.
341
+
342
+ <table><tr><td></td><td>Approx. Factor</td><td>Learnable from Samples</td></tr><tr><td>Algorithm of Chawla et al. [2020]</td><td>9.22</td><td>Yes</td></tr><tr><td>Variant 1 (DVb&gt;σb)</td><td>4.428 (Thm 3.2)</td><td>Yes (Thm 4.1)</td></tr><tr><td>Variant 2 (DVb=v)</td><td>5.828 (Thm 3.3)</td><td>No (Sec. 4)</td></tr></table>
343
+
344
+ Table 1: Summary of our results (in bold) and comparison to previous work.
345
+
346
+ Observe also that the more natural Variant 2 seems worse than Variant 1 even though the algorithm has more accurate information through the update of the prior. Intuitively we would expect a better factor; however, since the algorithm is a greedy approximation, and not the optimal, the factor may not necessarily be monotone in the amount of information given. We leave as an open problem whether our analysis of Variant 2 is tight or whether this greedy algorithm cannot perform better under full information.
347
+
348
+ # References
349
+
350
+ Marek Adamczyk, Maxim Sviridenko, and Justin Ward. Submodular stochastic probing on matroids. Math. Oper. Res., 41(3):1022-1038, 2016. doi: 10.1287/moor.2015.0766. URL https://doi.org/10.1287/moor.2015.0766.
351
+ Yossi Azar and Iftah Gamzu. Ranking with submodular valuations. In Proceedings of the Twenty-Second Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2011, San Francisco, California, USA, January 23-25, 2011, pages 1070-1079, 2011. doi: 10.1137/1.9781611973082.81. URL https://doi.org/10.1137/1.9781611973082.81.
352
+ Yossi Azar, Iftah Gamzu, and Xiaoxin Yin. Multiple intents re-ranking. In Proceedings of the 41st Annual ACM Symposium on Theory of Computing, STOC 2009, Bethesda, MD, USA, May 31 - June 2, 2009, pages 669-678, 2009. doi: 10.1145/1536414.1536505. URL https://doi.org/10.1145/1536414.1536505.
353
+ Nikhil Bansal, Anupam Gupta, and Ravishankar Krishnaswamy. A constant factor approximation algorithm for generalized min-sum set cover. In Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2010, Austin, Texas, USA, January 17-19, 2010, pages 1539-1545, 2010. doi: 10.1137/1.9781611973075.125. URL https://doi.org/10.1137/1.9781611973075.125.
354
+ Curtis Bechtel, Shaddin Dughmi, and Neel Patel. Delegated Pandora's box. In David M. Pennock, Ilya Segal, and Sven Seuken, editors, EC '22: The 23rd ACM Conference on Economics and Computation, Boulder, CO, USA, July 11 - 15, 2022, pages 666-693. ACM, 2022. doi: 10.1145/3490486.3538267. URL https://doi.org/10.1145/3490486.3538267.
355
+ Hedyeh Beyhaghi and Linda Cai. Pandora's problem with nonobligatory inspection: Optimal structure and a PTAS. In Barna Saha and Rocco A. Servedio, editors, Proceedings of the 55th Annual ACM Symposium on Theory of Computing, STOC 2023, Orlando, FL, USA, June 20-23, 2023, pages 803-816. ACM, 2023a. doi: 10.1145/3564246.3585217. URL https://doi.org/10.1145/3564246.3585217.
356
+ Hedyeh Beyhaghi and Linda Cai. Recent developments in Pandora's box problem: Variants and applications. SIGecom Exch., 20(1), 2023b.
357
+ Hedyeh Beyhaghi and Robert Kleinberg. Pandora's problem with nonobligatory inspection. In Anna Karlin, Nicole Immorlica, and Ramesh Johari, editors, Proceedings of the 2019 ACM Conference on Economics and Computation, EC 2019, Phoenix, AZ, USA, June 24-28, 2019, pages 131-132. ACM, 2019. doi: 10.1145/3328526.3329626. URL https://doi.org/10.1145/3328526.3329626.
358
+ Aditya Bhaskara, Sreenivas Gollapudi, Kostas Kollias, and Kamesh Munagala. Adaptive probing policies for shortest path routing. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/62da5a6d47be0029801ba74a17e47e1a-Abstract.html.
359
+ Anselm Blumer, Andrzej Ehrenfeucht, David Haussler, and Manfred K. Warmuth. Learnability and the vapnik-chervonenkis dimension. J. ACM, 36(4):929-965, 1989. doi: 10.1145/76359.76371. URL https://doi.org/10.1145/76359.76371.
360
+ Shant Boodaghians, Federico Fusco, Philip Lazos, and Stefano Leonardi. Pandora's box problem with order constraints. In Péter Biró, Jason D. Hartline, Michael Ostrovsky, and Ariel D. Procaccia, editors, EC '20: The 21st ACM Conference on Economics and Computation, Virtual Event, Hungary, July 13-17, 2020, pages 439-458. ACM, 2020. doi: 10.1145/3391403.3399501. URL https://doi.org/10.1145/3391403.3399501.
361
+ Nicolò Cesa-Bianchi, Tommaso Cesari, Yishay Mansour, and Vianney Perchet. A new theoretical framework for fast and accurate online decision-making. In Marc'Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan, editors, Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual,
362
+
363
pages 9152-9166, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/4c4ea5258ef3fb3fb1fc48fee9b4408c-Abstract.html.
364
+ Moses Charikar, Ronald Fagin, Venkatesan Guruswami, Jon M. Kleinberg, Prabhakar Raghavan, and Amit Sahai. Query strategies for priced information (extended abstract). In Proceedings of the Thirty-Second Annual ACM Symposium on Theory of Computing, May 21-23, 2000, Portland, OR, USA, pages 582-591, 2000. doi: 10.1145/335305.335382. URL https://doi.org/10.1145/335305.335382.
365
+ Shuchi Chawla, Evangelia Gergatsouli, Yifeng Teng, Christos Tzamos, and Ruimin Zhang. Pandora's box with correlations: Learning and approximation. In Sandy Irani, editor, 61st IEEE Annual Symposium on Foundations of Computer Science, FOCS 2020, Durham, NC, USA, November 16-19, 2020, pages 1214-1225. IEEE, 2020. doi: 10.1109/FOCS46700.2020.00116. URL https://doi.org/10.1109/FOCS46700.2020.00116.
366
+ Shuchi Chawla, Evangelia Gergatsouli, Jeremy McMahan, and Christos Tzamos. Approximating Pandora's box with correlations. CoRR, abs/2108.12976, 2021. URL https://arxiv.org/abs/2108.12976.
367
+ Yuxin Chen, S. Hamed Hassani, Amin Karbasi, and Andreas Krause. Sequential information maximization: When is greedy near-optimal? In Proceedings of The 28th Conference on Learning Theory, COLT 2015, Paris, France, July 3-6, 2015, pages 338-363, 2015a. URL http://proceedings.mlr.press/v40/Chen15b.html.
368
+ Yuxin Chen, Shervin Javdani, Amin Karbasi, J. Andrew Bagnell, Siddhartha S. Srinivasa, and Andreas Krause. Submodular surrogates for value of information. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, January 25-30, 2015, Austin, Texas, USA., pages 3511-3518, 2015b. URL http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9841.
369
+ Laura Doval. Whether or not to open Pandora's box. J. Econ. Theory, 175:127-158, 2018. doi: 10.1016/j.jet.2018.01.005. URL https://doi.org/10.1016/j.jet.2018.01.005.
370
+ Hossein Esfandiari, Mohammad Taghi Hajiaghayi, Brendan Lucier, and Michael Mitzenmacher. Online Pandora's boxes and bandits. In The Thirty-Third AAAI Conference on Artificial Intelligence, AAAI 2019, The Thirty-First Innovative Applications of Artificial Intelligence Conference, IAAI 2019, The Ninth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2019, Honolulu, Hawaii, USA, January 27 - February 1, 2019, pages 1885-1892. AAAI Press, 2019. doi: 10.1609/aaai.v33i01.33011885. URL https://doi.org/10.1609/aaai.v33i01.33011885.
371
+ Uriel Feige. A threshold of ln n for approximating set cover. J. ACM, 45(4):634-652, July 1998. ISSN 0004-5411. doi: 10.1145/285055.285059. URL http://doi.acm.org/10.1145/285055.285059.
372
+ Uriel Feige, László Lovász, and Prasad Tetali. Approximating min-sum set cover. In Approximation Algorithms for Combinatorial Optimization, 5th International Workshop, APPROX 2002, Rome, Italy, September 17-21, 2002, Proceedings, pages 94-107, 2002. doi: 10.1007/3-540-45753-4_10. URL https://doi.org/10.1007/3-540-45753-4_10.
373
+ Uriel Feige, László Lovász, and Prasad Tetali. Approximating min sum set cover. Algorithmica, 40 (4):219-234, 2004.
374
+ Hu Fu, Jiawei Li, and Daogao Liu. Pandora box problem with nonobligatory inspection: Hardness and approximation scheme. In Barna Saha and Rocco A. Servedio, editors, Proceedings of the 55th Annual ACM Symposium on Theory of Computing, STOC 2023, Orlando, FL, USA, June 20-23, 2023, pages 789-802. ACM, 2023. doi: 10.1145/3564246.3585229. URL https://doi.org/10.1145/3564246.3585229.
375
+ Evangelia Gergatsouli and Christos Tzamos. Online learning for min sum set cover and Pandora's box. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvári, Gang Niu, and Sivan Sabato, editors, International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pages 7382-7403. PMLR, 2022. URL https://proceedings.mlr.press/v162/gergatsouli22a.html.
376
+
377
+ Ashish Goel, Sudipto Guha, and Kamesh Munagala. Asking the right questions: model-driven optimization using probes. In Proceedings of the Twenty-Fifth ACM SIGACT-SIGMOD-SIGART Symposium on Principles of Database Systems, June 26-28, 2006, Chicago, Illinois, USA, pages 203-212, 2006. doi: 10.1145/1142351.1142380. URL https://doi.org/10.1145/1142351.1142380.
378
+ Chenghao Guo, Zhiyi Huang, Zhihao Gavin Tang, and Xinzhi Zhang. Generalizing complex hypotheses on product distributions: Auctions, prophet inequalities, and Pandora's problem. In Mikhail Belkin and Samory Kpotufe, editors, Conference on Learning Theory, COLT 2021, 15-19 August 2021, Boulder, Colorado, USA, volume 134 of Proceedings of Machine Learning Research, pages 2248-2288. PMLR, 2021. URL http://proceedings.mlr.press/v134/guo21a.html.
379
+ Anupam Gupta and Amit Kumar. Sorting and selection with structured costs. In 42nd Annual Symposium on Foundations of Computer Science, FOCS 2001, 14-17 October 2001, Las Vegas, Nevada, USA, pages 416-425, 2001. doi: 10.1109/SFCS.2001.959916. URL https://doi.org/10.1109/SFCS.2001.959916.
380
+ Anupam Gupta and Viswanath Nagarajan. A stochastic probing problem with applications. In *Integer Programming and Combinatorial Optimization* - 16th International Conference, IPCO 2013, Valparaíso, Chile, March 18-20, 2013. Proceedings, pages 205-216, 2013. doi: 10.1007/978-3-642-36694-9\_18. URL https://doi.org/10.1007/978-3-642-36694-9_18.
381
+ Anupam Gupta, Viswanath Nagarajan, and Sahil Singla. Algorithms and adaptivity gaps for stochastic probing. In Proceedings of the Twenty-Seventh Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2016, Arlington, VA, USA, January 10-12, 2016, pages 1731-1747, 2016. doi: 10.1137/1.9781611974331.ch120. URL https://doi.org/10.1137/1.9781611974331.ch120.
382
+ Anupam Gupta, Viswanath Nagarajan, and Sahil Singla. Adaptivity gaps for stochastic probing: Submodular and XOS functions. In Proceedings of the Twenty-Eighth Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2017, Barcelona, Spain, Hotel Porta Fira, January 16-19, pages 1688-1702, 2017. doi: 10.1137/1.9781611974782.111. URL https://doi.org/10.1137/1.9781611974782.111.
383
+ Anupam Gupta, Haotian Jiang, Ziv Scully, and Sahil Singla. The markovian price of information. In *Integer Programming and Combinatorial Optimization* - 20th International Conference, IPCO 2019, Ann Arbor, MI, USA, May 22-24, 2019, Proceedings, pages 233-246, 2019. doi: 10.1007/978-3-030-17953-3_18. URL https://doi.org/10.1007/978-3-030-17953-3_18.
384
+ Sungjin Im, Maxim Sviridenko, and Ruben Van Der Zwaan. Preemptive and non-preemptive generalized min sum set cover. Mathematical Programming, 145(1-2):377-401, 2014.
385
+ Brian McCall and John McCall. The economics of search. Routledge, 2007.
386
+ Sahil Singla. The price of information in combinatorial optimization. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2018, New Orleans, LA, USA, January 7-10, 2018, pages 2523-2532, 2018. doi: 10.1137/1.9781611975031.161. URL https://doi.org/10.1137/1.9781611975031.161.
387
+ Martin Skutella and David P. Williamson. A note on the generalized min-sum set cover problem. Oper. Res. Lett., 39(6):433-436, 2011. doi: 10.1016/j.orl.2011.08.002. URL https://doi.org/10.1016/j.orl.2011.08.002.
388
+ Martin L Weitzman. Optimal Search for the Best Alternative. *Econometrica*, 47(3):641-654, May 1979.
389
+
390
+ # A Appendix
391
+
392
+ # A.1 Supplemental Preliminaries
393
+
394
+ Updating the prior. We include an example showing the process of updating the prior for our two different updating rules. The (correlated) distribution is a set of vectors of size $n$ , where each is drawn with some probability. When we open a box and see a value, some scenarios are not "possible" anymore, i.e. we know they cannot be the ones realized. We illustrate in the following example. Assume there are 3 of these vectors (scenarios).
395
+
396
+ <table><tr><td></td><td>b1</td><td>b2</td><td>b3</td></tr><tr><td>S1</td><td>3</td><td>4</td><td>7</td></tr><tr><td>S2</td><td>6</td><td>4</td><td>2</td></tr><tr><td>S3</td><td>7</td><td>7</td><td>2</td></tr></table>
397
+
398
+ Table 2: Example with 3 scenarios and 3 boxes.
399
+
400
+ The rows in the matrix above are the scenarios, and the columns are the boxes. For example, if scenario $S_{2}$ is the one realized (i.e. drawn from the distribution) then the values inside boxes $b_{1}, b_{2}$ and $b_{3}$ are 6, 4 and 2 respectively. The distribution $\mathcal{D}$ is essentially drawing one of the scenarios with some probability.
401
+
402
+ To see what the conditioning means: assume we open box $b_{1}$ and we see the value 6 (and assume for the sake of the example that the reservation value of box 1 is $\sigma_{1} = 5$ ).
403
+
404
+ - Variant 1: we condition on $6 = V_{b} > \sigma_{1} = 5$ meaning that scenario $S_{1}$ is not possible anymore (because if $S_{1}$ was the one drawn from $\mathcal{D}$ , then we would have seen a value less than $\sigma_{1} = 5$ when opening the box), and is removed from the set S the algorithm considers (line 9, Alg 2)
405
+ - Variant 2: we condition on $V_{b} = 6$ , which means that scenarios $S_{1}$ and $S_{3}$ are both removed (similarly, because if any of these were drawn, we would not have seen 6 upon opening the box)
406
+
407
+ Differences in the variants. As a result of the different conditioning, the solution for the $V_{b} > \sigma$ variant is partially adaptive meaning that the next box the algorithm opens, only depends on the scenarios that remain. However, for the $V_{b} = v$ variant the solution is fully adaptive (meaning that the next box opened, depends on the exact value seen). This is illustrated in Figures 2 and 4 in the Appendix, where Variant 1's solution can be represented by a line graph (Figure 2), while Variant 2's solution is a tree (Figure 4).
408
+
409
+ # A.2 Proofs from Section 3
410
+
411
+ ![](images/165a484838fff5ca6cba0bfcf944af18d655a37fa6e3a664d55f28d302127ea2.jpg)
412
+ Figure 2: Algorithm's solution when $\mathcal{D} \gets \mathcal{D}_{V > \sigma}$ , for an instance with scenarios $\mathcal{S} = \{s_1, s_2, s_3\}$ and boxes $\mathcal{B} = \{b_1, b_2, b_3, b_4\}$ . The circles contain the scenarios that have not stopped at each step. Scenario $s_1$ stopped at box $b_2$ , scenario $s_2$ stopped at box $b_1$ and $s_3$ at box $b_4$ .
413
+
414
+ Theorem 3.2. Algorithm 1 is a 4.428-approximation for PANDORA's BOX against the partially-adaptive optimal, when conditioning on $V_{b} > \sigma_{b}$ .
415
+
416
+ The tighter guarantee proof follows the steps of the proof in section 3.1 for the opening cost, but provides a tighter analysis for the values of cost.
417
+
418
+ Tight proof of Theorem 3.2. Denote by $\sigma_s$ the reservation value for scenario $s$ when it was covered by ALG and by $\mathcal{T}$ the set of boxes opened i.e. the steps taken by the algorithm. Then we can write the cost paid by the algorithm as follows
419
+
420
+ $$
421
\mathrm{ALG} = \frac{1}{|\mathcal{S}|} \sum_{s \in \mathcal{S}} \sigma_{s} = \frac{1}{|\mathcal{S}|} \sum_{t \in \mathcal{T}} \left| A_{t} \right| \sigma_{t}. \tag{8}
422
+ $$
423
+
424
+ We use the same notation as section 3.1 which we repeat here for convenience. Consider any point $p$ in the ALG histogram, and let $s$ be its corresponding scenario and $t$ be the time this scenario is covered.
425
+
426
+ - $R_{t}$ : set of uncovered scenarios at step $t$
427
+ - $A_{t}$ : set of scenarios that ALG chooses to cover at step $t$
428
+ - $c^*$ : the opening cost such that $\gamma |R_t|$ of the scenarios in $R_t$ have opening cost less than $c^*$ .
429
+ - $R_{\mathrm{low}} = \{s \in R_t : c_s^{\mathrm{OPT}} \leq c^*\}$ the set of these scenarios
430
+ - $v^{*}$ : the value of scenarios in $R_{\mathrm{low}}$ such that $\beta |R_{\mathrm{low}}|$ of the scenarios have value less than $v^{*}$ .
431
+ - $L = \{ s \in R_{\mathrm{low}} : v_s^{\mathrm{OPT}} \leq v^* \}$ the set of scenarios with value at most $v^*$ .
432
+ - $B_L$ : set of boxes the optimal uses to cover the scenarios in $L$ of step $t$
433
+
434
+ The split described in the definitions above is again shown in Figure 3, and the constants $1 > \beta, \gamma > 0$ will be determined in the end of the proof.
435
+
436
+ ![](images/367da186962e3838bd0e0da94840646d284f3dad9ff84760eea68dd1fed1a08d.jpg)
437
+ Figure 3: Split of scenarios in $R_{t}$ .
438
+
439
+ Continuing from equation (8) we obtain the following.
440
+
441
+ $$
442
\begin{aligned} \mathrm{ALG} &\leq \frac{1}{|\mathcal{S}|} \sum_{t \in \mathcal{T}} |A_t| \, \frac{|R_t| \sum_{b \in B_L} c_b + \sum_{s \in L} v_s^{\mathrm{OPT}}}{|L|} && \text{Inequality (5)} \\ &\leq \frac{1}{|\mathcal{S}|} \sum_{t \in \mathcal{T}} |A_t| \left( \frac{c^*}{\beta\gamma} + \frac{\sum_{s \in L} v_s^{\mathrm{OPT}}}{|L|} \right) && \text{Ineq. (5) and } |L| = \gamma\beta|R_t| \\ &\leq \frac{\mathrm{OPT}_o}{\beta\gamma(1-\gamma)} \sum_{t \in \mathcal{T}} \frac{|A_t|}{|\mathcal{S}|} + \sum_{t \in \mathcal{T}} \frac{|A_t|}{|\mathcal{S}|} \cdot \frac{\sum_{s \in L} v_s^{\mathrm{OPT}}}{|L|} && \text{since } c^* \leq \mathrm{OPT}_o/(1-\gamma) \\ &= \frac{\mathrm{OPT}_o}{\beta\gamma(1-\gamma)} + \sum_{t \in \mathcal{T}} \frac{|A_t|}{|\mathcal{S}|} \cdot \frac{\sum_{s \in L} v_s^{\mathrm{OPT}}}{|L|} && \text{since } \sum_t |A_t| = |\mathcal{S}| \end{aligned}
443
+ $$
444
+
445
+ Where in the second to last inequality we used the same histogram argument from section 3.1, to bound $c^*$ by $\mathrm{OPT}_o / (1 - \gamma)$ .
446
+
447
+ To bound the values term, observe that if we sorted the optimal values $v_{s}^{\mathrm{OPT}}$ that cover each scenario by decreasing order, and denote $j_{s}$ the index of $v_{s}^{\mathrm{OPT}}$ in this ordering, we add $v_{s}^{\mathrm{OPT}}$ multiplied by
448
+
449
+ the length of the interval every time $j_{s} \in \left[(1 - \beta)\gamma |R_{t}|, \gamma |R_{t}|\right]$ . This implies that the length of the intervals we sum up for $v_{s}^{\mathrm{OPT}}$ ranges from $j_{s} / \gamma$ to $j_{s} / ((1 - \beta)\gamma)$ , therefore the factor for each $v_{s}^{\mathrm{OPT}}$ is
450
+
451
+ $$
452
+ \frac {1}{\gamma} \sum_ {i = j _ {s} / \gamma} ^ {j _ {s} / (1 - \beta) \gamma} \frac {1}{i} \leq \frac {1}{\gamma} \log \left(\frac {1}{1 - \beta}\right)
453
+ $$
454
+
455
+ We want to balance the terms $1 / (\beta \gamma (1 - \gamma))$ and $1 / \gamma \log (1 / (1 - \beta))$ which gives that
456
+
457
+ $$
458
+ \gamma = 1 - \frac {1}{\beta \log \left(\frac {1}{1 - \beta}\right)}.
459
+ $$
460
+
461
+ Since we balanced the opening cost and value terms, by substituting the expression for $\gamma$ we get that the approximation factor is
462
+
463
+ $$
464
+ \frac {1}{\beta \gamma (1 - \gamma)} = \frac {\beta \log^ {2} \left(\frac {1}{1 - \beta}\right)}{\beta \log \left(\frac {1}{1 - \beta}\right) - 1}.
465
+ $$
466
+
467
+ Numerically minimizing that ratio for $\beta$ and ensuring that $0 < \beta, \gamma < 1$ we get that the minimum is 4.428 obtained at $\beta \approx 0.91$ and $\gamma \approx 0.55$ .
468
+
469
+ # A.3 Proofs from Section 3.2
470
+
471
+ ![](images/1334780b9d9fae3dd4b5c50723736f4007a6bdc4120297a5876e0a88aa6b5543.jpg)
472
+ Figure 4: Algorithm's solution when conditioning on $V = v$ , for an instance with scenarios $S = \{s_1, s_2, s_3\}$ , and boxes $\mathcal{B} = \{b_1, b_2\}$ . The nodes contain the consistent scenarios at each step, and the values $V$ are revealed once we open the corresponding box.
473
+
474
+ Theorem 3.3. Algorithm 1 is a $3 + 2\sqrt{2} \approx 5.828$ -approximation for PANDORA's BOX against the partially-adaptive optimal, when conditioning on $V_{b} = v$ .
475
+
476
+ Continued proof of Theorem 3.3. We now proceed to give the bound on the weights of the nodes of $\mathcal{T}_{\mathrm{ALG}}$ . Consider any node $u$ . We have that the weights at this node are equal to
477
+
478
+ $$
479
\sigma_{u} = \frac{c_{b_{u}} |R_{u}| + \sum_{s \in A_{u}} v_{b_{u}}^{s}}{|A_{u}|} \leq \frac{c_{b} |R_{u}| + \sum_{s \in A} v_{b}^{s}}{|A|}
480
+ $$
481
+
482
+ where the last inequality holds for all $A \subseteq R_u$ and any $b \in \mathcal{B}$ .
483
+
484
+ Let $c_{u}^{*}$ be the opening cost such that $\gamma |R_u|$ of the scenarios in $R_{u}$ have opening cost less than $c_{u}^{*}$ , and by $R_{\mathrm{low}} = \{s\in R_u:c_s^{\mathrm{OPT}}\leq c_u^*\}$ the set of these scenarios. Similarly denote by $v_{u}^{*}$ the value of scenarios in $R_{\mathrm{low}}$ such that $\beta |R_{\mathrm{low}}|$ of the scenarios have value less than $v_{u}^{*}$ and by $L = \{s\in R_{\mathrm{low}}:v_s^{\mathrm{OPT}}\leq v_u^*\}$ these scenarios. This split is shown in Figure 1.
485
+
486
+ Note that, $c_{u}^{*}$ corresponds to the weights of node $u$ in $\mathcal{T}_{\mathrm{OPT}_o}^{(1 - \gamma)}$ , while the weights of node $u$ at $\mathcal{T}_{\mathrm{OPT}_v}^{(1 - \gamma)}$ are at least $v_{u}^{*}$ .
487
+
488
+ Let $B_{L}$ be the set of boxes that the optimal solution uses to cover the scenarios in $L$ . Let $L_{b} \subseteq L \subseteq R_{u}$ be the subset of scenarios in $L$ that choose the value at box $b$ in OPT. Using inequality (4) with $b \in B_{L}$ and $A = L_{b}$ , we obtain $\sigma_{u}|L_{b}| \leq c_{b}|R_{u}| + \sum_{s \in L_{b}} v_{s}^{\mathrm{OPT}}$ , and by summing up the inequalities for all $b \in B_{L}$ we get
489
+
490
+ $$
491
+ \sigma_{u} \leq \frac{|R_{u}| \sum_{b \in B_{L}} c_{b} + \sum_{s \in L} v_{s}^{\mathrm{OPT}}}{|L|} \leq \frac{|R_{u}| \, c_{u}^{*} + \sum_{s \in L} v_{s}^{\mathrm{OPT}}}{|L|} \leq \frac{c_{u}^{*}}{\beta \cdot \gamma} + v_{u}^{*} \tag{9}
492
+ $$
493
+
494
+ where for the second inequality we used that the cost for covering the scenarios in $L$ is at most $c_u^*$ by construction, and in the last inequality that $|L| = \beta \cdot \gamma \cdot |R_u|$ . We consider each term above separately, to show that the point $p$ is within the histograms.
495
+
496
+ Lemma 3.3.1. Let $\mathcal{T}$ be a tree with a vector of weights $w_{u}$ at each node $u\in \mathcal{T}$ , and let $\mathcal{T}^{(\rho)}$ be the tree we get when we substitute the weights of every node with the top $\rho$ -percentile of all the weights in the subtree of $\mathcal{T}$ rooted at $u$ . Then
497
+
498
+ $$
499
+ \rho \cdot \mathrm{cost}(\mathcal{T}^{(\rho)}) \leq \mathrm{cost}(\mathcal{T}).
500
+ $$
501
+
502
+ Proof of Lemma 3.3.1. We denote by $\mathcal{T}_u$ the subtree rooted at $u$ , and by $W(\mathcal{T}) = \{w : w \in \mathbf{w}_v \text{ for } v \in \mathcal{T}\}$ the (multi)set of weights in the tree $\mathcal{T}$ . Denote by $q^\rho(\mathcal{T})$ the top $\rho$ percentile of all the weights in $\mathcal{T}$ . Finally, we define $Q(\rho|\mathcal{T})$ for any tree $\mathcal{T}$ as follows:
503
+
504
+ - We create a histogram $H(x)$ of the weights in $W(\mathcal{T})$ in increasing order.
505
+ - We calculate the area enclosed between $(1 - \rho)|W(\mathcal{T})|$ and $|W(\mathcal{T})|$ :
506
+
507
+ $$
508
+ Q \left(\rho | \mathcal {T}\right) = \int_ {(1 - \rho) | W (\mathcal {T}) |} ^ {| W (\mathcal {T}) |} H (x) d x
509
+ $$
510
+
511
+ This is approximately equal to the sum of all the values greater than $q^{\rho}(\mathcal{T})$ with values exactly $q^{\rho}(\mathcal{T})$ taken fractionally so that exactly $\rho$ fraction of values are selected.
512
+
513
+ We show by induction that for every node $u$ , it holds that $\rho \cdot \mathrm{cost}(\mathcal{T}_u^{(\rho)}) \leq Q(\rho|\mathcal{T}_u)$ .
514
+
515
+ - For the base case, for all leaves $u$ , the subtree $\mathcal{T}_u$ only has one node and the lemma holds as $\rho q^{\rho}(\mathcal{T}_u) \leq Q(\rho |\mathcal{T}_u)$ .
516
+ - Now, let $r$ be any node of the tree, and denote by $\operatorname{child}(r)$ the set of the children nodes of $r$ .
517
+
518
+ $$
519
+ \begin{array}{ll} \rho \cdot \mathrm{cost}\left(\mathcal{T}_{r}^{(\rho)}\right) = \rho \cdot q^{\rho}\left(\mathcal{T}_{r}\right) \left|\boldsymbol{w}_{r}\right| + \rho \cdot \sum_{v \in \mathrm{child}(r)} \mathrm{cost}\left(\mathcal{T}_{v}^{(\rho)}\right) & \text{Definition} \\ \leq \rho \cdot q^{\rho}\left(\mathcal{T}_{r}\right) \left|\boldsymbol{w}_{r}\right| + \rho \cdot \sum_{v \in \mathrm{child}(r)} Q(\rho \,|\, \mathcal{T}_{v}) & \text{From induction hypothesis} \\ \leq \rho \cdot q^{\rho}(\mathcal{T}_{r}) |\boldsymbol{w}_{r}| + Q\left(\rho \frac{|W(\mathcal{T}_{r})| - |\boldsymbol{w}_{r}|}{|W(\mathcal{T}_{r})|} \,\Bigg|\, \mathcal{T}_{r}\right) & \text{Since } \mathcal{T}_{v} \subseteq \mathcal{T}_{r} \\ \leq Q\left(\rho \,|\, \mathcal{T}_{r}\right) & \\ \end{array}
520
+ $$
521
+
522
+ The second-to-last inequality follows since $Q$ is defined as the area of the largest weights of the histogram. Including more weights, while keeping the length of the integration range the same (equal to $\rho(|W(\mathcal{T}_r)| - |\mathbf{w}_r|)$ ), can only increase the value $Q$ .
523
+
524
+ The last inequality follows by noting that if $H(x)$ is the histogram corresponding to the values of $\mathcal{T}_r$ , then
525
+
526
+ $$
527
+ Q\left(\rho \,|\, \mathcal{T}_{r}\right) - Q\left(\rho \frac{\left|W\left(\mathcal{T}_{r}\right)\right| - \left|\boldsymbol{w}_{r}\right|}{\left|W\left(\mathcal{T}_{r}\right)\right|} \,\Bigg|\, \mathcal{T}_{r}\right) = \int_{(1 - \rho)\left|W\left(\mathcal{T}_{r}\right)\right|}^{\left|W\left(\mathcal{T}_{r}\right)\right|} H(x)\,dx - \int_{(1 - \rho)\left|W\left(\mathcal{T}_{r}\right)\right| + \rho\left|\boldsymbol{w}_{r}\right|}^{\left|W\left(\mathcal{T}_{r}\right)\right|} H(x)\,dx
528
+ $$
529
+
530
+ $$
531
+ \begin{array}{l} = \int_ {(1 - \rho) | W (\mathcal {T} _ {r}) |} ^ {(1 - \rho) | W (\mathcal {T} _ {r}) | + \rho | \boldsymbol {w} _ {r} |} H (x) d x \geq \int_ {(1 - \rho) | W (\mathcal {T} _ {r}) |} ^ {(1 - \rho) | W (\mathcal {T} _ {r}) | + \rho | \boldsymbol {w} _ {r} |} q ^ {\rho} (\mathcal {T} _ {r}) d x \\ = \rho q ^ {\rho} \left(\mathcal {T} _ {r}\right) | \boldsymbol {w} _ {r} | \\ \end{array}
532
+ $$
533
+
534
+ where the inequality follows since $H(x) \geq q^{\rho}(\mathcal{T}_r)$ for $x \geq (1 - \rho)|W(\mathcal{T}_r)|$ by the definition of $q^{\rho}(\mathcal{T}_r)$ as the top- $\rho$ quantile of the weights in $\mathcal{T}_r$ .
535
+
536
+ ![](images/9e1523aa02c2b3a601f7527a5108af1baed9907cfc5972a5a2fd36437dd9ab69.jpg)
537
+ Figure 5: Picture depicting the proof above.
538
+
539
+ ![](images/557657357b51eaf6531aef9563cb3afac4fcbd2a356d84f6fbdeaf44f77b5181.jpg)
540
+
541
+ # A.4 Lower Bound
542
+
543
+ To show that our algorithm is almost tight, we observe that the lower bound of Min Sum Set Cover presented in Feige et al. [2004] also applies to PANDORA's BOX. In MIN SUM SET COVER we are given $n$ elements $e_i$ , and $m$ sets $s_j$ where each $s_j \subseteq [n]$ . We say a set $s_j$ covers an element $e_i$ if $e_i \in s_j$ . The goal is to select elements in order to minimize the sum of the covering times of all the sets, where covering time of a set is the first time an element $e_i \in s_j$ is chosen. This lower bound is also mentioned in Chawla et al. [2020], but we include it here with more details for the sake of completeness.
544
+
545
+ In Feige et al. [2004] the authors show that MIN SUM SET COVER cannot be approximated better than $4 - \varepsilon$ even in the special case where every set contains the same number of elements<sup>5</sup>. We restate the theorem below.
546
+
547
+ Theorem A.1 (Theorem 13 of Feige et al. [2004]). For every $\varepsilon > 0$ , it is NP-hard to approximate min sum set cover within a ratio of $4 - \varepsilon$ on uniform hypergraphs.
548
+
549
+ Our main observation is that MIN SUM SET COVER is a special case of PANDORA'S BOX. When the boxes all have the same opening cost $c_{b} = 1$ and the values inside are $v_{s}^{b} \in \{0, \infty\}$ , we are required to find a 0 for each scenario; equivalent to covering a scenario. The optimal solution of MIN SUM SET COVER is an algorithm that selects elements one by one, and stops whenever all the sets are covered. This is exactly the partially adaptive optimal we defined for PANDORA'S BOX. The theorem restated above results in the following Corollary.
550
+
551
+ Corollary A.1.1. For every $\varepsilon > 0$ it is NP-Hard to approximate Pandora's Box against the partially-adaptive within a ratio better than $4 - \varepsilon$ .
552
+
553
+ # A.5 Proofs from Section 4
554
+
555
+ We first present an example of a discrete distribution that shows that one needs exponentially many samples in the number of boxes to learn $D_{V = v}$ .
556
+
557
+ Discrete Distribution Example Consider a distribution that only takes values $0, H, H + 1$ for some very large $H > 0$ . The scenario is drawn by choosing a random bit $b_i \in \{0, 1\}$ for every box
558
+
559
+ and depending on the realized sequence $\mathbf{b}$ a single box $f(\mathbf{b}) \in [n]$ is chosen for an unknown and arbitrary function $f$ . The value at box $i$ is then chosen to be $H + b_{i}$ unless $i$ is the box $f(\mathbf{b})$ in which case it is 0. In this case learning the posterior distribution $D_{V = v}$ would require learning the unknown function $f$ on all inputs which are exponentially many. In particular, if we only take $s < 2^{n}$ samples, for any order of choosing boxes after $\approx \log s$ steps, none of the samples in our collection will match the observed sequence of bits, therefore it will not be possible to compute a posterior distribution.
560
+
561
+ We continue by giving the omitted proofs.
562
+
563
+ Lemma 4.1.1. Let $\varepsilon, \delta > 0$ and let $\mathcal{D}'$ be the empirical distribution obtained from poly $(n, 1/\varepsilon, \log(1/\delta))$ samples from $\mathcal{D}$ . Then, with probability $1 - \delta$ , it holds that
564
+
565
+ $$
566
+ \left| \mathbb {E} _ {\hat {D}} \left[ A L G (\pi , \tau) - \min _ {b \in \mathcal {B}} v _ {b} \right] - \mathbb {E} _ {D} \left[ A L G (\pi , \tau) - \min _ {b \in \mathcal {B}} v _ {b} \right] \right| \leq \varepsilon
567
+ $$
568
+
569
+ for any permutation $\pi$ and any vector of thresholds $\pmb{\tau} \in \left[0, \frac{n}{\varepsilon}\right]^n$ .
570
+
571
+ Proof of Lemma 4.1.1. We first argue that we can accurately estimate the cost for any vector of thresholds $\pmb{\tau}$ when the order of visiting boxes is fixed.
572
+
573
+ Consider any fixed permutation $\pi = \pi_1, \pi_2, \ldots, \pi_n$ of the boxes; we relabel the boxes without loss of generality so that $\pi_i$ is box $i$ .
574
+
575
+ Denote by $\hat{V}_i = \min_{j\leq i}v_j$ , and observe that $\hat{V}_i$ is a random variable that depends on the distribution $\mathcal{D}$ . Then we can write the expected cost of the algorithm as the expected sum of the opening cost and the chosen value: $\mathbb{E}_{\mathcal{D}}[\mathrm{ALG}] = \mathbb{E}_{\mathcal{D}}[\mathrm{ALG}_o] + \mathbb{E}_{\mathcal{D}}[\mathrm{ALG}_v]$ . We have that:
576
+
577
+ $$
578
+ \mathbb{E}_{\mathcal{D}}\left[\mathrm{ALG}_{o}\right] = \sum_{i=1}^{n} \mathbf{Pr}_{\mathcal{D}}[\text{reach } i] = \sum_{i=1}^{n} \mathbf{Pr}_{\mathcal{D}}\left[\bigwedge_{j=1}^{i-1}\left(\hat{V}_{j} > \tau_{j+1}\right)\right]
579
+ $$
580
+
581
+ Moreover, we denote by $\overline{V}_{\tau}^{i} = \bigwedge_{j=1}^{i-1}\left(\hat{V}_{j} > \tau_{j+1}\right)$ and we have
582
+
583
+ $$
584
+ \begin{array}{l} \mathbb{E}_{\mathcal{D}}\left[\mathrm{ALG}_{v} - \hat{V}_{n}\right] = \sum_{i=1}^{n} \mathbb{E}_{\mathcal{D}}\left[\left(\hat{V}_{i} - \hat{V}_{n}\right) \cdot \mathbb{1}\{\text{stop at } i\}\right] \\ = \sum_{i=1}^{n-1} \mathbb{E}_{\mathcal{D}}\left[\left(\hat{V}_{i} - \hat{V}_{n}\right) \cdot \mathbb{1}\left\{\overline{V}_{\tau}^{i} \wedge \left(\hat{V}_{i} \leq \tau_{i+1}\right)\right\}\right] \\ = \sum_{i=1}^{n-1} \mathbb{E}_{\mathcal{D}}\left[\tau_{i+1} \mathbf{Pr}_{r \sim U[0, \tau_{i+1}]}\left[r < \hat{V}_{i} - \hat{V}_{n}\right] \cdot \mathbb{1}\left\{\overline{V}_{\tau}^{i} \wedge \left(\hat{V}_{i} \leq \tau_{i+1}\right)\right\}\right] \\ = \sum_{i=1}^{n-1} \tau_{i+1} \mathbf{Pr}_{\mathcal{D},\, r \sim U[0, \tau_{i+1}]}\left[\overline{V}_{\tau}^{i} \wedge \left(r + \hat{V}_{n} \leq \hat{V}_{i} \leq \tau_{i+1}\right)\right] \\ \end{array}
585
+ $$
586
+
587
+ In order to show our result, we use from Blumer et al. [1989] that for a class with VC dimension $d < \infty$ that we can learn it with error at most $\varepsilon$ with probability $1 - \delta$ using $m = \mathrm{poly}(1 / \varepsilon, d, \log(1 / \delta))$ samples.
588
+
589
+ Consider the class $\mathcal{F}_{\tau}(\hat{V},r) = \bigwedge_{j = 1}^{i - 1}(\hat{V}_j > \tau_{j + 1})$ . This defines an axis parallel rectangle in $\mathbb{R}^i$ , therefore its VC-dimension is $2i$ . Using the observation above we have that using $m = \mathrm{poly}(1 / \varepsilon ,n,\log (1 / \delta))$ samples, with probability at least $1 - \delta$ , it holds
590
+
591
+ $$
592
+ \left| \mathbf {P r} _ {\mathcal {D}} \left[ \mathcal {F} _ {\boldsymbol {\tau}} (\hat {V}, r) \right] - \mathbf {P r} _ {\hat {\mathcal {D}}} \left[ \mathcal {F} _ {\boldsymbol {\tau}} (\hat {V}, r) \right] \right| \leq \varepsilon
593
+ $$
594
+
595
+ for all $\pmb {\tau}\in \mathbb{R}^n$
596
+
597
+ Similarly, the class $\mathcal{C}_{\tau}(\hat{V},r) = \bigwedge_{j = 1}^{i - 1}\left(\hat{V}_j > \tau_{j + 1}\right)\wedge \left(r + \hat{V}_n\leq \hat{V}_i\leq \tau_{i + 1}\right)$ has VC-dimension $O(n)$ since it is an intersection of at most $n$ (sparse) halfspaces. Therefore, the same argument as before applies and for $m = \mathrm{poly}(1 / \varepsilon ,n,\log (1 / \delta))$ samples, we get
598
+
599
+ $$
600
+ \left| \mathbf {P r} _ {\mathcal {D}, r \sim U [ 0, \tau_ {i + 1} ]} \left[ \mathcal {C} _ {\boldsymbol {\tau}} (\hat {V}, r) \right] - \mathbf {P r} _ {\hat {\mathcal {D}}, r \sim U [ 0, \tau_ {i + 1} ]} \left[ \mathcal {C} _ {\boldsymbol {\tau}} (\hat {V}, r) \right] \right| \leq \varepsilon
601
+ $$
602
+
603
+ for all $\pmb {\tau}\in \mathbb{R}^n$ , with probability at least $1 - \delta$
604
+
605
+ Putting it all together, the error can still be unbounded if the thresholds $\tau$ are too large. However, since we assume that $\tau_{i} \leq n / \varepsilon$ for all $i \in [n]$ , $\mathrm{poly}(n, 1 / \varepsilon, \log(1 / \delta))$ samples suffice to get $\varepsilon$ error overall, by setting $\varepsilon \gets \frac{\varepsilon^2}{n}$ .
606
+
607
+ While we obtain the result for a fixed permutation, we can directly obtain the result for all $n!$ permutations through a union bound. Setting $\delta \gets \frac{\delta}{n!}$ only introduces an additional factor of $\log(n!) = n \log n$ in the overall sample complexity.
608
+
609
+ Lemma 4.1.2. Let $\mathcal{D}$ be any distribution of values. Let $\varepsilon >0$ and consider a permutation $\pi$ and thresholds $\pmb{\tau}$ . Moreover, let $\tau^\prime$ be the thresholds capped to $n / \varepsilon$ , i.e. setting $\tau_b^{\prime} = \min \{\tau_b,n / \varepsilon \}$ for all boxes $b$ . Then,
610
+
611
+ $$
612
+ \mathbb{E}_{v \sim \mathcal{D}}\left[ALG\left(\pi, \tau^{\prime}\right)\right] \leq (1 + \varepsilon)\, \mathbb{E}_{v \sim \mathcal{D}}\left[ALG\left(\pi, \tau\right)\right].
613
+ $$
614
+
615
+ Proof of Lemma 4.1.2. We compare the expected cost of ALG with the original thresholds and the transformed one $\mathrm{ALG}'$ with the capped thresholds. For any value vector $v \sim \mathcal{D}$ , either (1) the algorithms stopped at the same point having the same opening cost and value, or (2) ALG stopped earlier at a threshold $\tau > n / \varepsilon$ , while $\mathrm{ALG}'$ continued. In the latter case, the value $v$ that ALG gets is greater than $n / \varepsilon$ , while the value $v'$ that $\mathrm{ALG}'$ gets is smaller, $v' \leq v$ . For such a scenario, the opening cost $c$ of ALG, and the opening cost $c'$ of $\mathrm{ALG}'$ satisfy $c' \leq c + n$ . Thus, the total cost is $c' + v' \leq c + v + n \leq (1 + \varepsilon)(c + v)$ . Overall, we get that
616
+
617
+ $$
618
+ \mathbb {E} _ {\mathcal {D}} \left[ \mathrm {A L G} ^ {\prime} \right] \leq \mathbb {E} _ {\mathcal {D}} \left[ \mathrm {A L G} \right] (1 + \varepsilon).
619
+ $$
620
+
621
+ ![](images/3ca464553623ff329b3c936de0e2f100b60b69cc55836181cd7a5d9daebea988.jpg)
622
+
623
+ Theorem 4.1. Consider an instance of Pandora's Box with opening costs equal to 1. For any given parameters $\varepsilon, \delta > 0$ , using $m = \text{poly}(n, 1 / \varepsilon, \log(1 / \delta))$ samples from $\mathcal{D}$ , Algorithm 1 (Variant 1) obtains a $4.428 + \varepsilon$ approximation policy against the partially-adaptive optimal, with probability at least $1 - \delta$ .
624
+
625
+ Proof of Theorem 4.1. With $\mathrm{poly}(n, 1/\varepsilon, \log(1/\delta))$ samples from $\mathcal{D}$ , we obtain an empirical distribution $\hat{\mathcal{D}}$ .
626
+
627
+ From Lemma 4.1.1, we have that with probability at least $1 - \delta \varepsilon / \log(1/\delta)$ , the following holds
628
+
629
+ $$
630
+ \left| \mathbb {E} _ {v \sim \hat {D}} \left[ \mathrm {A L G} (\pi , \tau) - \min _ {b \in \mathcal {B}} v _ {b} \right] - \mathbb {E} _ {v \sim D} \left[ \mathrm {A L G} (\pi , \tau) - \min _ {b \in \mathcal {B}} v _ {b} \right] \right| \leq \varepsilon \tag {10}
631
+ $$
632
+
633
+ for any permutation $\pi$ and any vector of thresholds $\pmb{\tau} \in \left[0, \frac{n}{\varepsilon}\right]^n$ . This gives us that we can estimate the cost of a threshold policy accurately.
634
+
635
+ To compare with the set of all partially adaptive policies that may not take the form of a threshold policy, we consider the set of scenario aware policies (SA). These are policies $\mathrm{SA}(\pi)$ parameterized by a permutation $\pi$ of boxes and are forced to visit the boxes in that order. However, they are aware of all values in the boxes in advance and know precisely when to stop. These are unrealistic policies introduced in Chawla et al. [2020] which serve as an upper bound to the set of all partially adaptive policies.
636
+
637
+ As shown in Chawla et al. [2020] (Lemma 3.3), scenario-aware policies are also learnable from samples. With probability at least $1 - \delta \varepsilon / \log(1/\delta)$ , it holds that for any permutation $\pi$
638
+
639
+ $$
640
+ \left| \mathbb {E} _ {v \sim \hat {D}} \left[ S A (\pi) - \min _ {b \in \mathcal {B}} v _ {b} \right] - \mathbb {E} _ {v \sim D} \left[ S A (\pi) - \min _ {b \in \mathcal {B}} v _ {b} \right] \right| \leq \varepsilon . \tag {11}
641
+ $$
642
+
643
+ The $\alpha$ -approximation guarantees (with $\alpha \approx 4.428$ ) of Algorithm 1 hold even against scenario aware policies as there is no restriction on how the partially-adaptive policy may choose to stop. So for the empirical distribution, we can compute a permutation $\hat{\pi}$ and thresholds $\hat{\tau}$ such that:
644
+
645
+ $$
646
+ \mathbb {E} _ {\hat {D}} \left[ \mathrm {A L G} (\hat {\pi}, \hat {\tau}) \right] \leq \alpha \cdot \min _ {\pi} \mathbb {E} _ {\hat {D}} \left[ S A (\pi) \right]
647
+ $$
648
+
649
+ Clipping the thresholds to obtain $\hat{\tau}^{\prime} = \min \{\hat{\tau}, n / \varepsilon\}$ , and letting $\Delta = \mathbb{E}_{v \sim \hat{D}}[\min_{b \in \mathcal{B}} v_b] - \mathbb{E}_{v \sim D}[\min_{b \in \mathcal{B}} v_b]$ , we have that:
650
+
651
+ $$
652
+ \mathbb {E} _ {D} \left[ \mathrm {A L G} (\hat {\pi}, \hat {\tau} ^ {\prime}) \right] \leq \mathbb {E} _ {\hat {D}} \left[ \mathrm {A L G} (\hat {\pi}, \hat {\tau} ^ {\prime}) \right] - \Delta + \varepsilon
653
+ $$
654
+
655
+ $$
656
+ \begin{array}{l} \leq (1 + \varepsilon)\, \mathbb{E}_{\hat{D}}[\mathrm{ALG}(\hat{\pi}, \hat{\tau})] - \Delta + \varepsilon \\ \leq (1 + \varepsilon)\, \alpha \cdot \min_{\pi} \mathbb{E}_{\hat{D}}[SA(\pi)] - \Delta + \varepsilon \\ \leq (1 + \varepsilon)\, \alpha \cdot \min_{\pi} \mathbb{E}_{D}[SA(\pi)] + O(\Delta + \varepsilon) \\ \end{array}
657
+ $$
658
+
659
+ By Markov's inequality, we have that $\mathbf{Pr}\left[\mathbb{E}_{v\sim \hat{D}}\left[\min_{b\in \mathcal{B}}v_b\right]\leq (1 + \varepsilon)\mathbb{E}_{v\sim D}\left[\min_{b\in \mathcal{B}}v_b\right]\right]\geq \frac{\varepsilon}{1 + \varepsilon}\geq$ $\varepsilon /2$
660
+
661
+ Thus, repeating the sampling process $\frac{O(\log 1 / \delta)}{\varepsilon}$ times and picking the empirical distribution with minimum $\mathbb{E}_{v\sim \hat{D}}[\min_{b\in \mathcal{B}}v_b]$ satisfies $\Delta \leq \varepsilon \mathbb{E}_{v\sim D}[\min_{b\in \mathcal{B}}v_b]$ with probability at least $1 - \delta$ and simultaneously satisfies equations (10) and (11).
662
+
663
+ This shows that $\mathbb{E}_D[\mathrm{ALG}(\hat{\pi},\hat{\tau}')]\leq (1 + O(\varepsilon))\alpha \cdot \min_{\pi}\mathbb{E}_D[SA(\pi)]$ which completes the proof by rescaling $\varepsilon$ by a constant.
664
+
665
+ ![](images/9297f41df636478d3bbdfc3a7655ea785a36564d86bd804609c28e2774954506.jpg)
weitzmansruleforpandorasboxwithcorrelations/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb60d17d0fdb7ef288941926a0859fadd93b55a9f43b0ecd7c56bec0d9d6b9d4
3
+ size 454248
weitzmansruleforpandorasboxwithcorrelations/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5d34ecb2d80bf366083de13130fa3ba8e482b80d9fcc7d2e758b8b695973d3b
3
+ size 1025700
whatamessmultidomainevaluationofzeroshotsemanticsegmentation/e8b1c9b1-4de1-411f-af83-670ef5d9788e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9175b87f607a57ad32ad6c0e82ab7d757b1f08153c275d0f89dd2bb67a4c280e
3
+ size 71589
whatamessmultidomainevaluationofzeroshotsemanticsegmentation/e8b1c9b1-4de1-411f-af83-670ef5d9788e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:564cd29557a7f1f70d799faa95af4cae8a5cd7789a3cf0e9e6df4bd2c2b9fe00
3
+ size 88701
whatamessmultidomainevaluationofzeroshotsemanticsegmentation/e8b1c9b1-4de1-411f-af83-670ef5d9788e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4bb043492c6e88137ce65796c6b986d61f08c0cf6c67befb5aa54787fcb3cce
3
+ size 2544570
whatamessmultidomainevaluationofzeroshotsemanticsegmentation/full.md ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # What a MESS: Multi-Domain Evaluation of Zero-Shot Semantic Segmentation
2
+
3
+ Benedikt Blumenstiel*
4
+ Karlsruhe Institute of Technology
5
+ IBM Research Europe
6
+ benedikt.blumenstiel@kit.edu
7
+
8
+ Hilde Kühne
9
+ University of Bonn
10
+ MIT-IBM Watson AI Lab
11
+ hildegard.kuehne@ibm.com
12
+
13
+ Johannes Jakubik*
14
+ Karlsruhe Institute of Technology
15
+ IBM Research Europe
16
+ johannes.jakubik@kit.edu
17
+
18
+ Michael Vössing
19
+ Karlsruhe Institute of Technology
20
+ IBM Germany
21
+ michael.voessing@kit.edu
22
+
23
+ # Abstract
24
+
25
+ While semantic segmentation has seen tremendous improvements in the past, there are still significant labeling efforts necessary and the problem of limited generalization to classes that have not been present during training. To address this problem, zero-shot semantic segmentation makes use of large self-supervised vision-language models, allowing zero-shot transfer to unseen classes. In this work, we build a benchmark for Multi-domain Evaluation of Semantic Segmentation (MESS), which allows a holistic analysis of performance across a wide range of domain-specific datasets such as medicine, engineering, earth monitoring, biology, and agriculture. To do this, we reviewed 120 datasets, developed a taxonomy, and classified the datasets according to the developed taxonomy. We select a representative subset consisting of 22 datasets and propose it as the MESS benchmark. We evaluate eight recently published models on the proposed MESS benchmark and analyze characteristics for the performance of zero-shot transfer models. The toolkit is available at https://github.com/blumenstiel/MESS.
26
+
27
+ # 1 Introduction
28
+
29
+ Zero-shot semantic segmentation utilizes self-supervised models such as CLIP to minimize labeling requirements during training and to improve model generalization. Recent models are already able to include classes during inference that were not present during training. For this reason, zero-shot semantic segmentation is becoming increasingly relevant for real-world scenarios. In particular, the performance on domain-specific datasets such as earth monitoring datasets, as visualized in Figure 1, becomes more and more relevant. Current standard benchmarks tend to focus on in-domain tasks but do not capture performance comparisons across domains. This is problematic because it limits insight into the applicability of zero-shot semantic segmentation to new domains. It also makes it difficult to assess whether architectures might be suitable for datasets that pose additional challenges (e.g., different sensor types or specialized vocabulary). To better understand the behavior of zero-shot semantic segmentation models on a wider range of more complex, domain-specific datasets, we propose a holistic Multi-domain Evaluation of Semantic Segmentation (MESS). To this end, we have examined 120 datasets and classified them within a developed taxonomy. We leverage our benchmark to evaluate eight recently published models for zero-shot semantic segmentation including
30
+
31
+ ![](images/d9935717263879126a740bc8c3541a30ae201fa5b3ec74026843dd33a18e47db.jpg)
32
+ Figure 1: CAT-Seg-L [7] predictions for a range of domain-specific datasets. The model achieves promising predictions on everyday and satellite images, while it faces difficulties in segmenting small segments such as blood vessels, and distinguishing similar classes such as bird species.
33
+
34
+ the state-of-the-art models on 22 datasets from the fields of medical sciences, earth monitoring, agriculture and biology, engineering as well as a general domain including datasets on, e.g., driving scenes, maritime scenes, paintings, and body parts. Our evaluation focuses on zero-shot text-to-mask models—also known as open-vocabulary semantic segmentation (OVSS)—and later also compares their performance with zero-shot point-to-mask and box-to-mask approaches of SAM [20]. Using the proposed benchmark, we identify and analyze several characteristics that influence the performance of OVSS models, i.a., showing that the semantic, textual similarity of classes as well as the underlying sensor type, significantly affect the performance of current models.
35
+
36
+ Our experiments reveal various challenges for the application of zero-shot semantic segmentation on domain-specific datasets, e.g., we found that the selection of class labels can significantly affect the quality of predictions. We also observe that the models are sensitive to the semantics of the textual prompts, e.g., general terminology leads to better performance than domain-specific terminology. Overall, we hope that our benchmark will support accelerating zero-shot semantic segmentation and improve the real-world applicability of semantic segmentation in general.
37
+
38
+ We summarize the contributions of this work as follows: (1) We develop a taxonomy based on a quantitative and qualitative analysis of a broad variety of semantic segmentation datasets. (2) We propose a new benchmark for multi-domain semantic segmentation. (3) We evaluate eight zero-shot models on the MESS benchmark with an in-depth analysis of the task characteristics.
39
+
40
+ # 2 Related work
41
+
42
+ # 2.1 Zero-shot semantic segmentation
43
+
44
+ Large-scale self-supervised pre-training has revolutionized the field of computer vision over the last couple of years. One stream of work focuses on vision-language pre-training such as in recent foundation model architectures like CLIP [37], ALIGN [19], and Florence [54]. These models are trained on image-text pairs and encode both visual and text semantics in a shared embedding space. This approach particularly enables so-called open-vocabulary image classification by computing the similarity between the embeddings of the image and the embeddings of natural language describing the classes in the image. The text describing the images can be any arbitrary textual sequence and might describe classes on the images that have been unseen during training. This is in contrast to recent segmentation models, like Segment-Anything (SAM, see [20]), which are trained only on image data and therefore do not include a text encoder to encode semantic concepts. Hence, segmentation models like SAM do not facilitate open-vocabulary out of the box and need to be adapted to support the processing of textual information (e.g., by using additional models that generate text embeddings or models that provide bounding boxes as input such as Grounding DINO [27]).
45
+
46
+ Early approaches in OVSS have been built upon standard zero-shot semantic segmentation, such as ZS3Net [5], using simple word2vec text encoders. Subsequent two-stage approaches made use of mask proposals based on MaskFormer [6] in stage 1 followed by predictions of each mask by
47
+
48
+ CLIP [9, 10, 25, 51]. Recently, one-stage frameworks like SAN [50] generate masks in a side adapter network during the CLIP inference. Therefore, CLIP does not classify many mask proposals but only the image ones, resulting in a faster inference. Other mask-based models like GroupViT [49] and ViewCo [40] are grouping pixels into larger segments which are then classified.
49
+
50
+ Decoder-focused approaches such as DenseCLIP [39] and LSeg [23] encode the image with CLIP and obtain the pixel-level patch embeddings. Because the pre-training is focused on the class embedding, the approaches append additional decoders to refine the patch embeddings. For this refinement, CAT-Seg [7] utilizes multiple stages of cost aggregation to generate the final segmentation mask. PACL [34] aligns patch embeddings and class embeddings during training and, as a result, does not require segmentation-specific training data or additional modules. Zero-shot semantic segmentation models have been combined with other tasks as well. OpenSeeD [55] implements open-vocabulary for object detection and segmentation. SEEM [58] processes text prompts and additional inputs like visual prompts similar to SAM. Apart from differences in the architecture, the models vary in the training process—particularly in fine-tuning CLIP's vision encoder.
51
+
52
+ # 2.2 Evaluation and benchmarking of zero-shot semantic segmentation
53
+
54
+ Zero-shot semantic segmentation models are typically evaluated on datasets consisting exclusively of everyday images, such as ADE20K [56], Pascal Context [33], and Pascal VOC [12]. These datasets are the de facto standard for evaluating these models (see [7, 14, 25, 50, 51, 57]). Few studies have considered additional datasets. Notably, Zou et al. [57] proposed a Segmentation in the Wild (SegInW) benchmark with 25 datasets. However, the majority of the datasets in SegInW still consist of everyday images with only two exceptions: brain tumor segmentation and a bird's eye view in stables. To the best of our knowledge, zero-shot semantic segmentation and OVSS have not been evaluated on other datasets. Outside of zero-shot semantic segmentation and OVSS, semantic segmentation is usually evaluated based on collections of datasets, like MSeg [22]. These datasets generally only include everyday images, indoor scenes, and driving datasets and lack domain-specific datasets. SAM has been evaluated on 23 instance segmentation datasets in a point-to-mask setting [20]. This collection of datasets is the most extensive for segmentation tasks but still misses domains, such as engineering and earth monitoring. Other works evaluate specifically domain dataset collections such as medical tasks [32] or satellite data [21].
55
+
56
+ Table 1: Multi-domain benchmark for zero-shot semantic segmentation models consisting of 22 downstream tasks, a total of 448 classes, and 25,079 images.
57
+
58
+ <table><tr><td>Dataset</td><td>Domain</td><td>Sensor type</td><td>Segment size</td><td>Number of classes</td><td>Class similarity</td><td>Vocabulary</td><td>Number of images</td><td>Task</td></tr><tr><td>BDD100K [53]</td><td rowspan="6">General</td><td>Visible spectrum</td><td>Medium</td><td>19 (Medium)</td><td>Low</td><td>Generic</td><td>1,000</td><td>Driving</td></tr><tr><td>Dark Zurich [41]</td><td>Visible spectrum</td><td>Medium</td><td>20 (Medium)</td><td>Low</td><td>Generic</td><td>50</td><td>Driving</td></tr><tr><td>MHP v1 [24]</td><td>Visible spectrum</td><td>Small</td><td>19 (Medium)</td><td>High</td><td>Task-spec.</td><td>980</td><td>Body parts</td></tr><tr><td>FoodSeg103 [47]</td><td>Visible spectrum</td><td>Medium</td><td>104 (Many)</td><td>High</td><td>Generic</td><td>2,135</td><td>Ingredients</td></tr><tr><td>ATLANTIS [11]</td><td>Visible spectrum</td><td>Small</td><td>56 (Many)</td><td>Low</td><td>Generic</td><td>1,295</td><td>Maritime</td></tr><tr><td>DRAM [8]</td><td>Visible spectrum</td><td>Medium</td><td>12 (Medium)</td><td>Low</td><td>Generic</td><td>718</td><td>Paintings</td></tr><tr><td>iSAID [46]</td><td rowspan="5">Earth Monitoring</td><td>Visible spectrum</td><td>Small</td><td>16 (Medium)</td><td>Low</td><td>Generic</td><td>4,055</td><td>Objects</td></tr><tr><td>ISPRS Potsdam [4]</td><td>Multispectral</td><td>Small</td><td>6 (Few)</td><td>Low</td><td>Generic</td><td>504</td><td>Land use</td></tr><tr><td>WorldFloods [31]</td><td>Multispectral</td><td>Medium</td><td>3 (Binary)</td><td>Low</td><td>Generic</td><td>160</td><td>Floods</td></tr><tr><td>FloodNet [38]</td><td>Visible spectrum</td><td>Medium</td><td>10 (Few)</td><td>Low</td><td>Task-spec.</td><td>5,571</td><td>Floods</td></tr><tr><td>UAVid [29]</td><td>Visible spectrum</td><td>Small</td><td>8 (Few)</td><td>High</td><td>Task-spec.</td><td>840</td><td>Objects</td></tr><tr><td>Kvasir-Inst. 
[18]</td><td rowspan="4">Medical Sciences</td><td>Visible spectrum</td><td>Medium</td><td>2 (Binary)</td><td>Low</td><td>Generic</td><td>118</td><td>Endoscopy</td></tr><tr><td>CHASE DB1 [13]</td><td>Microscopic</td><td>Small</td><td>2 (Binary)</td><td>Low</td><td>Domain-spec.</td><td>20</td><td>Retina scan</td></tr><tr><td>CryoNuSeg [30]</td><td>Microscopic</td><td>Small</td><td>2 (Binary)</td><td>Low</td><td>Domain-spec.</td><td>30</td><td>WSI</td></tr><tr><td>PAXRay-4 [42]</td><td>Electromagnetic</td><td>Large</td><td>4x2 (Binary)</td><td>Low</td><td>Domain-spec.</td><td>180</td><td>X-Ray</td></tr><tr><td>Corrosion CS [3]</td><td rowspan="4">Engineering</td><td>Visible spectrum</td><td>Medium</td><td>4 (Few)</td><td>High</td><td>Task-spec.</td><td>44</td><td>Corrosion</td></tr><tr><td>DeepCrack [28]</td><td>Visible spectrum</td><td>Small</td><td>2 (Binary)</td><td>Low</td><td>Generic</td><td>237</td><td>Cracks</td></tr><tr><td>ZeroWaste-f [2]</td><td>Visible spectrum</td><td>Medium</td><td>5 (Few)</td><td>High</td><td>Generic</td><td>929</td><td>Conveyor</td></tr><tr><td>PST900 [43]</td><td>Electromagnetic</td><td>Small</td><td>5 (Few)</td><td>Low</td><td>Generic</td><td>288</td><td>Thermal</td></tr><tr><td>SUIM [17]</td><td rowspan="3">Agriculture and Biology</td><td>Visible spectrum</td><td>Medium</td><td>8 (Few)</td><td>Low</td><td>Generic</td><td>110</td><td>Underwater</td></tr><tr><td>CUB-200 [45]</td><td>Visible spectrum</td><td>Medium</td><td>201 (Many)</td><td>High</td><td>Domain-spec.</td><td>5,794</td><td>Bird species</td></tr><tr><td>CWFID [15]</td><td>Visible spectrum</td><td>Small</td><td>3 (Few)</td><td>High</td><td>Generic</td><td>21</td><td>Crops</td></tr></table>
59
+
60
+ # 3 MESS benchmark
61
+
62
+ Following the HELM benchmark [26] proposed for the evaluation of large language models, we develop a taxonomy with task characteristics for semantic segmentation and retrieve a set of more than 500 datasets that we review as part of the benchmark creation. For the development of the taxonomy, we use a method proposed by Nickerson et al. [35]. We start the development of the taxonomy by specifying the so-called meta-characteristic of the taxonomy (i.e., our goal): identify visual and language characteristics of downstream tasks influencing the performance of zero-shot semantic segmentation models. We then initialize the taxonomy in a conceptual-to-empirical cycle based on a review of other benchmarks and literature. Next, we refine the taxonomy in multiple empirical-to-conceptual iterations. We reviewed semantic segmentation datasets on Papers with Code, Kaggle, and additional test datasets used by recent segmentation models. We repeatedly reduced the dimensions of the taxonomy to the most meaningful ones for the meta-characteristic. We then conducted a statistical analysis of potential taxonomy dimensions to identify and remove similar or overlapping dimensions (see supplementary material). We identified multiple dimensions that highly correlate with each other like color map and sensor type, segment size and segments per image, as well as viewpoint and domain. Based on this analysis, we discarded color map, resolution, segments per image, and viewpoint. The final taxonomy matches all ending conditions [35]. While the proposed taxonomy identifies the most important dimensions and characteristics validated based on 120 classified datasets, there may be additional dimensions that influence the performance of zero-shot semantic segmentation models in specific cases. Overall, we observe that certain characteristics are more likely to co-occur. 
E.g., binary datasets typically imply a low class similarity, whereas task-specific vocabulary is often associated with a high similarity between the task-specific classes. We account for this imbalance in the distribution of the characteristics and reflect it in our benchmark.
63
+
64
+ Following the taxonomy development, we selected a representative set of datasets so that the MESS benchmark is informative, reproducible, and manageable. Specifically, we filtered the 120 classified datasets based on four exclusion criteria: each dataset has an official and annotated validation or test set, high annotation quality, moderate disk usage, and sufficient image size. Next, we selected a subset that consists of complementing use cases to avoid duplication and covers all characteristics of the taxonomy. We present the 22 selected datasets and their characteristics within the taxonomy's dimensions in Table 1. These datasets cover a variety of applications, resulting in a holistic evaluation of domain-specific applications. We publish this new MESS benchmark at https://blumenstiel.github.io/mess-benchmark and invite others to suggest additional datasets and refine classes for future versions.
65
+
66
+ During dataset selection, we have not identified any ethical issues with these datasets based on the information provided by the data sources. Our use follows the terms and conditions set by the data providers, and we list the corresponding licenses in the supplementary material. However, we acknowledge the importance of considering the societal impact of our work. FMs, such as CLIP, are pre-trained on vast corpora of data that may contain biases. We refer to Agarwal et al. [1] for a detailed analysis of biases in CLIP. While the majority of MESS datasets are less prone to such biases, some may include data specific to gender or geographic regions. We believe that assessing model performance across a range of datasets can help to identify and mitigate the impact of biases.
67
+
68
+ # 4 Experimental setup
69
+
70
+ In this section, we provide a brief definition of the zero-shot semantic segmentation task, describe the metrics, and outline implementation details.
71
+
72
+ # 4.1 Task
73
+
74
+ Let $I$ denote an image with a set of candidate classes $\mathcal{C} = \{C_1, C_2, \dots, C_N\}$ , where each candidate class $C_i$ is described in natural language. Zero-shot semantic segmentation models then assign a class $C_i$ to each pixel of $I$ . The number of candidate classes $N$ can vary during inference (e.g., different downstream tasks) and, additionally, the model may not have seen the candidate classes during training. This is in contrast to traditional semantic segmentation, where the set of classes is fixed during training and inference [7]. Each dataset represents a set of images with the same label set, and in our evaluations, none of the models is trained on the datasets from the benchmark
75
+
76
+ or the same set of candidate classes. However, it is reasonable to assume the evaluated classes have been present in the pre-training of the underlying vision-language models (like CLIP). All evaluated models have been trained on images with three channels (i.e., RGB). To account for datasets with varying numbers of input channels we mapped them to RGB (i.e., inputs with a single channel are mapped to RGB, for multispectral inputs we selected a subset of three channels).
77
+
78
+ # 4.2 Implementation
79
+
80
+ Following common practice, we evaluate all models using the mean of class-wise intersection over union (mIoU) [6, 7, 25, 50, 51]. We split very large images from the earth monitoring datasets into smaller patches of $1024 \times 1024$ pixels. Further, we use an IRRG color map for multispectral datasets (ISPRS Potsdam and WorldFloods) and select the thermal data in PST900. All other datasets include images with one or three channels. Across our implementation, we use PyTorch [36] and Detectron2 [48] for implementing the data loaders. For the convenience of users and contributors to our benchmark, we additionally provide wrappers for torchvision and MMSeg to process datasets in the Detectron2 dataset catalog. We did not train any models but used the publicly available weights and model configurations. The evaluation was conducted on an NVIDIA V100S.
81
+
82
+ # 4.3 Models
83
+
84
+ We utilize our MESS benchmark to evaluate a range of recent models for zero-shot semantic segmentation including the state-of-the-art, selecting models based on the reported performance and the availability of official code and weights. OVSeg [25], SAN [50], and CAT-Seg [7] represent the state-of-the-art across different approaches in the architecture for zero-shot semantic segmentation (i.e., two-stage mask-based, one-stage mask-based, and pixel-based). We additionally consider ZSSeg [51] and ZegFormer [9] which are frequently consulted as baseline models e.g. by [7, 25, 50]. The previously listed models are based on CLIP and use COCO Stuff to train the additional segmentation modules. Additionally, OVSeg uses COCO Captions for fine-tuning. X-Decoder [57] and OpenSeeD [55] are part of our evaluation since these approaches do not make use of CLIP but are based on UniCL [52] (i.e., their public versions). X-Decoder and OpenSeeD are trained on multiple datasets which we detail in the supplementary material.
85
+
86
+ To account for recent developments in the field, we additionally include SAM [20] in our evaluations. Standard SAM can only process visual prompts and does not facilitate text-to-mask settings. Therefore, we validated other ways to make use of SAM. We implement Grounded-SAM [16] using the predicted bounding boxes from Grounding DINO [27] as input for SAM and thereby enabling an open-vocabulary setting (i.e., text-to-mask). This serves as a baseline to better understand the potential of SAM-based text-to-mask models. The overall evaluation time per model on the MESS benchmark varies in our experiments between 1 hour for SAN-B and 14.5 hours for OVSeg-L.
87
+
88
+ # 5 Experiments
89
+
90
+ In the following, we provide a holistic overview of the performance of multiple zero-shot semantic segmentation models based on our MESS benchmark. We conduct a range of in-detail analyses of model performances across the dimensions of our taxonomy including sensor types, the class similarity, and the vocabulary—additional experiments are included in the supplementary material.
91
+
92
+ # 5.1 Multi-domain zero-shot semantic segmentation
93
+
94
+ We provide a quantitative comparison across models and all datasets summarized by their domain in Table 2 and per dataset results in Fig. 2 and 3. We add a random prediction as a lower bound by calculating the expected mIoU value with uniformly distributed predictions over all classes. In addition, we report fully supervised results based on the current SOTA from supervised semantic segmentation (see supplementary material). Overall, CAT-Seg-L achieves a strong performance across domains with an average mIoU of $38.14\%$ , followed by its base and huge version. CAT-Seg is followed by SAN-L with a performance of $30.06\%$ . Notably, the performance of zero-shot CAT-Seg-L in the general domain is only 8.69pp (average mIoU) below the performance of supervised SOTA approaches. In comparison, CAT-Seg-L reaches on average $50.36\%$ of the supervised performance in earth monitoring and $54.18\%$ on medical sciences. The performance gap compared to supervised
95
+
96
+ Table 2: mIoU results averaged by the dataset domain. Best-performing models are highlighted in bold, and the second-best are underlined. Random represents the randomly expected mIoU with uniformly distributed predictions. The best supervised models are separately selected for each dataset (see supplementary material for the supervised models and results).
97
+
98
+ <table><tr><td>Model</td><td>Parameters</td><td>Inference (s/iter)</td><td>General</td><td>Earth Monit.</td><td>Medical Sciences</td><td>Engineer.</td><td>Agri. and Biology</td><td>Mean</td></tr><tr><td>Random (LB)</td><td></td><td></td><td>1.17</td><td>7.11</td><td>29.51</td><td>11.71</td><td>6.14</td><td>10.27</td></tr><tr><td>Best supervised (UB)</td><td></td><td></td><td>48.62</td><td>79.12</td><td>89.49</td><td>67.66</td><td>81.94</td><td>70.99</td></tr><tr><td>ZSSeg-B [51]</td><td>211M</td><td>0.49</td><td>19.98</td><td>17.98</td><td>41.82</td><td>14.0</td><td>22.32</td><td>22.73</td></tr><tr><td>ZegFormer-B [9]</td><td>210M</td><td>0.18</td><td>13.57</td><td>17.25</td><td>17.47</td><td>17.92</td><td>25.78</td><td>17.57</td></tr><tr><td>X-Decoder-T [57]</td><td>164M</td><td>0.1</td><td>22.01</td><td>18.92</td><td>23.28</td><td>15.31</td><td>18.17</td><td>19.8</td></tr><tr><td>SAN-B [50]</td><td>158M</td><td>0.04</td><td>29.35</td><td>30.64</td><td>29.85</td><td>23.58</td><td>15.07</td><td>26.74</td></tr><tr><td>OpenSeeD-T [55]</td><td>116M</td><td>0.08</td><td>22.49</td><td>25.11</td><td>44.44</td><td>16.5</td><td>10.35</td><td>24.33</td></tr><tr><td>CAT-Seg-B [7]</td><td>181M</td><td>0.17</td><td>34.96</td><td>34.57</td><td>41.65</td><td>26.26</td><td>29.32</td><td>33.74</td></tr><tr><td>OVSeg-L [25]</td><td>531M</td><td>1.64</td><td>29.54</td><td>29.04</td><td>31.9</td><td>14.16</td><td>28.64</td><td>26.94</td></tr><tr><td>SAN-L [50]</td><td>437M</td><td>0.14</td><td>36.18</td><td>38.83</td><td>30.27</td><td>16.95</td><td>20.41</td><td>30.06</td></tr><tr><td>CAT-Seg-L [7]</td><td>490M</td><td>0.33</td><td>39.93</td><td>39.85</td><td>48.49</td><td>26.04</td><td>34.06</td><td>38.14</td></tr><tr><td>CAT-Seg-H [7]</td><td>1049M</td><td>0.5</td><td>37.98</td><td>37.74</td><td>34.65</td><td>29.04</td><td>37.76</td><td>35.66</td></tr></table>
99
+
100
+ models is even larger for the two other domains. Looking at the dataset-specific performance in Fig. 2 and 3, we observe that the performance varies between datasets and models. While SAN-L is the best-performing model on CUB-200 and DRAM, it has significantly lower performance on CWFID or CHASE DB1 compared to CAT-Seg-L. The model achieves scores between $50\%$ and over $100\%$ of the performance of supervised state-of-the-art in the general domain. Within the other domains, CAT-Seg-L has a performance gap of more than 25pp for most of the datasets.
101
+
102
+ The inference time varies between the models and, in particular, between different model architectures with some models requiring more than ten times higher computational effort indicated by higher inference times. In general, we observe the highest inference times for two-stage mask-based approaches, such as ZSSeg and OVSeg, which are between five to twelve times higher than other mask-based approaches (X-Decoder, OpenSeeD, and SAN). The pixel-based CAT-Seg uses a sliding window approach which requires five passes and therefore results in higher inference times than SAN. Overall, SAN represents the fastest model in our experiments.
103
+
104
+ ![](images/e29f14d7783da089adf0acaf9da61198c85a256b5652020c367417fb9e18cefb.jpg)
105
+ Figure 2: mIoU results for large models on a log scale. The datasets are grouped by their domain and sorted by supervised performance.
106
+
107
+ ![](images/37cd6a0808d90dc29a5a445659260014eaf402a0f1c6be60b4c1c661f401f089.jpg)
108
+ Figure 3: mIoU results of large models in relative comparison to the supervised mIoU on a log scale. 100 is equal to the supervised mIoU.
109
+
110
+ # 5.2 Sensor type evaluation
111
+
112
+ All considered models have been developed for the visual spectrum (i.e., RGB). In the following, we investigate the performance of three different sensor types: multispectral, electromagnetic, and microscopic. Three datasets from MESS allow for a direct comparison between different sensor types. For multispectral sensors, the MESS benchmark includes the IRRG color map for ISPRS Potsdam and WorldFloods. The models are able to process the different color maps and profit from the visual highlighting of vegetation through the infrared channel. This insight might be limited to commonly used color maps because other color maps might be less represented in the pre-training data of CLIP. On electromagnetic and thermal imagery, none of the evaluated models can regularly segment objects on the PST900 dataset. We compared this result to the aligned RGB images from PST900. All models perform significantly better on the RGB images. E.g., CAT-Seg-L reaches a mIoU of $65.55\%$ on RGB images compared to only $25.26\%$ for thermal data. We also tested a pseudo color map that maps the grayscale thermal data to a pseudo color scale, resulting in a similarly low performance. Therefore, we conclude that zero-shot semantic segmentation models are currently not able to sufficiently segment objects in thermal images. Most models are also not able to correctly segment X-ray images in the PAXRay dataset, the second benchmark dataset with an electromagnetic sensor type. However, X-rays do include much more visual features compared to thermal images and CAT-Seg is able to segment some anatomical structures like the lungs. Further, the benchmark includes retina scans in CHASE DB1 and WSI images in CryoNuSeg to evaluate microscopic imagery. Similar to the PAXRay results, most models fail to segment the structures. But CAT-Seg and ZSSeg can locate the requested class. 
Thus, we assume that CLIP and zero-shot semantic segmentation models can understand microscopic concepts, but that correct segmentation fails because of the small segment sizes rather than the image type itself.
113
+
114
+ # 5.3 Multi-domain vs. in-domain evaluation
115
+
116
+ Most zero-shot semantic segmentation models are currently evaluated on five datasets: Pascal VOC, ADE20K-150, ADE20K-847, Pascal Context-59, and Pascal Context-459. Figure 4 compares the average results of the evaluated models on these common datasets (i.e., in-domain datasets) to a multi-domain setting with datasets of the MESS benchmark. Note that the multi-domain datasets contain fewer classes on average, resulting in a much higher random mIoU. We provide the results for each dataset in the supplementary material. While SAN-L has comparable performance to the CAT-Seg models on common datasets, it has a significantly lower mIoU on domain datasets. Further, X-Decoder has a generally lower mIoU on domain datasets compared to other models. X-Decoder does not use CLIP which may explain the limited generalizability of the model. Overall, CAT-Seg is the only model architecture with a higher average mIoU on the domain datasets than common datasets.
117
+
118
+ # 5.4 Language characteristics
119
+
120
+ The differentiation between related classes is relevant in domain-specific use cases like biology. We analyze the influence of class similarity on class-wise IoU in Figure 5. Following Xu et al. [50], we calculated the class similarity as the maximum cosine similarity of the embedding to all other CLIP text embeddings in the label set. Overall, the class IoU does not correlate with the similarity. However, none of the classes with high similarity reaches a desirable IoU (e.g., the Corrosion CS dataset with three classes describing different corrosion stages). All models face difficulties in differentiating these classes. In additional experiments, the model performance significantly improved when considering similar classes as a single class. Also, specialized terms affect the model performance, specifically,
121
+
122
+ Table 3: Comparison of mIoU results for images with different sensor types. Pseudo refers to thermal data mapped to a pseudo color map.
123
+
124
+ <table><tr><td rowspan="2">Model</td><td colspan="2">ISPRS Potsdam</td><td colspan="2">WorldFloods</td><td rowspan="2">Thermal</td><td colspan="2">PST900</td></tr><tr><td>IRRG</td><td>RGB</td><td>IRRG</td><td>RGB</td><td>Pseudo</td><td>RGB</td></tr><tr><td>OVSeg-L [25]</td><td>31.03</td><td>35.46</td><td>31.48</td><td>22.86</td><td>21.89</td><td>21.63</td><td>42.9</td></tr><tr><td>SAN-L [50]</td><td>51.45</td><td>52.06</td><td>48.24</td><td>45.93</td><td>19.01</td><td>19.41</td><td>49.02</td></tr><tr><td>CAT-Seg-L [7]</td><td>51.42</td><td>51.29</td><td>49.86</td><td>45.39</td><td>25.26</td><td>25.43</td><td>65.55</td></tr></table>
125
+
126
+ ![](images/5d5533a4e3ba1e4f1916f9e48bd275b901fda54de2423802535779e4c9a4ec4b.jpg)
127
+ Figure 4: Cross domain settings can be challenging: Average mIoU of commonly used evaluation datasets in comparison to the results on the MESS benchmark. The size represents the parameter count of the models.
128
+
129
+ ![](images/390d042b5a433e917533926d7087da50fbe429c935e663025d7280e72dd3ad37.jpg)
130
+ Figure 5: The class-wise IoU in comparison with the similarity to other labels within the dataset. The similarity is measured by the minimum cosine distance of the class label to all other CLIP text embeddings within the dataset.
131
+
132
+ domain-specific and task-specific labels. Our evaluation covers domain-specific words from medicine and biology, i.e., bird species and anatomical structures like the mediastinum. It shows that CLIP is able to understand domain-specific concepts to a limited extent. We observed higher performance for generic terminology. E.g., all models achieve higher performances on the Kvasir-Instrument dataset when using a generic vocabulary like tool. Utilizing a more precise term like surgical instrument reduces the mIoU. We refer to classes with specified conditions as task-specific classes. In our evaluations, CAT-Seg achieves the best results on task-specific classes. However, CAT-Seg still confuses classes and, e.g., predicts the right shoe and right leg significantly more often than the left side in MHP v1. CAT-Seg models are further biased towards the parked car class in UAVid images, while SAN and OVSeg mostly assign masks to the label moving car. Overall, domain-specific and task-specific vocabulary limits the performance of zero-shot semantic segmentation models.
133
+
134
+ # 5.5 Comparison to SAM
135
+
136
+ For a better understanding of current text-to-mask zero-shot semantic segmentation approaches, we compare them with grounded and oracle versions of SAM. SAM cannot directly process textual inputs, instead, it uses visual prompt inputs, i.e., bounding boxes or points. For the comparison, we implemented three versions of SAM. First, we made use of existing available demos combining Grounding DINO and SAM and extended them by a comprehensive quantitative evaluation. Second, oracle point-to-mask SAM refers to a model that provides a single point for every connected segment
137
+
138
+ Table 4: Domain-averaged mIoU results for Grounded-SAM and SAM with oracle inputs in a point-to-mask and box-to-mask setting. Random, supervised and CAT-Seg-L are provided for reference.
139
+
140
+ <table><tr><td>Model</td><td>Input prompt</td><td>General</td><td>Earth Monitoring</td><td>Medical Sciences</td><td>Engineering</td><td>Agri. and Biology</td><td>Mean</td></tr><tr><td>Random (LB)</td><td></td><td>1.17</td><td>7.11</td><td>29.51</td><td>11.71</td><td>6.14</td><td>10.27</td></tr><tr><td>Best supervised (UB)</td><td></td><td>48.62</td><td>79.12</td><td>89.49</td><td>67.66</td><td>81.94</td><td>70.99</td></tr><tr><td>CAT-Seg-L [7]</td><td></td><td>39.93</td><td>39.85</td><td>48.49</td><td>26.04</td><td>34.06</td><td>38.14</td></tr><tr><td>Gr.-SAM-B [16]</td><td>Grounding</td><td>29.51</td><td>25.97</td><td>37.38</td><td>29.51</td><td>17.66</td><td>28.52</td></tr><tr><td>Gr.-SAM-L [16]</td><td>DINO [27]</td><td>30.32</td><td>26.44</td><td>38.69</td><td>29.25</td><td>17.73</td><td>29.05</td></tr><tr><td>Gr.-SAM-H [16]</td><td></td><td>30.27</td><td>26.44</td><td>38.45</td><td>28.16</td><td>17.67</td><td>28.78</td></tr><tr><td>SAM-B [20]</td><td rowspan="3">Oracle points [44]</td><td>50.41</td><td>38.72</td><td>43.7</td><td>45.16</td><td>57.84</td><td>46.59</td></tr><tr><td>SAM-L [20]</td><td>45.99</td><td>44.03</td><td>55.74</td><td>50.0</td><td>58.23</td><td>49.99</td></tr><tr><td>SAM-H [20]</td><td>36.05</td><td>34.82</td><td>59.58</td><td>47.35</td><td>39.91</td><td>43.0</td></tr><tr><td>SAM-B [20]</td><td rowspan="3">Oracle bounding boxes</td><td>78.5</td><td>73.56</td><td>68.14</td><td>73.29</td><td>86.0</td><td>75.67</td></tr><tr><td>SAM-L [20]</td><td>78.0</td><td>73.27</td><td>64.98</td><td>73.09</td><td>86.99</td><td>74.97</td></tr><tr><td>SAM-H [20]</td><td>65.23</td><td>59.61</td><td>66.58</td><td>66.4</td><td>78.63</td><td>66.55</td></tr></table>
141
+
142
+ in the ground truth mask to simulate the visual input. We use the point sampling approach from RITM [44]. Third, oracle box-to-mask SAM utilizes a single box for every segment in the ground truth mask to simulate the visual input. We consider up to 100 input prompts per image to avoid a large number of very small segments. We later combine all predicted masks by taking the maximum logit value for each pixel. Pixels with only negative logit values are assigned to the background class or marked unlabeled in datasets without a background class. Note that inputting text data as in the models before is fundamentally different from utilizing visual inputs as in our two oracle SAM implementations and our analyses are not intended for a direct comparison but to better understand the potentials of SAM for zero-shot text-to-mask models.
143
+
144
+ In Table 4, we observe that the non-oracle implementation of SAM utilizing Grounding DINO generally exhibits limited performance compared to CAT-Seg text-to-mask models. Oracle versions of SAM receive significantly improved information on the location of the object and, therefore, show a strong performance. Given the perfect information on the location of objects in the image with oracle bounding boxes, the oracle box-to-mask SAM implementation even outperforms supervised semantic segmentation models. Overall, we observe that SAM models achieve a strong performance based on oracle information on the location of the objects. However, text-to-mask zero-shot semantic segmentation models like CAT-Seg outperform the combination of Grounding DINO and SAM. Similar to X-Decoder and OpenSeeD, Grounding DINO does not use CLIP, which results in limited multi-domain performance. The results with oracle bounding boxes suggest that future combinations of SAM with open-vocabulary object detection models based on FMs like CLIP may outperform the current state-of-the-art in zero-shot semantic segmentation.
145
+
146
+ # 5.6 Qualitative analyses
147
+
148
+ In the following, we qualitatively compare the predictions of the three promising text-to-mask zero-shot semantic segmentation models with the ground truth and the grounding version of SAM on four different datasets (autonomous driving, satellite imagery, medical science, and engineering). We visually observe the following characteristics: First, CAT-Seg also visually surpasses the predictions of the other models. Second, across different domains, the predictions of CAT-Seg are largely in line with the ground truth and the segmentation is comparatively fine-grained. Third, we observe that Grounding DINO does not locate most segments and, therefore, Grounded-SAM tends to predict the background class. These qualitative observations are largely in line with our quantitative experiments.
149
+
150
+ ![](images/63902fb4b62d661c4bdb2e5efc6c9e1cca7205c1ba12489a39de906bc69bf4c9.jpg)
151
+ Figure 6: Predictions from selected datasets based on CAT-Seg-L [7], SAN-L [50], OVSeg-L [25], and Grounded-SAM [16].
152
+
153
+ Zero-shot semantic segmentation achieves a remarkable performance on in-domain datasets [7, 50]. Based on the MESS benchmark, we observe that these models can solve some tasks from other domains, but are limited in their applicability to domains like medical science, engineering, and agriculture. We identified a range of challenges: First, we observe that domain-specific and task-specific vocabulary are difficult to handle. Models tend to be confused by labels with a high class similarity as in Corrosion CS. Therefore, we recommend utilizing a generic vocabulary with common class names, which led to improved performances in our experiments (e.g., tool instead of medical instrument in Kvasir-Instrument). Second, differences in the type of the sensor influence the performance of these models which are generally trained on the visual spectrum—for example, thermal data is hard to process. Third, we observe that state-of-the-art text-to-mask approaches outperform Grounded-SAM across multiple domains.
154
+
155
+ # 6 Conclusion
156
+
157
+ Zero-shot semantic segmentation has the potential to make segmentation models more accurate, cheap, flexible, and interactive. However, the current evaluation is limited to in-domain datasets, and previous analyses focused on model properties rather than task characteristics. With the MESS benchmark, we enable a holistic evaluation and invite others to utilize this benchmark to accelerate the field of semantic segmentation across domains to improve its real-world applicability.
158
+
159
+ # 7 Acknowledgements
160
+
161
+ We want to acknowledge the prior work this benchmark builds on. We especially want to emphasize that we leverage works across the AI community that should be recognized and cited. We appreciate the significant effort across the community in the careful collection, annotation, and publication of datasets. In the code repository, we provide additional details of the datasets including links to the corresponding works for citation. Additionally, we want to be explicit that our evaluation across diverse approaches would not be possible without publicly available architectures and corresponding model weights.
162
+
163
+ # References
164
+
165
+ [1] Agarwal, S., Krueger, G., Clark, J., Radford, A., Kim, J. W., and Brundage, M. (2021). Evaluating clip: Towards characterization of broader capabilities and downstream implications. arXiv preprint arXiv:2108.02818.
166
+ [2] Bashkirova, D., Abdelfattah, M., Zhu, Z., Akl, J., Alladkani, F., Hu, P., Ablavsky, V., Calli, B., Bargal, S. A., and Saenko, K. (2022). Zerowaste dataset: Towards deformable object segmentation in cluttered scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21147-21157.
167
+ [3] Bianchi, E. and Hebdon, M. (2021). Corrosion condition state semantic segmentation dataset. University Libraries, Virginia Tech: Blacksburg, VA, USA.
168
+ [4] BSF Swissphoto (2012). ISPRS Potsdam dataset within the ISPRS test project on urban classification, 3D building reconstruction and semantic labeling. https://www.isprs.org/education/benchmarks/UrbanSemLab/default.aspx.
169
+ [5] Bucher, M., Vu, T.-H., Cord, M., and Pérez, P. (2019). Zero-shot semantic segmentation. Advances in Neural Information Processing Systems, 32.
170
+ [6] Cheng, B., Schwing, A., and Kirillov, A. (2021). Per-pixel classification is not all you need for semantic segmentation. Advances in Neural Information Processing Systems, 34.
171
+ [7] Cho, S., Shin, H., Hong, S., An, S., Lee, S., Arnab, A., Seo, P. H., and Kim, S. (2023). CAT-Seg: Cost aggregation for open-vocabulary semantic segmentation. arXiv preprint arXiv:2303.11797v1.
172
+ [8] Cohen, N., Newman, Y., and Shamir, A. (2022). Semantic segmentation in art paintings. Computer Graphics Forum, 41(2):261-275.
173
+
174
+ [9] Ding, J., Xue, N., Xia, G.-S., and Dai, D. (2022a). Decoupling zero-shot semantic segmentation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11583-11592.
175
+ [10] Ding, Z., Wang, J., and Tu, Z. (2022b). Open-vocabulary panoptic segmentation with maskclip. arXiv preprint arXiv:2208.08984.
176
+ [11] Erfani, S. M. H., Wu, Z., Wu, X., Wang, S., and Goharian, E. (2022). Atlantis: A benchmark for semantic segmentation of waterbody images. *Environmental Modelling & Software*, 149:105333.
177
+ [12] Everingham, M., Gool, L. V., Williams, C. K. I., Winn, J. M., and Zisserman, A. (2010). The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 88:303-338.
178
+ [13] Fraz, M. M., Remagnino, P., Hoppe, A., Uyyanonvara, B., Rudnicka, A. R., Owen, C. G., and Barman, S. A. (2012). An ensemble classification-based approach applied to retinal blood vessel segmentation. IEEE Transactions on Biomedical Engineering, 59(9):2538-2548.
179
+ [14] Ghiasi, G., Gu, X., Cui, Y., and Lin, T.-Y. (2022). Scaling open-vocabulary image segmentation with image-level labels. In European Conference on Computer Vision (ECCV), pages 540-557. Springer.
180
+ [15] Haug, S. and Ostermann, J. (2015). A crop/weed field image dataset for the evaluation of computer vision based precision agriculture tasks. In Computer Vision - ECCV 2014 Workshops, pages 105-116. Springer.
181
+ [16] IDEA-Research (2023). Grounded-SAM. https://github.com/IDEA-Research/Grounded-Segment-Anything.
182
+ [17] Islam, M. J., Edge, C., Xiao, Y., Luo, P., Mehtaz, M., Morse, C., Enan, S. S., and Sattar, J. (2020). Semantic segmentation of underwater imagery: Dataset and benchmark. IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1769-1776.
183
+ [18] Jha, D., Ali, S., Emanuelsen, K., Hicks, S. A., Thambawita, V., Garcia-Ceja, E., Riegler, M. A., de Lange, T., Schmidt, P. T., Johansen, H. D., et al. (2021). Kvasir-instrument: Diagnostic and therapeutic tool segmentation dataset in gastrointestinal endoscopy. MultiMedia Modeling: 27th International Conference, MMM 2021, pages 218-229.
184
+ [19] Jia, C., Yang, Y., Xia, Y., Chen, Y.-T., Parekh, Z., Pham, H., Le, Q., Sung, Y.-H., Li, Z., and Duerig, T. (2021). Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning, pages 4904-4916. PMLR.
185
+ [20] Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A. C., Lo, W.-Y., et al. (2023). Segment anything. arXiv preprint arXiv:2304.02643.
186
+ [21] Lacoste, A., Sherwin, E. D., Kerner, H., Alemohammad, H., Lütjens, B., Irvin, J., Dao, D., Chang, A., Gunturkun, M., Drouin, A., et al. (2021). Toward foundation models for earth monitoring: Proposal for a climate change benchmark. arXiv preprint arXiv:2112.00570.
187
+ [22] Lambert, J., Liu, Z., Sener, O., Hays, J., and Koltun, V. (2020). MSeg: A composite dataset for multi-domain semantic segmentation. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2879-2888.
188
+ [23] Li, B., Weinberger, K. Q., Belongie, S., Koltun, V., and Ranftl, R. (2022). Language-driven semantic segmentation. In International Conference on Learning Representations.
189
+ [24] Li, J., Zhao, J., Wei, Y., Lang, C., Li, Y., Sim, T., Yan, S., and Feng, J. (2017). Multiple-human parsing in the wild. arXiv preprint arXiv:1705.07206.
190
+ [25] Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., and Marculescu, D. (2023). Open-vocabulary semantic segmentation with mask-adapted CLIP. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7061-7070.
191
+
192
+ [26] Liang, P., Bommasani, R., Lee, T., Tsipras, D., Soylu, D., Yasunaga, M., Zhang, Y., Narayanan, D., Wu, Y., Kumar, A., et al. (2022). Holistic evaluation of language models. arXiv preprint arXiv:2211.09110.
193
+ [27] Liu, S., Zeng, Z., Ren, T., Li, F., Zhang, H., Yang, J., Li, C., Yang, J., Su, H., Zhu, J., et al. (2023). Grounding DINO: Marrying DINO with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499.
194
+ [28] Liu, Y., Yao, J., Lu, X., Xie, R., and Li, L. (2019). Deepcrack: A deep hierarchical feature learning architecture for crack segmentation. Neurocomputing, 338:139-153.
195
+ [29] Lyu, Y., Vosselman, G., Xia, G.-S., Yilmaz, A., and Yang, M. Y. (2020). UAVid: A semantic segmentation dataset for UAV imagery. ISPRS journal of photogrammetry and remote sensing, 165:108-119.
196
+ [30] Mahbod, A., Schaefer, G., Bancher, B., Low, C., Dorffner, G., Ecker, R., and Ellinger, I. (2021). CryoNuSeg: A dataset for nuclei instance segmentation of cryosectioned H&E-stained histological images. Computers in biology and medicine, 132:104349.
197
+ [31] Mateo-Garcia, G., Veitch-Michaelis, J., Smith, L., Oprea, S. V., Schumann, G., Gal, Y., Baydin, A. G., and Backes, D. (2021). Towards global flood mapping onboard low cost satellites with machine learning. Scientific reports, 11(1):1-12.
198
+ [32] Mazurowski, M. A., Dong, H., Gu, H., Yang, J., Konz, N., and Zhang, Y. (2023). Segment anything model for medical image analysis: An experimental study. Medical Image Analysis, 89:102918.
199
+ [33] Mottaghi, R., Chen, X., Liu, X., Cho, N.-G., Lee, S.-W., Fidler, S., Urtasun, R., and Yuille, A. (2014). The role of context for object detection and semantic segmentation in the wild. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 891-898.
200
+ [34] Mukhoti, J., Lin, T.-Y., Poursaeed, O., Wang, R., Shah, A., Torr, P. H., and Lim, S.-N. (2023). Open vocabulary semantic segmentation with patch aligned contrastive learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19413-19423.
201
+ [35] Nickerson, R. C., Varshney, U., and Muntermann, J. (2013). A method for taxonomy development and its application in information systems. European Journal of Information Systems.
202
+ [36] Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al. (2019). Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32.
203
+ [37] Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., Krueger, G., and Sutskever, I. (2021). Learning transferable visual models from natural language supervision. International Conference on Machine Learning.
204
+ [38] Rahnemoonfar, M., Chowdhury, T., Sarkar, A., Varshney, D., Yari, M., and Murphy, R. R. (2021). Floodnet: A high resolution aerial imagery dataset for post flood scene understanding. IEEE Access, 9:89644-89654.
205
+ [39] Rao, Y., Zhao, W., Chen, G., Tang, Y., Zhu, Z., Huang, G., Zhou, J., and Lu, J. (2022). DenseCLIP: Language-guided dense prediction with context-aware prompting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18082-18091.
206
+ [40] Ren, P., Li, C., Xu, H., Zhu, Y., Wang, G., Liu, J., Chang, X., and Liang, X. (2023). Viewco: Discovering text-supervised segmentation masks via multi-view semantic consistency. arXiv preprint arXiv:2302.10307.
207
+ [41] Sakaridis, C., Dai, D., and Gool, L. V. (2019). Guided curriculum model adaptation and uncertainty-aware evaluation for semantic nighttime image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7374-7383.
208
+
209
+ [42] Seibold, C., Reiß, S., Sarfraz, S., Fink, M. A., Mayer, V., Sellner, J., Kim, M. S., Maier-Hein, K. H., Kleesiek, J., and Stiefelhagen, R. (2022). Detailed Annotations of Chest X-Rays via CT Projection for Report Understanding. Proceedings of the 33th British Machine Vision Conference.
210
+ [43] Shivakumar, S. S., Rodrigues, N., Zhou, A., Miller, I. D., Kumar, V., and Taylor, C. J. (2020). PST900: RGB-thermal calibration, dataset and segmentation network. IEEE international conference on robotics and automation (ICRA), pages 9441–9447.
211
+ [44] Sofiuk, K., Petrov, I. A., and Konushin, A. (2022). Reviving iterative training with mask guidance for interactive segmentation. In 2022 IEEE International Conference on Image Processing (ICIP), pages 3141-3145. IEEE.
212
+ [45] Wah, C., Branson, S., Welinder, P., Perona, P., and Belongie, S. (2011). Caltech-UCSD Birds 200. California Institute of Technology.
213
+ [46] Waqas Zamir, S., Arora, A., Gupta, A., Khan, S., Sun, G., Shahbaz Khan, F., Zhu, F., Shao, L., Xia, G.-S., and Bai, X. (2019). iSAID: A large-scale dataset for instance segmentation in aerial images. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 28-37.
214
+ [47] Wu, X., Fu, X., Liu, Y., Lim, E.-P., Hoi, S. C., and Sun, Q. (2021). A large-scale benchmark for food image segmentation. In Proceedings of the 29th ACM International Conference on Multimedia, pages 506-515.
215
+ [48] Wu, Y., Kirillov, A., Massa, F., Lo, W.-Y., and Girshick, R. (2019). Detectron2. https://github.com/facebookresearch/detectron2.
216
+ [49] Xu, J., De Mello, S., Liu, S., Byeon, W., Breuel, T., Kautz, J., and Wang, X. (2022a). Groupvit: Semantic segmentation emerges from text supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18134-18144.
217
+ [50] Xu, M., Zhang, Z., Wei, F., Hu, H., and Bai, X. (2023). Side adapter network for open-vocabulary semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2945-2954.
218
+ [51] Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., and Bai, X. (2022b). A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In European Conference on Computer Vision, pages 736-753. Springer.
219
+ [52] Yang, J., Li, C., Zhang, P., Xiao, B., Liu, C., Yuan, L., and Gao, J. (2022). Unified contrastive learning in image-text-label space. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19163-19173.
220
+ [53] Yu, F., Chen, H., Wang, X., Xian, W., Chen, Y., Liu, F., Madhavan, V., and Darrell, T. (2020). BDD100K: A diverse driving dataset for heterogeneous multitask learning. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2636-2645.
221
+ [54] Yuan, L., Chen, D., Chen, Y.-L., Codella, N., Dai, X., Gao, J., Hu, H., Huang, X., Li, B., Li, C., Liu, C., Liu, M., Liu, Z., Lu, Y., Shi, Y., Wang, L., Wang, J., Xiao, B., Xiao, Z., Yang, J., Zeng, M., Zhou, L., and Zhang, P. (2021). Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432v1.
222
+ [55] Zhang, H., Li, F., Zou, X., Liu, S., Li, C., Yang, J., and Zhang, L. (2023). A simple framework for open-vocabulary segmentation and detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1020-1031.
223
+ [56] Zhou, B., Zhao, H., Puig, X., Xiao, T., Fidler, S., Barriuso, A., and Torralba, A. (2019). Semantic understanding of scenes through the ADE20K dataset. International Journal of Computer Vision, 127:302-321.
224
+ [57] Zou, X., Dou, Z.-Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., et al. (2023a). Generalized decoding for pixel, image, and language. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15116-15127.
225
+ [58] Zou, X., Yang, J., Zhang, H., Li, F., Li, L., Gao, J., and Lee, Y. J. (2023b). Segment everything everywhere all at once. arXiv preprint arXiv:2304.06718.
whatamessmultidomainevaluationofzeroshotsemanticsegmentation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5fb5c0f441e5260a40aee4dce0b67e78bc69b293abd8d391ab7fff9ad487bd7
3
+ size 649765
whatamessmultidomainevaluationofzeroshotsemanticsegmentation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:485238f481fe07899304d3e1240c29195e26832aab21f23e8ee11d8bea16c5eb
3
+ size 266373
whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/af41eb11-cc6a-4764-a585-c8ce802c8fc5_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d6d927e753b94f5bc1d0429214472cede3d12c904d921e3e4a10ea657f42d17
3
+ size 316005
whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/af41eb11-cc6a-4764-a585-c8ce802c8fc5_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4c3bc067e9e71d300f89dd9bb7374f2eaa31223960abaffdbd7cc14685c18da
3
+ size 366704
whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/af41eb11-cc6a-4764-a585-c8ce802c8fc5_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa2eb172d33940ab215ad905d28e9952083219e1f5d411b85961d21c533cf280
3
+ size 1523865
whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/full.md ADDED
The diff for this file is too large to render. See raw diff
 
whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30b81f01b49e6ed9a6fff09d57a5d6d86ce8d876d58dc3692c2aa87deec07f53
3
+ size 2831846
whatcanasingleattentionlayerlearnastudythroughtherandomfeatureslens/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cba35ad56fcb16b9248c0ded1b1ff6077b1ded73c08bea536ad8ae6220ddb77c
3
+ size 1765533
whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/aa5e9a5b-f5b2-469e-9199-387531b9ac3f_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebc870c499fe7b869550198aa26cb9e45ec0bfba00c768217b177e14cb77b321
3
+ size 134368
whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/aa5e9a5b-f5b2-469e-9199-387531b9ac3f_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86047e96fccbc776b09061e590370793258e5c544df0247eb8afabd265dde2b9
3
+ size 162008
whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/aa5e9a5b-f5b2-469e-9199-387531b9ac3f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce151e6ca189aa1fa21d11ef976fb233a6dd7a4a3ebd3e7794e53053cde40568
3
+ size 3499184
whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/full.md ADDED
@@ -0,0 +1,421 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # What can Large Language Models do in chemistry? A comprehensive benchmark on eight tasks
2
+
3
+ Taicheng Guo,\* Kehan Guo,\* Bozhao Nan, Zhenwen Liang, Zhichun Guo, Nitesh V. Chawla, Olaf Wiest, Xiangliang Zhang
4
+
5
+ University of Notre Dame
6
+
7
+ {tguo2, kguo2, bnan, zliang6, zguo5, nchawla, owiest, xzhang33} @nd.edu
8
+
9
+ # Abstract
10
+
11
+ Large Language Models (LLMs) with strong abilities in natural language processing tasks have emerged and have been applied in various kinds of areas such as science, finance and software engineering. However, the capability of LLMs to advance the field of chemistry remains unclear. In this paper, rather than pursuing state-of-the-art performance, we aim to evaluate capabilities of LLMs in a wide range of tasks across the chemistry domain. We identify three key chemistry-related capabilities including understanding, reasoning and explaining to explore in LLMs and establish a benchmark containing eight chemistry tasks. Our analysis draws on widely recognized datasets facilitating a broad exploration of the capacities of LLMs within the context of practical chemistry. Five LLMs (GPT-4, GPT-3.5, Davinci-003, Llama and Galactica) are evaluated for each chemistry task in zero-shot and few-shot in-context learning settings with carefully selected demonstration examples and specially crafted prompts. Our investigation found that GPT-4 outperformed other models and LLMs exhibit different competitive levels in eight chemistry tasks. In addition to the key findings from the comprehensive benchmark analysis, our work provides insights into the limitation of current LLMs and the impact of in-context learning settings on LLMs' performance across various chemistry tasks. The code and datasets used in this study are available at https://github.com/ChemFoundationModels/ChemLLMBench.
12
+
13
+ # 1 Introduction
14
+
15
+ Large language models (LLMs) have recently demonstrated impressive reasoning abilities across a wide array of tasks. These tasks are not limited to natural language processing, but also extend to various language-related applications within scientific domains [56, 30, 24, 10]. Much of the research on the capacity of LLMs in science has been focused on tasks such as answering medical [30] and scientific questions [24, 25]. However, the exploration of their application to practical tasks in the field of chemistry remains underinvestigated. Although some studies [6, 27, 63, 48] have been conducted, they tend to focus on specific case studies rather than a comprehensive or systematic evaluation. The exploration of LLMs' capabilities within the field of chemistry has the potential to revolutionize this domain and expedite research and development activities [62]. Thus, the question, "What can LLMs do in chemistry?" is a compelling topic of inquiry for both AI researchers and chemists. Nevertheless, there exist two challenges that hinder the answer to the topic and the further development of LLMs in chemistry:
16
+
17
+ - Determining the potential capabilities of LLMs in chemistry requires a systematic analysis of both LLMs and the specific requirements of chemistry tasks. There are different kinds of tasks in chemistry, some of which can be formulated to tasks solved by LLMs while others may not. It is necessary to consider the specific knowledge and reasoning required for each task and assess whether LLMs can effectively acquire and utilize that knowledge.
18
+ - Conducting reliable and wide-ranging evaluation requires diverse experimental settings and limitations, that is, careful consideration and standardization of evaluation procedures, dataset curation, prompt design, and in-context learning strategies. Additionally, the API call time consumption and the randomness of LLMs limit the size of the testing.
19
+
20
+ To address this knowledge gap, we (a group of AI researchers and chemists) have developed a comprehensive benchmark to provide a preliminary investigation into the abilities of LLMs across a diverse range of practical chemistry tasks. Our aim is to gain insights that will be beneficial to both AI researchers and chemists to advance the application of LLMs in chemistry. For AI researchers, we provide insights into the strengths, weaknesses, and limitations of LLMs in chemistry-related tasks, which can inform the further development and refinement of different AI techniques for more effective applications within the field. For chemists, our study provides a better understanding of the tasks in which they can rely on current LLMs. Utilizing our more extensive experimental setup, a broader range of chemistry tasks can be explored to further evaluate the capabilities of LLMs.
21
+
22
+ Our investigation focuses on 8 practical chemistry tasks, covering a diverse spectrum of the chemistry domain. These include: 1) name prediction, 2) property prediction, 3) yield prediction, 4) reaction prediction, 5) retrosynthesis (prediction of reactants from products), 6) text-based molecule design, 7) molecule captioning, and 8) reagents selection. Our analysis draws on widely available datasets including BBBP, Tox21 [65], PubChem [32], USPTO [29, 53, 39], and ChEBI [17, 16]. Five LLMs (GPT-4, GPT-3.5, Davinci-003, Llama, and Galactica) [43] are evaluated for each chemistry task in zero-shot and few-shot in-context learning settings with carefully selected demonstration examples and specific prompts. We highlight the contributions of this paper as follows:
23
+
24
+ - We are the first to establish a comprehensive benchmark to evaluate the abilities of LLMs on a wide range of chemistry tasks. These eight selected tasks, in consultation with chemists, not only encompass a diverse spectrum of the chemistry domain but also demand different abilities such as understanding, reasoning, and explaining using domain-specific chemistry knowledge.
25
+ - We provide a comprehensive experimental framework for testing LLMs in chemistry tasks. To factor in the impact of prompts and demonstration examples in in-context learning, we have assessed multiple input options, focusing on the description of chemistry tasks. Five representative configurations were chosen based on their performance on a validation set, then these selected options were applied on the testing set. The conclusion is made from five repeated evaluations on each task, since GPTs often yield different outputs at different API calls even though the input is the same. We thus believe that our benchmarking process is both reliable and systematic.
26
+ - Our investigations yield broader insights into the performance of LLMs on chemistry tasks. As summarized in Table 2, our findings confirm some anticipated outcomes (e.g., GPT-4 outperforms GPT-3 and Davinci-003), and also reveal unexpected discoveries (e.g., property prediction can be better solved when property label semantics are included in prompts). Our work also contributes to practical recommendations that can guide AI researchers and chemists in leveraging LLMs more effectively in the future (see Section 5).
27
+
28
+ The paper is organized as follows. Related works are presented in Section 2. In section 3, we elaborate on the evaluation process, including an overview of the chemistry tasks, the utilized LLMs and prompts, and the validation and testing settings. In section 4, we summarize the main findings (due to the space limit, evaluation details of each chemistry task can be found in Appendix). Finally, to answer the question "What can LLMs do in chemistry?" we discuss the constraints inherent to LLMs and how different settings related to LLMs affect performance across various chemistry tasks in Section 5. The conclusions are summarized in section 6.
29
+
30
+ # 2 Related Work
31
+
32
+ Large Language Models. The rise of Large Language Models (LLMs) has marked a significant trend in recent natural language processing (NLP) research. This progress has been fuelled by milestones such as the introduction of GPT-3 [4], T0 [52], Flan-T5 [12], Galactica [56] and LLaMa [57]. The
33
+
34
+ recently released GPT-4, an evolution from GPT-3.5 series, has drawn considerable attention for its improvements in language understanding, generation, and planning [43]. Despite the vast potential of LLMs, existing research primarily centers on their performance within general NLP tasks [8, 9]. The scientific disciplines, notably chemistry, have received less focus. The application of LLMs in these specialized domains presents an opportunity for significant advancements. Therefore, we conduct a comprehensive experimental analysis to evaluate the capability of LLMs in chemistry-related tasks.
35
+
36
+ Large Language Model Evaluations. In recent years, the evaluation of LLMs like GPT has become a significant field of inquiry. [11] showed ChatGPT's proficiency in law exams, while technical aspects of GPT-4 were analyzed in [43]. LLMs are also applied in healthcare [14], mathematical problem solving [18], and code generation tasks [37]. Specifically, in healthcare, the utility and safety of LLMs in clinical settings were explored [42]. In the context of mathematical problem-solving, studies [18, 7] have highlighted that LLMs encounter challenges with graduate-level problems, primarily due to difficulties in parsing complex syntax. These studies underscored the complexity of achieving task-specific accuracy and functionality with LLMs. Lastly, AGIEval [66] assessed LLMs' general abilities but noted struggles in complex reasoning tasks.
37
+
38
+ Our work aligns with these evaluations but diverges in its focus on chemical tasks. To our knowledge, this is the first study to transform such tasks to suit LLM processing and to perform a comprehensive evaluation of these models' ability to tackle chemistry-related problems. This focus will contribute to expand our understanding of LLMs' capabilities in specific scientific domains.
39
+
40
+ Large Language Model for Chemistry. Recent efforts integrating LLMs with the field of chemistry generally fall into two distinct categories. One category aims to create a chemistry agent by leveraging LLMs' planning ability to utilize task-related tools. For example, Bran et al [3] developed ChemCrow, which augmented LLMs with chem-expert designed tools for downstream tasks such as organic synthesis and drug discovery. Similarly, by leveraging the planning and execution ability of multiple LLMs, Boiko et al [2] developed an autonomous chemical agent to conduct chemical experiments. The other category involves direct usage of LLMs for downstream tasks in chemistry [27, 62, 6, 28]. While these studies have explored the performance of LLMs in chemistry-related tasks, a systematic evaluation of their capabilities within this domain has been lacking. Consequently, there is a noticeable gap that calls for a meticulous benchmark to thoroughly assess the potential of LLMs in chemistry. Such a benchmark is crucial not only for identifying the strengths and limitations of these models in a specialized scientific domain, but also to guide future improvements and applications.
41
+
42
+ # 3 The Evaluation Process and Setting
43
+
44
+ The evaluation process workflow is depicted in Fig. 1. Guided by co-author Prof. Olaf Wiest (from the Department of Chemistry at the University of Notre Dame), we identify eight tasks in discussion with senior Ph.D. students at the NSF Center for Computer Assisted Synthesis (C-CAS). Following this, we generate, assess, and choose suitable prompts to forward to LLMs. The acquired answers are then evaluated both qualitatively by chemists to identify whether they are helpful in the real-world scenario and quantitatively by selected metrics.
45
+
46
+ Chemistry tasks. In order to explore the abilities of LLMs in the field of chemistry, we concentrate on three fundamental capabilities: understanding, reasoning, and explaining. We examine these competencies through eight diverse and broadly acknowledged practical chemistry tasks. These tasks are summarized in Table 1, in terms of the task type from the perspective of machine learning, the dataset used for the evaluation, as well as the evaluation metrics. The #ICL candidates refers to the number of candidate examples, from which we select $k$ demonstration examples, either randomly or based on similarity searches. These candidate sets are the training sets used in classical machine learning models, e.g., in training classifiers or generative models. We set the test set of 100 instances, randomly sampled from the original testing dataset (non-overlapping with the training set). To reduce the influence of the LLMs randomness on the results, each evaluation experiment is repeated five times and the mean and variance are reported.
47
+
48
+ LLMs. For all tasks, we evaluate the performance of five popular LLMs: GPT-4, GPT-3.5 (referred to as GPT-3.5-turbo, also known as ChatGPT), Davinci-003, Llama, and Galactica.
49
+
50
+ ![](images/011ac4ccd9d58c9a99625704e1d759e08700a1733242e02633346830bc761671.jpg)
51
+ Figure 1: Overview of the evaluation process
52
+ Figure 2: The standardized zero-shot prompt template for all tasks.
53
+
54
+ Table 1: The statistics of all tasks, datasets, the number of ICL/test samples, and evaluation metrics
55
+
56
+ <table><tr><td>Ability</td><td>Task</td><td>Task Type</td><td>Dataset</td><td>#ICL candidates</td><td>#test</td><td>Evaluation Metrics</td></tr><tr><td rowspan="2">Understanding</td><td>Name Prediction</td><td>Generation</td><td>PubChem</td><td>500</td><td>100</td><td>Accuracy</td></tr><tr><td>Property Prediction</td><td>Classification</td><td>BBBP, HIV, BACE, Tox21, ClinTox</td><td>2053, 41127, 1514, 8014, 1484</td><td>100</td><td>Accuracy, F1 score</td></tr><tr><td rowspan="5">Reasoning</td><td>Yield Prediction</td><td>Classification</td><td>Buchwald-Hartwig, Suzuki-Miyaura</td><td>3957, 5650</td><td>100</td><td>Accuracy</td></tr><tr><td>Reaction Prediction</td><td>Generation</td><td>USPTO-Mixed</td><td>409035</td><td>100</td><td>Accuracy, Validity</td></tr><tr><td>Reagents Selection</td><td>Ranking</td><td>Suzuki-Miyaura</td><td>5760</td><td>100</td><td>Accuracy</td></tr><tr><td>Retrosynthesis</td><td>Generation</td><td>USPTO-50k</td><td>40029</td><td>100</td><td>Accuracy, Validity</td></tr><tr><td>Text-Based Molecule Design</td><td>Generation</td><td>ChEBI-20</td><td>26407</td><td>100</td><td>BLEU, Exact Match, etc</td></tr><tr><td>Explaining</td><td>Molecule Captioning</td><td>Generation</td><td>ChEBI-20</td><td>26407</td><td>100</td><td>BLEU, Chemists, etc</td></tr></table>
57
+
58
+ Zero-shot prompt. For each task, we apply a standardized zero-shot prompt template. As shown in Fig. 2, we instruct the LLMs to act in the capacity of a chemist. The content within the brackets is tailored to each task, adapting to its specific inputs and outputs. The responses from LLMs are confined to only returning the desired output without any explanations.
59
+
60
+ You are an expert chemist. Given the [reactants SMILES / molecular description / ...]: [Input], predict the [reaction product SMILES / molecule SMILES / ...] using your experienced chemical [reaction prediction / chemical molecule design / ...] knowledge. No explanations and other information. Only return the [product SMILES / designed molecular SMILES ].
61
+
62
+ Task-specific ICL prompt. ICL is a new paradigm for LLMs where predictions are based solely on contexts enriched with a few demonstration examples [15]. This paper specifically denotes ICL as a few-shot in-context learning approach, excluding the zero-shot paradigm. In order to thoroughly examine the capacities of LLMs within each chemistry-specific task, we design a task-specific ICL prompt template. As shown in Fig. 3, the format of the template is similar to that used in [48]. We also partition our template into four parts: {General Template} {Task-Specific Template} {ICL} {Question}. The {General Template} is almost the same as the zero-shot prompt, instructing the LLMs to play the role of a chemist and specify the chemistry task with its corresponding input and output. Considering that the responses for chemistry-related tasks must be accurate and chemically reasonable, it is crucial to prevent LLMs from generating hallucinated information. To this end, we introduce the {Task-Specific Template} which consists of three main components: [Input explanation], [Output Explanation], and [Output Restrictions], specifically designed to reduce hallucinations. These components are tailored to each task. The {ICL} part is a straightforward
63
+
64
+ concatenation of the demonstration examples and it follows the structure "[Input]: [Input_content] [Output]: [Output_content]". The [Input] and [Output] denote the specific names of each task's input and output, respectively. For example, in the reaction prediction task, the [Input] would be "Reactants+Reagents" and the [Input_content] would be the actual SMILES of reactants and reagents. The [Output] would be "Products" and the [Output_content] would be the SMILES of products. Detailed ICL prompts for each task will be presented in their respective sections that follow. The last {Question} part presents the testing case for LLMs to respond to. Fig. 4 is an example of our name prediction prompt.
65
+
66
+ ![](images/92aa18e9b8a16ee0f33e3b870c7238b0cb9913f8ef211f50585d3c503dd06ff8.jpg)
67
+ Figure 3: An ICL prompt template for all tasks.
68
+
69
+ ![](images/6e28b7488ae189da87c31889fda3d68f06c2262e343e88556bbac49467ab81ba.jpg)
70
+ Figure 4: An ICL prompt example for smiles2iupac prediction
71
+
72
+ ICL strategies. To investigate the impact of the quality and quantity of ICL examples on the performance of each task, we explore two ICL strategies. The quality is determined by the retrieval methods employed for finding similar examples to the sample in question. We conduct a grid search across two strategies: {Random, Scaffold}. In the Random strategy, we randomly select $k$ examples from the ICL candidate pool. In the Scaffold strategy, if the [Input_content] is a molecule SMILES, we use Tanimoto Similarity [55] from Morgan Fingerprint [41] with 2048-bits and radius=2 to calculate the molecular scaffold similarity to find the top- $k$ similar molecule SMILES. If the [Input_content] is a description such as IUPAC name or others, we use Python's built-in difflib.SequenceMatcher tool [49] to find the top- $k$ similar strings. To explore the influence of the quantity of ICL examples on performance, we also perform a grid search for $k$ , the number of ICL examples, in each task.
73
+
74
+ Experiment setup strategy. In property prediction and yield prediction tasks, we perform the grid search of $k$ in $\{4, 8\}$ . In the name prediction, reaction prediction, and retrosynthesis tasks, we perform the grid search of $k$ in $\{5, 20\}$ . In text-based molecule design and molecule captioning tasks, we
75
+
76
+ Table 2: The rank of five LLMs on eight chemistry tasks and performance highlight (NC: not competitive, C: competitive, SC: selectively competitive, acc: accuracy).
77
+
78
+ <table><tr><td>Task</td><td>GPT-4</td><td>GPT-3.5</td><td>Davinci-003</td><td>Llama2-13B-chat</td><td>GAL-30B</td><td>Performance highlight (comparing to baselines if any)</td></tr><tr><td>Name Prediction</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>NC: max. acc. 8% (Table 4)</td></tr><tr><td>Property Prediction</td><td>1</td><td>2</td><td>3</td><td>5</td><td>4</td><td>SC: outperform RF and XGBoost from MoleculeNet [65] (Table 6)</td></tr><tr><td>Yield Prediction</td><td>1</td><td>3</td><td>2</td><td>5</td><td>4</td><td>C: but 16-20% lower acc. than UAGNN [34] (Table 10)</td></tr><tr><td>Reaction Prediction</td><td>1</td><td>3</td><td>2</td><td>5</td><td>4</td><td>NC: 70% lower acc. than Chemformer [26] (Table 11)</td></tr><tr><td>Reagents Selection</td><td>2</td><td>1</td><td>3</td><td>4</td><td>5</td><td>C: 40-50% acc. (Table 12)</td></tr><tr><td>Retrosynthesis</td><td>2</td><td>3</td><td>1</td><td>5</td><td>4</td><td>NC: 40% lower acc. than Chemformer [26] (Table 13)</td></tr><tr><td>Molecule Design</td><td>1</td><td>3</td><td>2</td><td>4</td><td>5</td><td>SC: better than MolT5-Large [17] (Table 14)</td></tr><tr><td>Molecule Captioning</td><td>1</td><td>2</td><td>1</td><td>4</td><td>5</td><td>SC: better than MolT5-Large [17] (Table 15)</td></tr><tr><td>Average rank</td><td>1.25</td><td>2.375</td><td>2.125</td><td>4.5</td><td>4.5</td><td>overall: 3 SC, 2 C, 3 NC</td></tr></table>
79
+
80
+ perform the grid search of $k$ in $\{5, 10\}$ because of the maximum token limitation of LLMs. To reduce the time consumption of API requests caused by testing on the large test set, we first construct a validation set of size 30 which is randomly sampled from the original training set. Then we search $k$ and retrieval strategies ({Random, Scaffold}) on the validation set. Based on the validation set results, we take 5 representative options when testing on 100 instances, which are randomly sampled from the original test set. For each task, we run the evaluation 5 times and report mean and standard deviation.
81
+
82
+ # 4 Experiment Analysis
83
+
84
+ Due to space limitations, we provide details of the evaluation on each chemistry task in Appendix by the following order: name prediction in section A, property prediction in section B, yield prediction in section C, reaction prediction in section D, reagents selection in section E, retrosynthesis in section F, text-based molecule design in section G, and molecule captioning in section H. The detailed results described in the Appendix allow us to approach the question "What can LLMs do in chemistry?" from several directions. We discuss the key findings from our comprehensive benchmark analysis and provide valuable insights by thoroughly analyzing the limitation of LLMs and how different settings related to LLMs affect performance across various chemistry tasks.
85
+
86
+ # 4.1 Can LLMs outperform existing baselines in chemistry tasks?
87
+
88
+ Several classic predictive models based on machine learning (ML) have been developed for specific chemistry tasks. For instance, MolR (Graph Neural Network-based) predicts molecule properties as a binary classification problem [58]. UAGNN achieved state-of-the-art performance in yield prediction [34]. MolT5-Large, a specialized language model based on T5, excels in translating between molecule and text [17]. We conduct a performance analysis of GPT models and compare their results with available baselines, if applicable. The main findings from the investigations are:
89
+
90
+ - GPT-4 outperforms the other models evaluated. The ranking of the models on 8 tasks can be found in Table 2;
91
+ - GPT models exhibit a less competitive performance in tasks demanding precise understanding of molecular SMILES representation, such as name prediction, reaction prediction and retrosynthesis;
92
+ - GPT models demonstrate strong capabilities both qualitatively (in Fig. 14 evaluated by chemists) and quantitatively in text-related explanation tasks such as molecule captioning;
93
+ - For chemical problems that can be converted to classification tasks or ranking tasks, such as property prediction, and yield prediction, GPT models can achieve competitive performance compared to baselines that use classical ML models as classifiers, or even better, as summarized in Table 2.
94
+
95
+ These conclusions are derived from conducting five repeated evaluations on each task, using the best evaluation setting that was discovered through a grid search on the validation set of each task. We designate the performance of GPT models as three categories and provide in-depth discussion next.
96
+
97
+ - Tasks with not competitive (NC) performance. In tasks such as reaction prediction and retrosynthesis, GPT models are worse than existing ML baselines trained by large amounts of training data, partially because of the limitation on understanding molecular SMILES strings. In reaction prediction and retrosynthesis, SMILES strings are present in both the input and output of the GPT models. Without an in-depth understanding of the SMILES strings that represent
98
+
99
+ reactants and products, as well as the reaction process that transforms reactants into products, it will be difficult for GPT models to generate accurate responses, as shown in Table 11 and 13. GPT models exhibit poor performance on the task of name prediction as well (see Table 4). This further validates the notion that GPT models struggle with understanding long strings in formats such as SMILES, IUPAC name, and molecular formula, and make correct translations between them.
100
+
101
+ - Tasks with competitive (C) performance. GPT models can achieve satisfactory results when the chemistry tasks are formulated into the forms of classification (e.g., formatting yield prediction into a high-or-not classification, instead of regression) or ranking (as seen in reagents selection), as illustrated in Fig. 7 and 9. This is understandable, because making choices is inherently simpler than generating products, reactants or names. GPT models can achieve an accuracy of $40\%$ to $50\%$ when asked to select the reactant or solvent or ligand from provided candidates. Although GPT-4's performance on yield prediction falls short compared to the baseline model UAGNN [34] (with $80\%$ versus $96\%$ on the Buchwald-Hartwig dataset, and $76\%$ versus $96\%$ on the Suzuki-coupling dataset), it demonstrates improved performance when given more demonstration examples within the few-shot in-context learning scenario, as reported in Table 10. It is worth noting that the UAGNN model was trained on thousands of examples for these specific reactions. Last, while GPT models exhibit promising performance for yield prediction on the evaluated High-Throughput Experimentation (HTE) datasets, specifically the Buchwald-Hartwig [1] and Suzuki-Miyaura datasets [50], they perform as badly as other ML baselines on more challenging datasets like USPTO-50k [53]. This observation indicates a potential area for future research and improvement in the performance of GPT models on challenging chemistry datasets.
102
+
103
+ - Tasks with selectively competitive (SC) performance. GPT models are selectively competitive on two types of tasks.
104
+
105
+ - In the property prediction task on some datasets (HIV, ClinTox), GPT models outperform the baseline significantly, achieving F1 scores and accuracy nearing 1, as reported in Table 6 and 7. This might be due to the fact that the property labels to be predicted are included in the prompts, with GPT models being simply tasked in responding with yes or no. For example, the prompt includes inhibit HIV replication or drugs failed clinical trials for toxicity reason, and we observed a significant decline in the performance of GPT models upon removing property labels from the prompt (refer to Appendix section B). In contrast, baselines employing machine learning models do not include the semantic meaning of these labels in their input. The input for these models only comprises molecular representations in graph form but no labels.
106
+
107
+ - For tasks related to text, such as text-based molecule design and molecule captioning, GPT models exhibit strong performance due to their language generation capabilities. On the task of text-based molecule design, GPT models outperform the baseline when evaluated using NLP metrics such as BLEU and Levenshtein. However, when it comes to exact match, the accuracy is less than $20\%$ , as reported in Table 14 and 15. This suggests that the molecules designed by GPT models may not be exactly the same as the ground truth. Particularly in the context of molecular design/generation, the exact match is a significant metric. Unlike in natural language generation where there is some allowance for deviation from the input, molecular design demands precise accuracy and chemical validity. However, not being precisely identical to the ground truth does not automatically invalidate a result. Molecules generated by GPT models may still prove to be beneficial and could potentially act as viable alternatives to the ground truth, provided they meet the requirements outlined in the input text and the majority (over $89\%$ ) are chemically valid (see Table 14). Nonetheless, assessing the true utility of these generated molecules, such as evaluating their novelty in real-world applications, can be a time-consuming undertaking.
108
+
109
+ # 4.2 The capability of different LLMs
110
+
111
+ As shown in Table 2, we can find that the GPT-4 model shows better chemical understanding, reasoning, and explaining abilities than Davinci-003, GPT-3.5, Llama and Galactica. This further verifies that the GPT-4 model outperforms the other models in both basic and realistic scenarios [5].
112
+
113
+ # 4.3 The effects of the ICL
114
+
115
+ To investigate the effects of the ICL, we introduced ICL prompting and different ICL retrieval methods, and the different number of ICL examples in each task. Based on the experiments results of
116
+
117
+ 12 different variants of each option and evaluating their performance on the validation set, we have the following three observations:
118
+
119
+ - In all tasks, the performance of ICL prompting is better than zero-shot prompting.
120
+ - In most tasks (in Table 4, 6, 7, 11, 13, 14, 15), using scaffold similarity to retrieve the most similar examples of the question as ICL examples achieves better performance than random sampling.
121
+ - In most tasks (in Table 4, 6, 7, 10, 11, 14, 15), using larger $k$ (more ICL examples) usually achieves better performance than small $k$ (fewer ICL examples).
122
+
123
+ These observations indicate that the quality and quantity of ICL examples play an important role in the performance of ICL prompting [23, 36]. This suggests that it is necessary to design more chemistry-specific ICL methods to build high-quality ICL examples to further improve the ICL prompting performance.
124
+
125
+ # 4.4 Are molecule SELFIES representations more suitable for LLMs than SMILES representations?
126
+
127
+ SELFIES [33] representations are more machine-learning-friendly string representations of molecules. To investigate whether the SELFIES representations are more suitable for LLMs than SMILES representations, we conduct experiments on four tasks, including molecule property prediction, reaction prediction, molecule design and molecule captioning. The experiment results are shown in Table 16, 17, 18, 19. We can observe that the results of using SELFIES in all four tasks are inferior to those of using SMILES. This could be attributed to the fact that the pretraining datasets for LLMs are primarily populated with SMILES-related content rather than SELFIES. Consequently, these models are more attuned to SMILES. However, it's worth mentioning that the occurrence of invalid SELFIES is less frequent than that of invalid SMILES, which aligns with the inherent design of SELFIES to ensure molecular validity.
128
+
129
+ # 4.5 The impact of temperature parameter of LLMs
130
+
131
+ One key hyperparameter that affects the performance of LLMs is temperature, which influences the randomness in the model's predictions. To determine the optimal temperature for each task, we randomly sampled 30 data points from the datasets and performed in-context learning experiments across various temperature settings. While optimal temperatures determined on the validation set may not always yield optimal results on the test set, our methodology is primarily designed to conserve token usage and API query time. To address potential discrepancies between validation and test sets, we performed targeted temperature testing on the test sets for two molecular property prediction datasets: BBBP and BACE. Our results are summarized in Table 3. For these tests, we employed the GPT-4 model (using scaffold sampling with $k = 8$ ) and set temperature values $t = [0.2, 0.4, 0.6, 0.8, 1]$ . The result reveals that variations in the temperature parameter have a marginal impact on test performance, with fluctuations of less than 0.05 observed in both F1 and accuracy scores. These results validate the robustness of our initial sampling approach and underscore the reliability of our findings across different settings.
132
+
133
+ Table 3: The F1(↑) and accuracy(↑) score of GPT-4 model(scaffold sampling, $k = 8$ ) on different temperature setting.
134
+
135
+ <table><tr><td>F1(↑)</td><td>BBBP</td><td>BACE</td><td>Accuracy(↑)</td><td>BBBP</td><td>BACE</td></tr><tr><td>GPT-4(t=0.2)</td><td>0.667 ± 0.029</td><td>0.741 ± 0.019</td><td>GPT-4(t=0.2)</td><td>0.650 ± 0.028</td><td>0.743 ± 0.019</td></tr><tr><td>GPT-4(t=0.4)</td><td>0.712 ± 0.014</td><td>0.728 ± 0.024</td><td>GPT-4(t=0.4)</td><td>0.691 ± 0.017</td><td>0.729 ± 0.024</td></tr><tr><td>GPT-4(t=0.6)</td><td>0.683 ± 0.016</td><td>0.736 ± 0.020</td><td>GPT-4(t=0.6)</td><td>0.659 ± 0.016</td><td>0.736 ± 0.019</td></tr><tr><td>GPT-4(t=0.8)</td><td>0.686 ± 0.030</td><td>0.744 ± 0.025</td><td>GPT-4(t=0.8)</td><td>0.661 ± 0.032</td><td>0.745 ± 0.025</td></tr><tr><td>GPT-4(t=1.0)</td><td>0.684 ± 0.023</td><td>0.756 ± 0.025</td><td>GPT-4(t=1.0)</td><td>0.660 ± 0.021</td><td>0.757 ± 0.025</td></tr></table>
136
+
137
+ # 5 Discussion
138
+
139
+ # 5.1 Limitation of LLMs on understanding molecular SMILES
140
+
141
+ A significant limitation of LLMs is their lack of understanding of molecular representations in SMILES strings, which in many cases leads to inaccurate or inconsistent results as shown in Section A for the translation of different ways to name molecules. SMILES (Simplified Molecular Input Line Entry System) [60, 61] is a widely used textual representation for chemical structures. For example, the SMILES string for ethanol, a simple alcohol, is "CCO". This string represents a molecule with two carbon atoms (C) connected by a single bond and an oxygen atom (O) connected to the second carbon atom. SMILES strings can serve as both input and output for LLMs, alongside other natural language text. However, several issues make it challenging for LLMs to accurately understand and interpret SMILES strings: 1) Hydrogen atoms are not explicitly represented in SMILES strings, as they can be inferred based on the standard bonding rules. LLMs frequently struggle to infer these implicit hydrogen atoms and may even fail at simple tasks like counting the number of atoms in a molecule [27, 6]. 2) A given molecule can have multiple valid SMILES representations, which can lead to ambiguity if not properly processed or standardized. LLMs may thus fail to consistently recognize and compare molecular structures represented by different SMILES strings. 3) LLMs do not have any inherent understanding of SMILES strings, and treat them as a sequence of characters or subwords. When processing long SMILES strings, LLMs rely on the byte-pair encoding tokenization technique, which can break the string into smaller pieces or subwords in ways that do not represent the molecular structure and properties of molecules represented by SMILES strings. 
Because many tasks in cheminformatics rely on the accurate representation of a molecule by SMILES strings, the non-competitive performance of GPT models in converting structures into SMILES strings (and vice versa) affects downstream tasks such as retrosynthesis, reaction and name prediction. LLMs that have an enhanced ability of handling molecular structures and their specific attributes or coupling to existing tools such as RDKit [35] will be needed.
142
+
143
+ # 5.2 The limitations of current evaluation methods
144
+
145
+ Although in Text-Based Molecule Design and Molecule Captioning tasks, GPT models show competitive performance compared to the baseline in some metrics (BLEU, Levenshtein, ROUGE, FCD, etc), we observe that the exact match of GPT models is inferior to the baseline in the Text-Based Molecule Design task and the GPT models generate some descriptions which violate chemical facts. This divergence between metrics and real-world scenarios mainly arises because, unlike many natural language processing tasks that can be suitably evaluated by sentence-level matching evaluation metrics, chemistry-related tasks necessitate exact matching for SMILES and precise terminology in descriptions. These findings spotlight the limitations of current evaluation metrics and underscore the need for the development of chemistry-specific metrics.
146
+
147
+ # 5.3 Hallucination of LLMs in chemistry
148
+
149
+ Our evaluation experiments across various tasks reveal two primary types of hallucinations exhibited by LLMs in the domain of chemistry. The first type occurs when the input is given in SMILES format (e.g., name prediction); LLMs occasionally struggle with interpreting these SMILES correctly. For instance, they may fail to recognize the number of atoms or certain functional groups within molecules during name prediction tasks. The second type of hallucination arises when the expected output from LLMs should be in the form of SMILES (e.g., reaction prediction and retrosynthesis). Here, LLMs may produce molecules that are chemically unreasonable, suggesting a gap in understanding what constitutes valid SMILES. Hallucination issues represent a key challenge with LLMs, particularly in the field of chemistry which necessitates exact matching of SMILES and adherence to strict chemical facts [62]. Current LLMs need further investigation into this problem.
150
+
151
+ # 5.4 Prospects of LLMs for chemistry
152
+
153
+ Overall, through an exhaustive set of experiments and analyses, we outline several promising avenues for the application of LLMs in the field of chemistry. While LLMs underperform relative to baselines across a majority of tasks, it's important to note that LLMs leverage only a few examples to solve chemistry problems, whereas baselines are trained on extensive, task-specific datasets and are limited
154
+
155
+ to certain tasks. This observation provides valuable insights into the potential of LLMs' generalized intelligence in the domain of chemistry. The employment of advanced prompting techniques such as Chain-of-thought (CoT) [59], Decomposed Prompting [31] could potentially boost the capacity of LLMs to perform complex reasoning. On the other hand, LLMs display a considerable amount of hallucinations in chemistry tasks, indicating that current LLMs may not yet possess the necessary capabilities to solve practical chemistry problems effectively. However, with continuous development of LLMs and further research into methods to avoid hallucinations, we are optimistic that LLMs can significantly enhance their problem-solving abilities in the field of chemistry.
156
+
157
+ # 5.5 Impact of generating harmful chemicals
158
+
159
+ Our work demonstrates that LLMs can generate chemically valid molecules. However, it's crucial to acknowledge and mitigate the risks of AI misuse, such as generating hazardous substances. While advancements in AI-enabled chemistry have the potential to bring about groundbreaking medicines and sustainable materials, the same technology can be misused to create toxic or illegal substances. This dual-edged potential emphasizes the necessity for stringent oversight. Without careful regulation, these tools could not only pose significant health and safety hazards but also create geopolitical and security challenges. Consequently, as we harness the capabilities of LLMs in the field of chemistry, we concur with earlier research on generative models in chemistry [2, 3] that it is vital for developers to establish robust safeguards and ethical guidelines to deter harmful applications. This is akin to the limitations imposed on popular search engines, which can also be exploited to find information about dangerous chemicals or procedures online.
160
+
161
+ # 5.6 Broader Impacts
162
+
163
+ Our work has broad impacts across multiple dimensions. First, it offers valuable insights and recommendations for both AI researchers and chemists in academia and industry. These perspectives enhance the effective utilization of LLMs and guide future advancements in the field. Second, our objective evaluation of LLMs helps alleviate concerns regarding the replacement of chemists by AI. This aspect contributes to public education, addressing misconceptions and fostering a better understanding of the role of AI in chemistry. Furthermore, we provide a comprehensive experimental framework for testing LLMs in chemistry tasks, which can also be applicable to other domains. This framework serves as a valuable resource for researchers seeking to evaluate LLMs in diverse fields. However, it is important to recognize the ethical and societal implications associated with our work. Additionally, concerns about job displacement in the chemical industry may arise, and efforts should be made to address these challenges and ensure a responsible and equitable adoption of AI technologies.
164
+
165
+ # 6 Conclusion and Future Work
166
+
167
+ In this paper, we summarize the required abilities of LLMs in chemistry and construct a comprehensive benchmark to evaluate the five most popular LLMs (GPT-4, GPT-3.5, Davinci-003, Llama and Galactica) on eight widely-used chemistry tasks. The experiment results show that LLMs perform less competitively in generative tasks which require in-depth understanding of molecular SMILES strings, such as reaction prediction, name prediction, and retrosynthesis. LLMs show competitive performance in tasks that are in classification or ranking formats such as yield prediction and reagents selection. LLMs are selectively competitive on tasks involving text in prompts such as property prediction and text-based molecule design, or explainable tasks such as molecule captioning. These experiments indicate the potential of LLMs in chemistry tasks and the need for further improvement. We will collaborate with more chemists in the C-CAS group, progressively integrating a wider range of tasks that are both novel and practical. We hope our work can address the gap between LLMs and the chemistry research field, inspiring future research to explore the potential of LLMs in chemistry.
168
+
169
+ # Acknowledgments and Disclosure of Funding
170
+
171
+ This work was supported by the National Science Foundation (CHE-2202693) through the NSF Center for Computer Assisted Synthesis (C-CAS).
172
+
173
+ # References
174
+
175
+ [1] Derek T Ahneman, Jesus G Estrada, Shishi Lin, Spencer D Dreher, and Abigail G Doyle. Predicting reaction performance in c–n cross-coupling using machine learning. Science, 360 (6385):186–190, 2018.
176
+ [2] Daniil A Boiko, Robert MacKnight, and Gabe Gomes. Emergent autonomous scientific research capabilities of large language models. arXiv preprint arXiv:2304.05332, 2023.
177
+ [3] Andres M Bran, Sam Cox, Andrew D White, and Philippe Schwaller. Chemcrow: Augmenting large-language models with chemistry tools. arXiv preprint arXiv:2304.05376, 2023.
178
+ [4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
179
+ [5] Sébastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, Harsha Nori, Hamid Palangi, Marco Tulio Ribeiro, and Yi Zhang. Sparks of artificial general intelligence: Early experiments with gpt-4, 2023.
180
+ [6] Cayque Monteiro Castro Nascimento and André Silva Pimentel. Do large language models understand chemistry? a conversation with chatgpt. Journal of Chemical Information and Modeling, 63(6):1649-1655, 2023.
181
+ [7] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Kaijie Zhu, Hao Chen, Linyi Yang, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. A survey on evaluation of large language models. arXiv preprint arXiv:2307.03109, 2023.
182
+ [8] Xiuying Chen, Hind Alamro, Mingzhe Li, Shen Gao, Xiangliang Zhang, Dongyan Zhao, and Rui Yan. Capturing relations between scientific papers: An abstractive model for related work section generation. In Proc. of ACL, 2021.
183
+ [9] Xiuying Chen, Hind Alamro, Mingzhe Li, Shen Gao, Rui Yan, Xin Gao, and Xiangliang Zhang. Target-aware abstractive related work generation with contrastive learning. In Proc. of SIGIR, 2022.
184
+ [10] Xiuying Chen, Mingzhe Li, Shen Gao, Rui Yan, Xin Gao, and Xiangliang Zhang. Scientific paper extractive summarization enhanced by citation graphs. In Proc. of EMNLP, 2022.
185
+ [11] Jonathan Choi, Kristin Hickman, Amy Monahan, and Daniel Schwarcz. Chatgpt goes to law school. Journal of Legal Education, 2023.
186
+ [12] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022.
187
+ [13] Connor W Coley, Regina Barzilay, Tommi S Jaakkola, William H Green, and Klavs F Jensen. Prediction of organic reaction outcomes using machine learning. ACS central science, 3(5): 434-443, 2017.
188
+ [14] Debadutta Dash, Rahul Thapa, Juan M Banda, Akshay Swaminathan, Morgan Cheatham, Mehr Kashyap, Nikesh Kotecha, Jonathan H Chen, Saurabh Gombar, Lance Downing, et al. Evaluation of gpt-3.5 and gpt-4 for supporting real-world information needs in healthcare delivery. arXiv preprint arXiv:2304.13714, 2023.
189
+ [15] Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Zhiyong Wu, Baobao Chang, Xu Sun, Jingjing Xu, Lei Li, and Zhifang Sui. A survey on in-context learning, 2023.
190
+ [16] Carl Edwards, ChengXiang Zhai, and Heng Ji. Text2Mol: Cross-modal molecule retrieval with natural language queries. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 595-607, Online and Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.emnlp-main.47. URL https://aclanthology.org/2021.emnlp-main.47.
191
+
192
+ [17] Carl Edwards, Tuan Lai, Kevin Ros, Garrett Honke, and Heng Ji. Translation between molecules and natural language. arXiv preprint arXiv:2204.11817, 2022.
193
+ [18] Simon Frieder, Luca Pinchetti, Ryan-Rhys Griffiths, Tommaso Salvatori, Thomas Lukasiewicz, Philipp Christian Petersen, Alexis Chevalier, and Julius Berner. Mathematical capabilities of chatgpt. arXiv preprint arXiv:2301.13867, 2023.
194
+ [19] Taicheng Guo, Changsheng Ma, Xiuying Chen, Bozhao Nan, Kehan Guo, Shichao Pei, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Modeling non-uniform uncertainty in reaction prediction via boosting and dropout. arXiv preprint arXiv:2310.04674, 2023.
195
+ [20] Taicheng Guo, Lu Yu, Basem Shihada, and Xiangliang Zhang. Few-shot news recommendation via cross-lingual transfer. In Proceedings of the ACM Web Conference 2023, WWW '23, page 1130–1140, New York, NY, USA, 2023. Association for Computing Machinery. ISBN 9781450394161. doi: 10.1145/3543507.3583383. URL https://doi.org/10.1145/3543507.3583383.
196
+ [21] Zhichun Guo, Chuxu Zhang, Wenhao Yu, John Herr, Olaf Wiest, Meng Jiang, and Nitesh V Chawla. Few-shot graph learning for molecular property prediction. In Proceedings of the Web Conference 2021, pages 2559-2567, 2021.
197
+ [22] Zhichun Guo, Bozhao Nan, Yijun Tian, Olaf Wiest, Chuxu Zhang, and Nitesh V Chawla. Graph-based molecular representation learning. arXiv preprint arXiv:2207.04869, 2022.
198
+ [23] Yaru Hao, Yutao Sun, Li Dong, Zhixiong Han, Yuxian Gu, and Furu Wei. Structured prompting: Scaling in-context learning to 1,000 examples, 2022.
199
+ [24] Dan Hendrycks, Collin Burns, Steven Basart, Andrew Critch, Jerry Li, Dawn Song, and Jacob Steinhardt. Aligning ai with shared human values. Proceedings of the International Conference on Learning Representations (ICLR), 2021.
200
+ [25] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. Proceedings of the International Conference on Learning Representations (ICLR), 2021.
201
+ [26] Ross Irwin, Spyridon Dimitriadis, Jiazhen He, and Esben Jannik Bjerrum. Chemformer: a pretrained transformer for computational chemistry. Machine Learning: Science and Technology, 3(1):015022, 2022.
202
+ [27] Kevin Jablonka, Philippe Schwaller, Andres Ortega-Guerrero, and Berend Smit. Is gpt-3 all you need for low-data discovery in chemistry? 10.26434/chemrxiv-2023-fw8n4, 2023.
203
+ [28] Kevin Maik Jablonka, Qianxiang Ai, Alexander Al-Feghali, Shruti Badhwar, Joshua D Bran, Stefan Bringuier, L Catherine Brinson, Kamal Choudhary, Defne Circi, Sam Cox, et al. 14 examples of how llms can transform materials science and chemistry: A reflection on a large language model hackathon. arXiv preprint arXiv:2306.06283, 2023.
204
+ [29] Wengong Jin, Connor W. Coley, Regina Barzilay, and Tommi Jaakkola. Predicting organic reaction outcomes with weisfeiler-lehman network, 2017.
205
+ [30] Rehan Ahmed Khan, Masood Jawaid, Aymen Rehan Khan, and Madiha Sajjad. ChatGPT - reshaping medical education and clinical management. Pakistan Journal of Medical Sciences, 39(2):605, 2023.
206
+ [31] Tushar Khot, Harsh Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. arXiv preprint arXiv:2210.02406, 2022.
207
+ [32] Sunghwan Kim, Jie Chen, Tiejun Cheng, Asta Gindulyte, Jia He, Siqian He, Qingliang Li, Benjamin A Shoemaker, Paul A Thiessen, Bo Yu, et al. Pubchem 2019 update: improved access to chemical data. Nucleic acids research, 47(D1):D1102-D1109, 2019.
208
+
209
+ [33] Mario Krenn, Florian Häse, AkshatKumar Nigam, Pascal Friederich, and Alan Aspuru-Guzik. Self-referencing embedded strings (SELFIES): A $100\%$ robust molecular string representation. Machine Learning: Science and Technology, 1(4):045024, oct 2020. doi: 10.1088/2632-2153/aba947. URL https://doi.org/10.1088%2F2632-2153%2Faba947.
210
+ [34] Youngchun Kwon, Dongseon Lee, Youn-Suk Choi, and Seokho Kang. Uncertainty-aware prediction of chemical reaction yields with graph neural networks. Journal of Cheminformatics, 14:1-10, 2022.
211
+ [35] G. A. Landrum. Rdkit: Open-source cheminformatics software. http://www.rdkit.org, 2020.
212
+ [36] Itay Levy, Ben Bogin, and Jonathan Berant. Diverse demonstrations improve in-context compositional generalization, 2022.
213
+ [37] Jiawei Liu, Chunqiu Steven Xia, Yuyao Wang, and Lingming Zhang. Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. arXiv preprint arXiv:2305.01210, 2023.
214
+ [38] Zequn Liu, Wei Zhang, Yingce Xia, Lijun Wu, Shufang Xie, Tao Qin, Ming Zhang, and TieYan Liu. Molxpt: Wrapping molecules with text for generative pre-training. arXiv preprint arXiv:2305.10688, 2023.
215
+ [39] Daniel Mark Lowe. Extraction of chemical structures and reactions from the literature. PhD thesis, University of Cambridge, 2012.
216
+ [40] Frederic P Miller, Agnes F Vandome, and John McBrewster. Levenshtein distance: Information theory, computer science, string (computer science), string metric, Damerau-Levenshtein distance, spell checker, hamming distance, 2009.
217
+ [41] Harry L Morgan. The generation of a unique machine description for chemical structures-a technique developed at chemical abstracts service. Journal of chemical documentation, 5(2): 107-113, 1965.
218
+ [42] Harsha Nori, Nicholas King, Scott Mayer McKinney, Dean Carignan, and Eric Horvitz. Capabilities of gpt-4 on medical challenge problems. arXiv preprint arXiv:2303.13375, 2023.
219
+ [43] OpenAI. Gpt-4 technical report, 2023.
220
+ [44] Damith Perera, Joseph W Tucker, Shalini Brahmbhatt, Christopher J Helal, Ashley Chong, William Farrell, Paul Richardson, and Neal W Sach. A platform for automated nanomole-scale reaction screening and micromole-scale synthesis in flow. Science, 359(6374):429-434, 2018.
221
+ [45] Kristina Preuer, Philipp Renz, Thomas Unterthiner, Sepp Hochreiter, and Gunter Klambauer. Fréchet chemnet distance: a metric for generative models for molecules in drug discovery. Journal of chemical information and modeling, 58(9):1736-1741, 2018.
222
+ [46] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551, 2020.
223
+ [47] Kohulan Rajan, Achim Zielesny, and Christoph Steinbeck. Stout: Smiles to iupac names using neural machine translation. Journal of Cheminformatics, 13(1):1-14, 2021.
224
+ [48] Mayk Caldas Ramos, Shane S Michtavy, Marc D Porosoff, and Andrew D White. Bayesian optimization of catalysts with in-context learning. arXiv preprint arXiv:2304.05341, 2023.
225
+ [49] John W. Ratcliff and David E. Metzener. Pattern matching: The gestalt approach, 1988.
226
+ [50] Brandon J Reizman, Yi-Ming Wang, Stephen L Buchwald, and Klavs F Jensen. Suzuki-miyaura cross-coupling optimization enabled by automated feedback. Reaction chemistry & engineering, 1(6):658-666, 2016.
227
+ [51] Mandana Saebi, Bozhao Nan, John E Herr, Jessica Wahlers, Zhichun Guo, Andrzej M Zurański, Thierry Kogej, Per-Ola Norrby, Abigail G Doyle, Nitesh V Chawla, et al. On the use of real-world datasets for reaction yield prediction. Chemical Science, 2023.
228
+
229
+ [52] Victor Sanh, Albert Webson, Colin Raffel, Stephen H Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Teven Le Scao, Arun Raja, et al. Multitask prompted training enables zero-shot task generalization. arXiv preprint arXiv:2110.08207, 2021.
230
+ [53] Nadine Schneider, Nikolaus Stiefl, and Gregory A Landrum. What's what: The (nearly) definitive guide to reaction role assignment. Journal of chemical information and modeling, 56 (12):2336-2346, 2016.
231
+ [54] Philippe Schwaller, Teodoro Laino, Théophile Gaudin, Peter Bolgar, Christopher A Hunter, Costas Bekas, and Alpha A Lee. Molecular transformer: a model for uncertainty-calibrated chemical reaction prediction. ACS central science, 5(9):1572-1583, 2019.
232
+ [55] Taffee T Tanimoto. Elementary mathematical theory of classification and prediction. Journal of Biomedical Science and Engineering, 1958.
233
+ [56] Ross Taylor, Marcin Kardas, Guillem Cucurull, Thomas Scialom, Anthony Hartshorn, Elvis Saravia, Andrew Poulton, Viktor Kerkez, and Robert Stojnic. Galactica: A large language model for science. arXiv preprint arXiv:2211.09085, 2022.
234
+ [57] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
235
+ [58] Hongwei Wang, Weijiang Li, Xiaomeng Jin, Kyunghyun Cho, Heng Ji, Jiawei Han, and Martin D Burke. Chemical-reaction-aware molecule representation learning. arXiv preprint arXiv:2109.09888, 2021.
236
+ [59] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903, 2022.
237
+ [60] David Weininger. Smiles, a chemical language and information system. 1. introduction to methodology and encoding rules. J. Chem. Inf. Comput. Sci., 28:31-36, 1988.
238
+ [61] David Weininger, Arthur Weininger, and Joseph L. Weininger. Smiles. 2. algorithm for generation of unique smiles notation. J. Chem. Inf. Comput. Sci., 29:97-101, 1989.
239
+ [62] A.D. White. The future of chemistry is language. 2023.
240
+ [63] Andrew D. White, Glen M. Hocky, Heta A. Gandhi, Mehrad Ansari, Sam Cox, Geemi P. Wellawatte, Subarna Sasmal, Ziyue Yang, Kangxin Liu, Yuvraj Singh, and Willmor J. Peña Ccoa. Assessment of chemistry knowledge in large language models that generate code. Digital Discovery, 2:368-376, 2023. doi: 10.1039/D2DD00087C. URL http://dx.doi.org/10.1039/D2DD00087C.
241
+ [64] Genta Indra Winata, Samuel Cahyawijaya, Zihan Liu, Zhaojiang Lin, Andrea Madotto, and Pascale Fung. Are multilingual models effective in code-switching? arXiv preprint arXiv:2103.13309, 2021.
242
+ [65] Zhenqin Wu, Bharath Ramsundar, Evan N Feinberg, Joseph Gomes, Caleb Geniesse, Aneesh S Pappu, Karl Leswing, and Vijay Pande. Moleculenet: a benchmark for molecular machine learning. Chemical science, 9(2):513-530, 2018.
243
+ [66] Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023.
244
+
245
+ # Appendix
246
+
247
+ # A Name Prediction
248
+
249
+ For one molecule, there are different chemical naming conventions and representations such as SMILES, IUPAC names, and graphic molecular formula. To investigate whether GPT models have the basic chemical name understanding ability, we construct four chemical name prediction tasks that include SMILES to IUPAC name translation (smiles2iupac), IUPAC name to SMILES translation (iupac2smiles), SMILES to molecule formula translation (smiles2formula), and IUPAC name to molecule formula translation (iupac2formula). We collect 630 molecules and their corresponding names including SMILES, IUPAC name, and molecule formula from PubChem [32]. We randomly sample 500 molecules as the ICL candidates, and other 30 molecules as the validation set, and other 100 molecules as the test set. For all name translation tasks, we use the exact match accuracy as the metric to evaluate the performance.
250
+
251
+ ICL Prompt. One example of the smiles2iupac prediction is shown in Figure 5. For other name translation tasks, we only change the underlined parts that represent different tasks and their corresponding input names and output names.
252
+
253
+ ![](images/e7ba835382d19c7046dc62e44a672643ee9f0427951871d19ca7279af1863de0.jpg)
254
+ Figure 5: An ICL prompt example for smiles2iupac prediction
255
+
256
+ Results. The results are reported in Table 4 (we only report representative methods along with their optimal prompt settings via grid search on validation set). In all four name prediction tasks, the accuracy of the best method is extremely low (0.014 in the iupac2smiles task, 0.086 in the smiles2formula task, 0.118 in the iupac2formula task) or even 0 (in the smiles2iupac task). This indicates the LLMs lack basic chemical name understanding ability. The accuracy of Davinci-003 is considerably inferior to other models.
257
+
258
+ Case studies. Example results generated by GPT-4 (Scaffold, $k = 20$ ) method for each task are shown in Table 5. In all tasks, the GPT-4 model gives the wrong answers. In the smiles2formula task, we can observe that GPT models cannot even recognize the number of Carbon and infer the correct number of Hydrogen, demonstrating the bad chemical understanding ability of GPT models. For prospects, some pre-training technologies such as wrapping molecules with text [38] or code-switch [64, 20] may be helpful to align different chemical names of the same molecule to help improve LLMs' chemical understanding.
259
+
260
+ Table 4: The accuracy $(\uparrow)$ of LLMs in 4 different name prediction tasks. The best LLM is in bold font. Here $k$ is the number of examples used in few-shot ICL. The baseline is underlined and "-" indicates that STOUT cannot solve the smiles2formula and iupac2formula tasks.
261
+
262
+ <table><tr><td>Method</td><td>smiles2iupac</td><td>iupac2smiles</td><td>smiles2formula</td><td>iupac2formula</td></tr><tr><td>STOUT [47]</td><td>0.55</td><td>0.7</td><td>-</td><td>-</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0</td><td>0.008±0.008</td><td>0.048±0.022</td><td>0.092±0.018</td></tr><tr><td>GPT-4 (Scaffold, k=5)</td><td>0</td><td>0.014±0.009</td><td>0.058±0.015</td><td>0.118±0.022</td></tr><tr><td>GPT-4 (Scaffold, k=20)</td><td>0</td><td>0.012±0.004</td><td>0.086±0.036</td><td>0.084±0.005</td></tr><tr><td>GPT-4 (Random, k=20)</td><td>0</td><td>0.010±0.007</td><td>0.070±0.032</td><td>0.076±0.011</td></tr><tr><td>GPT-3.5 (Scaffold, k=20)</td><td>0</td><td>0.010±0.000</td><td>0.052±0.004</td><td>0.044±0.009</td></tr><tr><td>Davinci-003 (Scaffold, k=20)</td><td>0</td><td>0</td><td>0.006±0.005</td><td>0.018±0.004</td></tr><tr><td>Llama2-13B-chat (Scaffold, k=20)</td><td>0</td><td>0</td><td>0.010±0.007</td><td>0</td></tr><tr><td>GAL-30B (Scaffold, k=10)</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table>
263
+
264
+ Table 5: Example results generated by GPT-4 (Scaffold, $k = {20}$ ) method for different tasks
265
+
266
+ <table><tr><td>Task</td><td>Input</td><td>Ground Truth</td><td>Output of GPT-4 (Scaffold, k=20)</td></tr><tr><td>smiles2iupac</td><td>CCOC(=O)C(C=C)=O=C(C)N</td><td>ethyl 2-acetyl-3-aminobut-2-enoate</td><td>ethyl 2-methyl-5-oxo-2-azahept-4-en-3-oate</td></tr><tr><td>iupac2smiles</td><td>ethyl 2-acetyl-3-aminobut-2-enoate</td><td>CCOC(=O)C(C=C)=O=C(C)N</td><td>CCOC(=O)C=C(C)C(=N)C</td></tr><tr><td>smiles2formula</td><td>Cc1noc(CCn2cc[h]c2=O)n1</td><td>C8H10N4O2</td><td>C9H10N4O2</td></tr><tr><td>iupac2formula</td><td>(R)-(1-benzylquinolin-1-ium-4-yl)-(5-ethenyl-1-azabicyclo[2.2.2]octan-2-yl)methanol;chloride</td><td>C26H29ClN2O</td><td>C23H27ClN2O</td></tr></table>
267
+
268
+ # B Molecule Property Prediction
269
+
270
+ Molecule property prediction [21, 58] is a fundamental task in computational chemistry that has been gaining significant attention in recent years due to its potential for drug discovery, material science, and other areas in chemistry. The task involves using machine learning techniques [22] to predict the chemical and physical properties of a given molecule, based on its molecular structure. We aim to further explore the potential of LLMs in molecular property prediction and assess their performance on a set of benchmark datasets, such as BBBP (MIT license), HIV (MIT license), BACE (MIT license), Tox21 (MIT license), and ClinTox (MIT license), which were originally introduced by [65]. The datasets are made up of extensive collections of SMILES, paired with binary labels that highlight the particular property being evaluated, such as BBBP: Blood-Brain Barrier Penetration, HIV: inhibit HIV replication, BACE: binding results for a set of inhibitors of human beta-secretase, Tox21: toxicity of compounds, and ClinTox: drugs failed clinical trials for toxicity reasons. A comprehensive explanation of these datasets can be referenced in the original research conducted by [65]. For ICL, we either select $k$ samples randomly, or search the top- $k$ most analogous molecules using RDKit [35] to determine the Tanimoto Similarity. However, it is crucial to mention that using the latter method does not assure an even distribution among classes. In our study, we employ a strategic sampling method for two categories of datasets: balanced and highly imbalanced. For balanced datasets, such as BBBP and BACE, we randomly select 30 samples for the validation process and 100 samples for testing from the original dataset. Contrastingly, for datasets exhibiting substantial label imbalance $(39684:1443\approx 28:1$ , take the HIV dataset as an example), we select samples from the majority and minority classes to achieve a ratio of 4:1. 
This strategic approach enables us to maintain a representative sample for the evaluation process, despite the original high imbalance in the dataset. To evaluate the results, we use the classification accuracy, as well as $F1$ score as the evaluation metric due to the class imbalance. We benchmark our method against two established baselines from MoleculeNet [65]: RF and XGBoost. Both baselines utilize the 1024-bit circular fingerprint as input to predict the property as a binary classification problem.
271
+
272
+ ICL Prompt. Figure 6 illustrates a sample of our ICL prompt for property prediction. Within the task-specific template, we include a detailed explanation of the task forecasting the penetration of the brain-blood barrier to assist LLMs in comprehending the input SMILES from the BBBP dataset. Additionally, we establish certain constraints for the output to conform to the specific characteristics of the property prediction task.
273
+
274
+ Results. The results are reported as F1 in Table 6, accuracy in Table 7. We observed that GPT models outperform the baseline model in terms of F1 on four out of five datasets. In the range of GPT
275
+
276
+ ![](images/a876b5b5864432726a94288b825c436bc547d8c402748773f135ff999f9bd681.jpg)
277
+ Figure 6: An ICL prompt example for property prediction
278
+
279
+ Table 6: F1 (↑) score of LLMs and baseline in molecular property prediction tasks. $k$ is the number of examples used in few-shot ICL. The best GPT model is in bold font, and the baseline is underlined.
280
+
281
+ <table><tr><td></td><td>BBBP</td><td>BACE</td><td>HIV</td><td>Tox21</td><td>ClinTox</td></tr><tr><td>RF</td><td>0.881</td><td>0.758</td><td>0.518</td><td>0.260</td><td>0.461</td></tr><tr><td>XGBoost</td><td>0.897</td><td>0.765</td><td>0.551</td><td>0.333</td><td>0.620</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0.560 ± 0.034</td><td>0.322±0.018</td><td>0.977±0.013</td><td>0.489±0.018</td><td>0.555±0.043</td></tr><tr><td>GPT-4 (Scaffold, k= 4)</td><td>0.498 ± 0.028</td><td>0.516 ± 0.024</td><td>0.818 ± 0.015</td><td>0.444 ± 0.004</td><td>0.731 ± 0.035</td></tr><tr><td>GPT-4 (Scaffold, k= 8)</td><td>0.587±0.018</td><td>0.666±0.023</td><td>0.797 ± 0.021</td><td>0.563±0.008</td><td>0.736±0.033</td></tr><tr><td>GPT-4 (random, k= 8)</td><td>0.469 ± 0.025</td><td>0.504 ± 0.020</td><td>0.994 ± 0.006</td><td>0.528±0.003</td><td>0.924±0.000</td></tr><tr><td>GPT-3.5 (Scaffold, k= 8)</td><td>0.463 ± 0.008</td><td>0.406 ± 0.011</td><td>0.807 ± 0.021</td><td>0.529 ± 0.021</td><td>0.369 ± 0.029</td></tr><tr><td>Davinci-003 (Scaffold, k= 8)</td><td>0.378 ± 0.024</td><td>0.649 ± 0.021</td><td>0.832 ± 0.020</td><td>0.518±0.009</td><td>0.850 ± 0.020</td></tr><tr><td>Llama2-13B-chat (Scaffold, k= 8)</td><td>0.002 ± 0.001</td><td>0.045 ± 0.015</td><td>0.069 ± 0.033</td><td>0.047 ± 0.013</td><td>0.001 ± 0.003</td></tr><tr><td>GAL-30B (Scaffold, k= 8)</td><td>0.074 ± 0.019</td><td>0.025 ± 0.013</td><td>0.014 ± 0.016</td><td>0.077 ± 0.046</td><td>0.081 ± 0.015</td></tr></table>
282
+
283
+ Table 7: Accuracy $(\uparrow)$ of LLMs and baseline in molecular property prediction tasks. $k$ is the number of examples used in few-shot ICL. The best GPT model is in bold font, and the baseline is underlined.
284
+
285
+ <table><tr><td></td><td>BBBP</td><td>BACE</td><td>HIV</td><td>Tox21</td><td>ClinTox</td></tr><tr><td>RF</td><td>0.820</td><td>0.790</td><td>0.870</td><td>0.830</td><td>0.858</td></tr><tr><td>XGBoost</td><td>0.850</td><td>0.810</td><td>0.870</td><td>0.840</td><td>0.888</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0.476 ± 0.036</td><td>0.499 ± 0.005</td><td>0.986 ± 0.007</td><td>0.518 ± 0.018</td><td>0.736 ± 0.027</td></tr><tr><td>GPT-4 (Scaffold, k= 4)</td><td>0.516 ± 0.022</td><td>0.514 ± 0.205</td><td>0.834 ± 0.014</td><td>0.457 ± 0.004</td><td>0.856 ± 0.014</td></tr><tr><td>GPT-4 (Scaffold, k= 8)</td><td>0.614±0.016</td><td>0.679±0.205</td><td>0.836 ± 0.020</td><td>0.737 ± 0.004</td><td>0.856 ± 0.014</td></tr><tr><td>GPT-4 (random, k= 8)</td><td>0.610 ± 0.021</td><td>0.588 ± 0.023</td><td>0.996±0.004</td><td>0.874±0.003</td><td>0.930±0.010</td></tr><tr><td>GPT-3.5 (Scaffold, k= 8)</td><td>0.463 ± 0.007</td><td>0.496 ± 0.016</td><td>0.864 ± 0.018</td><td>0.572 ± 0.026</td><td>0.578 ± 0.029</td></tr><tr><td>Davinci-003 (Scaffold, k= 8)</td><td>0.396 ± 0.023</td><td>0.650 ± 0.021</td><td>0.781 ± 0.004</td><td>0.682 ± 0.006</td><td>0.845 ± 0.010</td></tr><tr><td>Llama2-13B-chat (Scaffold, k= 8)</td><td>0.002 ± 0.003</td><td>0.048 ± 0.017</td><td>0.048 ± 0.025</td><td>0.053 ± 0.011</td><td>0.002 ± 0.004</td></tr><tr><td>GAL-30B (Scaffold, k= 8)</td><td>0.062 ± 0.007</td><td>0.020 ± 0.010</td><td>0.012 ± 0.009</td><td>0.030 ± 0.018</td><td>0.099 ± 0.007</td></tr></table>
286
+
287
+ models examined, GPT-4 surpasses both Davinci-003 and GPT-3.5 in predicting molecular properties. In our investigation, we have found evidence to support that the expansion of in-context learning (ICL) instances leads to a measurable enhancement in model performance. This underlines a direct relationship between the extent of ICL data and the predictive precision of our models. Concurrently, our research presents empirical evidence that scaffold sampling exceeds the performance of random
288
+
289
sampling on three distinct datasets (BBBP, BACE, Tox21). A plausible explanation for this could be the structural resemblances between the scaffold-sampled molecules and the query molecule, which potentially bias the GPT models towards more accurate decisions.
290
+
291
+ Label interpretation. The results presented in Table 6 and Table 7 indicate that the GPT-4 model selectively outperforms the baseline models on the HIV and ClinTox datasets. This superior performance likely stems from the inclusion of information directly related to the labels within the ICL prompts. Specifically, in the HIV dataset, the activity test results play a crucial role. Molecules tend to inhibit HIV replication when the activity test is categorized as "confirmed active" or "confirmed moderately active." For the ClinTox dataset, the FDA approval status of a molecule acts as a predictor of its clinical toxicity. A molecule not having FDA approval is more likely to be clinically toxic. In experiments where we excluded this contextual information from the in-context learning prompts, the F1 and accuracy score of predictions notably declined, as evident from the results in Table 8 and Table 9.
292
+
293
+ Table 8: Impact to F1 score of removing label context information from the in-context learning prompts.
294
+
295
+ <table><tr><td>F1(↑)</td><td>HIV</td><td>ClinTox</td></tr><tr><td>GPT-4(zero-shot)</td><td>0.977 ± (0.013)</td><td>0.489 ± (0.018)</td></tr><tr><td>GPT-4(unlabelled, zero-shot)</td><td>0.554 ± (0.017)</td><td>0.438 ± (0.045)</td></tr><tr><td>GPT-4(few-shot)</td><td>0.797 ± (0.021)</td><td>0.563 ± (0.008)</td></tr><tr><td>GPT-4(unlabelled, few-shot)</td><td>0.493 ± (0.030)</td><td>0.478 ± (0.035)</td></tr></table>
296
+
297
+ Table 9: Impact to accuracy of removing label context information from the in-context learning prompts.
298
+
299
+ <table><tr><td>Accuracy(↑)</td><td>HIV</td><td>ClinTox</td></tr><tr><td>GPT-4(zero-shot)</td><td>0.986 ± (0.070)</td><td>0.736 ± (0.027)</td></tr><tr><td>GPT-4(unlabelled, zero-shot)</td><td>0.628 ± (0.016)</td><td>0.602 ± (0.039)</td></tr><tr><td>GPT-4(few-shot)</td><td>0.836 ± (0.020)</td><td>0.856 ± (0.014)</td></tr><tr><td>GPT-4(unlabelled, few-shot)</td><td>0.541 ± (0.032)</td><td>0.630 ± (0.014)</td></tr></table>
300
+
301
+ # C Yield Prediction
302
+
303
+ Yield prediction [51] is a critical task in chemistry, specifically in the domain of synthetic chemistry, which involves the design and synthesis of new compounds for various applications, such as pharmaceuticals, materials, and catalysts. The yield prediction task aims to estimate the efficiency and effectiveness of a chemical reaction, primarily by quantifying the percentage of the desired product formed from the reactants. We use two High-Throughput experimentation (HTE) datasets: Buchwald-Hartwig [1] (MIT license) and Suzuki-Miyaura dataset [50] (MIT license) for evaluation. These datasets consist of reactions and their corresponding yields, which have been meticulously acquired through standardized and consistent experimental setups. This uniformity ensures that the data within each dataset is coherent, reducing the likelihood of discrepancies arising from variations in experimental procedures or conditions. We formulate the task of yield prediction as a binary classification problem, by determining whether a reaction is a high-yielding reaction or not. We used only random sampling for our ICL examples as reactions in those datasets belong to the same type. For every dataset, we randomly select 30 samples for the validation process and 100 samples for testing from the original dataset. To evaluate the results, we use the classification accuracy as the evaluation metric, with UAGNN [34] serving as baseline. UAGNN reports state-of-the-art performance on yield prediction. It takes the graphs of reactants and products as input, and learns representation of these molecules through a graph neural network, and then predicts the scaled yield.
304
+
305
+ ICL prompt. We show our ICL prompt for yield prediction with an example from Buchwald-Hartwig dataset. As described in Figure 7, we incorporate an input explanation (wherein the reactants are separated by ‘.’ and the products are split by ‘ $>>$ ’) to assist large language models. Additionally, output restrictions are enforced to ensure the generation of valid results.
306
+
307
+ ![](images/4b85962d244ea72be63bc15ab61501e2cb92cae1aaf1847a08b96975f1b0fdf5.jpg)
308
+ Figure 7: An ICL prompt example for yield prediction
309
+
310
+ Results. The results are presented in Table 10. Our analysis reveals that in the task of yield prediction, GPT models perform below the established baseline model, UAGNN. However, it's worth noting that the UAGNN model was trained on the full training dataset including thousands of examples. Considering the spectrum of GPT models under scrutiny, GPT-4 emerges as the superior model, overshadowing both Davinci-003 and GPT-3.5 in predicting reaction yields. In the process of our investigation, we unearthed supporting evidence that signifies the role of ICL instances in the enhancement of model performance. This suggests an inherent correlation between the quantity of ICL data and the predictive accuracy of the models under consideration. This phenomenon is particularly evident in the case of GPT-4: we observed a significant improvement in performance when the number of ICL examples was increased from 4 to 8, both in the Buchwald-Hartwig and Suzuki-coupling reactions. This indicates that even within the same model architecture, the amount of contextual data can significantly influence the predictive capabilities.
311
+
312
+ Table 10: Accuracy $(\uparrow)$ of yield prediction task. $k$ is the number of examples used in few-shot ICL. The best LLM is in bold font, and the baseline is underlined.
313
+
314
+ <table><tr><td></td><td>Buchwald-Hartwig</td><td>Suzuki-coupling</td></tr><tr><td>UAGNN [34]</td><td>0.965</td><td>0.957</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0.322 ± 0.034</td><td>0.214 ± 0.019</td></tr><tr><td>GPT-4 (random, k=8)</td><td>0.800±0.008</td><td>0.764±0.013</td></tr><tr><td>GPT-4 (random, k=4)</td><td>0.574 ± 0.045</td><td>0.324 ± 0.018</td></tr><tr><td>GPT-3.5 (random, k=8)</td><td>0.585 ± 0.045</td><td>0.542 ± 0.011</td></tr><tr><td>Davinci-003 (random, k=8)</td><td>0.467 ± 0.013</td><td>0.341 ± 0.017</td></tr><tr><td>Llama2-13B-chat</td><td>0.008 ± 0.007</td><td>0.006 ± 0.004</td></tr><tr><td>GAL-30B</td><td>0</td><td>0.008 ± 0.010</td></tr></table>
315
+
316
+ # D Reaction Prediction
317
+
318
+ Reaction prediction is a central task in the field of chemistry, with significant implications for drug discovery, materials science, and the development of novel synthetic routes. Given a set of reactants, the goal of this task is to predict the most likely products formed during a chemical reaction [54, 13, 19]. In this task, we use the widely adopted USPTO-MIT dataset [29](MIT license) to evaluate the performance of GPT models. This dataset contains approximately 470,000 chemical reactions extracted from US patents. In the experiment, we used the USPTO mixed data set, where the reactants and reagents strings are split by ‘:’. We randomly sampled 30 samples from the original validation set for validation and 100 samples from the original test set for testing. We use the Top-1 Accuracy as the evaluation metric and Chemformer [26] as the baseline due to its superior performance among the machine learning solutions for reaction prediction. Chemformer is a seq2seq model trained to predict the output product when given reactants and reagents as input. We also report the percentage of invalid SMILES generated by each method.
319
+
320
+ ![](images/e3ff5fe24bd7ebc5f1c4426c03726e37cc011e8598ddf1f8e0433090a04ea0ff.jpg)
321
+ Figure 8: An ICL prompt example for reaction prediction
322
+
323
+ ICL Prompt. One example of our ICL prompt for reaction prediction is shown in Figure 8. Given the nature of the reaction prediction task and the characteristics of the USPTO-MIT dataset, we enhance the task-specific template with an input explanation (stating that the input includes reactants and reagents, which are separated by ‘.’) to assist the GPT models in understanding the input SMILES. Moreover, we incorporate output restrictions to guide GPT models in generating chemically valid and reasonable products.
324
+
325
+ Table 11: The performance of LLMs and baseline in the reaction prediction task. $k$ is the number of examples used in few-shot ICL. The best LLM is in bold font, and the baseline is underlined.
326
+
327
+ <table><tr><td>Method</td><td>Top-1 Accuracy (↑)</td><td>Invalid SMILES (↓)</td></tr><tr><td>Chemformer [26]</td><td>0.938</td><td>0%</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0.004 ± 0.005</td><td>17.4% ± 3.9%</td></tr><tr><td>GPT-4 (Scaffold, k=20)</td><td>0.230 ± 0.022</td><td>7.0% ± 1.6%</td></tr><tr><td>GPT-4 (Random, k=20)</td><td>0.012 ± 0.008</td><td>8.4% ± 1.5%</td></tr><tr><td>GPT-4 (Scaffold, k=5)</td><td>0.182 ± 0.015</td><td>6.6% ± 1.5%</td></tr><tr><td>GPT-3.5 (Scaffold, k=20)</td><td>0.184 ± 0.005</td><td>15.6% ± 2.3%</td></tr><tr><td>Davinci-003 (Scaffold, k=20)</td><td>0.218 ± 0.008</td><td>11.4% ± 2.7%</td></tr><tr><td>Llama2-13B-chat (Scaffold, k=20)</td><td>0.032 ± 0.013</td><td>27.8% ± 5.5%</td></tr><tr><td>GAL-30B (Scaffold, k=5)</td><td>0.036 ± 0.011</td><td>5.2% ± 1.5%</td></tr></table>
328
+
329
+ Results. The results are reported in Table 11. We can observe that compared to the baseline, the performance of GPT models is considerably inferior, especially for the Zero-shot prompting (Top-1 Accuracy is only 0.004 and it generates $17.4\%$ invalid SMILES). The less competitive results of GPT models can be attributed to the lack of in-depth understanding of the SMILES strings that represent reactants and products, as well as the reaction process that transforms reactants into products. It is also worth mentioning that the high accuracy achieved by Chemformer is due to its training on the complete dataset. More conclusions and detailed analysis are summarized in the section 5.
330
+
331
+ # E Reagents Selection
332
+
333
+ Reagents selection, also known as reagent recommendation, involves the identification and proposal of the most fitting reagents for a specific chemical reaction or process. Compared to other prediction and generation tasks, these selection tasks might be more fitting for LLMs and carry extensive implications. Reagent recommendation can markedly enhance reaction design by pinpointing optimal reagents and conditions for a given reaction, thereby augmenting efficiency and effectiveness in both academic and industrial settings. Drawing from a vast corpus of chemical knowledge, GPT models may be able to generate suggestions, leading to chemical reactions with a greater likelihood of yielding superior results.
334
+
335
+ In this study, we formulate the reagent selection tasks from the Suzuki High-Throughput Experimentation (HTE) dataset. The dataset, created by Perera et al. [44] (MIT license), evaluates the Suzuki coupling of 5 electrophiles and 7 nucleophiles across a matrix of 11 ligands (with one blank), 7 bases (with one blank), and 4 solvents, resulting in a reaction screening dataset comprising 5,760 data points. The reagent selection task can be divided into three categories: reactant selection, ligand selection, and solvent selection. For validation, 30 examples were randomly sampled, while 100 examples were used for testing, all taken from the original datasets. Top-1 Accuracy serves as the assessment metric for both reactant and solvent selection, while Top-50% accuracy is utilized for ligand selection, as the upper half of the ligands in the list typically provide satisfactory yields in chemical reactions. This task is newly emergent in the field of chemistry, and as such, there are no established baselines yet.
336
+
337
+ ICL prompt. One example of our ICL prompt for reagents selection is shown in Figure 9. Considering the structure of the dataset and the characteristics of the reagents, we provide detailed task description and an answer template to guide GPT models towards the desired output.
338
+
339
+ Results. Our results are presented in Table 12. From the table, it is evident that GPT-4 and GPT-3.5 perform comparatively well in reagent selection tasks. This suggests a promising potential for GPT models in the realm of reagent selection.
340
+
341
+ Table 12: Accuracy $(\uparrow)$ of LLM in the reagent selection tasks. For Reactant Selection and Solvent selection task, we report the mean (and standard deviation) of the Top-1 Accuracy score and we report the Top-50% accuracy score for the Ligand Selection task. The best LLM is in bold font, and the baseline is underlined.
342
+
343
+ <table><tr><td></td><td>Reactant Selection</td><td>Solvent Selection</td><td>Ligand Selection</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0.299 ± 0.029</td><td>0.526±0.012</td><td>0.534±0.059</td></tr><tr><td>GPT-3.5 (zero-shot)</td><td>0.400±0.038</td><td>0.368±0.034</td><td>0.436 ± 0.020</td></tr><tr><td>Davinci-003 (zero-shot)</td><td>0.178 ± 0.034</td><td>0.463 ± 0.014</td><td>0.432 ± 0.020</td></tr><tr><td>Llama2-13B-chat (zero-shot)</td><td>0.145 ± 0.000</td><td>0.050 ± 0.010</td><td>0.284 ± 0.024</td></tr><tr><td>GAL-30B (zero-shot)</td><td>0.107 ± 0.020</td><td>0.104 ± 0.004</td><td>0.030 ± 0.016</td></tr></table>
344
+
345
+ # F Retrosynthesis
346
+
347
+ Retrosynthesis planning is a crucial task in synthetic organic chemistry that involves identifying efficient synthetic pathways for a target molecule by recursively transforming it into simpler precursor molecules. In contrast to reaction prediction, retrosynthesis planning involves a reverse extrapolation from the target molecule to identify the readily available reactants for its synthesis. In this study, we use the USPTO-50k dataset [53](MIT license), which contains 50,037 chemical reactions. In our
348
+
349
+ ![](images/9c0fb3296c6bad8b183163a3e38bdb91149281a16bc4074221cb31b2262e48ed.jpg)
350
+ Figure 9: An ICL prompt example for reagents selection
351
+
352
+ experiment, we use the same data splitting as [17] and use the training set, which contains 40,029 reactions, as the ICL candidates. The metric and baseline are the same as in the reaction prediction task.
353
+
354
+ ICL Prompt. One example of our ICL prompt for retrosynthesis is shown in Figure 10. As discussed in the reaction prediction task, we also add the task-specific template to help GPT models understand the input and restrict the output.
355
+
356
+ ![](images/4f7672d5851517771e502745d235556db759f54bfc3c13618abb3ef707e2c07f.jpg)
357
+ Figure 10: An ICL prompt example for Retrosynthesis
358
+
359
+ Table 13: The performance of LLMs and baseline in Retrosynthesis task. The best LLM is in bold font, and the baseline is underlined.
360
+
361
+ <table><tr><td>Method</td><td>Top-1 Accuracy (↑)</td><td>Invalid SMILES (↓)</td></tr><tr><td>Chemformer [26]</td><td>0.536</td><td>0%</td></tr><tr><td>GPT-4 (zero-shot)</td><td>0.006 ± 0.005</td><td>20.6% ± 4.7%</td></tr><tr><td>GPT-4 (Scaffold, k=20)</td><td>0.096 ± 0.013</td><td>10.4% ± 3.4%</td></tr><tr><td>GPT-4 (Scaffold, k=5)</td><td>0.114 ± 0.013</td><td>11.0% ± 1.2%</td></tr><tr><td>GPT-4 (Random, k=20)</td><td>0.012 ± 0.011</td><td>18.2% ± 4.2%</td></tr><tr><td>GPT-3.5 (Scaffold, k=20)</td><td>0.022 ± 0.004</td><td>6.4% ± 1.3%</td></tr><tr><td>Davinci-003 (Scaffold, k=20)</td><td>0.122 ± 0.013</td><td>6.0% ± 1.2%</td></tr><tr><td>Llama2-13B-chat (Scaffold, k=20)</td><td>0</td><td>27.2% ± 1.5%</td></tr><tr><td>GAL-30B (Scaffold, k=5)</td><td>0.016 ± 0.005</td><td>5.2% ± 1.8%</td></tr></table>
362
+
363
+ Results. The results are reported in Table 13. The performance of GPT models is also inferior to the baseline due to the lack of an in-depth understanding of the SMILES strings that represent reactants and products. A detailed analysis is provided in Section 5 (Discussion).
364
+
365
+ # G Text-Based Molecule Design
366
+
367
+ Text-Based Molecule Design is a novel task in computational chemistry and drug discovery. It involves generating new molecules with desired molecule descriptions. In our experiment, we employ the ChEBI-20 dataset, which consists of 33,010 molecule-description pairs. The dataset is split into $80/10/10\%$ as the training/validation/test set [17] (CC BY 4.0). We use the training set, which contains 26,407 molecule-description pairs, as the ICL candidates. For comparison, we use MolT5-Large [17] as the baseline. MolT5-Large is the initial effort to investigate the translation between molecules and text, including tasks such as text-based molecule design and molecule captioning. It builds upon T5 [46], an encoder-decoder Transformer model, and benefits from pretraining on a large amount of data. To comprehensively evaluate the performance, we employ three different types of metrics. The first type of metric is the chemical similarity between the ground-truth molecules and generated molecules, measured by FTS (Fingerprint Tanimoto Similarity) [55] in terms of MACCS [49], RDK [35], and Morgan [14]. Secondly, we also use FCD (Fréchet ChemNet Distance) [45], which allows comparing molecules based on the latent information used to predict the activity of molecules [17]. Since the generated molecules are in SMILES string format, we also employ natural language processing metrics, including BLEU, Exact Match [17], and Levenshtein distance [40], computed between the SMILES of the ground-truth and generated molecules. Finally, to evaluate whether generated molecules are valid, we use RDKit [35] to check the validity of generated molecules and report the percentage of valid molecules.
368
+
369
+ ICL Prompt. One ICL prompt example for text-based molecule design is shown in Figure 11.
370
+
371
+ Table 14: The performance of LLMs and baseline in the Text-Based Molecule Design task. The best LLM is in bold font and the baseline is underlined.
372
+
373
+ <table><tr><td>Method</td><td>BLEU (↑)</td><td>Exact (↑)</td><td>Levenshtein (↓)</td><td>Validity (↑)</td><td>MACCS FTS (↑)</td><td>RDK FTS (↑)</td><td>Morgan FTS (↑)</td><td>FCD (↓)</td></tr><tr><td>MolT5-Large [17]</td><td>0.601</td><td>0.290</td><td>41.600</td><td>0.940</td><td>0.879</td><td>0.797</td><td>0.752</td><td>5.394</td></tr><tr><td>GPT-4(zero-shot)</td><td>0.490±0.017</td><td>0.046±0.009</td><td>47.418±1.668</td><td>0.758±0.015</td><td>0.733±0.020</td><td>0.514±0.021</td><td>0.432±0.014</td><td>11.913±0.972</td></tr><tr><td>GPT-4(Scaffold, k=10)</td><td>0.816±0.004</td><td>0.174±0.029</td><td>21.160±0.600</td><td>0.888±0.023</td><td>0.867±0.005</td><td>0.738±0.010</td><td>0.672±0.013</td><td>6.224±0.449</td></tr><tr><td>GPT-4(Scaffold, k=5)</td><td>0.815±0.011</td><td>0.164±0.018</td><td>21.862±1.768</td><td>0.874±0.030</td><td>0.865±0.015</td><td>0.741±0.023</td><td>0.670±0.028</td><td>5.843±0.515</td></tr><tr><td>GPT-4(Random, k=10)</td><td>0.602±0.016</td><td>0.060±0.007</td><td>42.390±1.008</td><td>0.770±0.030</td><td>0.762±0.013</td><td>0.548±0.017</td><td>0.475±0.015</td><td>10.594±0.414</td></tr><tr><td>GPT-3.5(Scaffold, k=10)</td><td>0.479±0.156</td><td>0.094±0.011</td><td>82.008±40.354</td><td>0.854±0.059</td><td>0.833±0.006</td><td>0.686±0.016</td><td>0.585±0.013</td><td>8.341±0.607</td></tr><tr><td rowspan="2">Davinci-003(Scaffold, k=10)Llama2-13B-chat(Scaffold, k=10)</td><td>0.741±0.011</td><td>0.100±0.010</td><td>25.648±2.186</td><td>0.936±0.009</td><td>0.783±0.014</td><td>0.648±0.004</td><td>0.560±0.010</td><td>8.335±0.310</td></tr><tr><td>0.626±0.013</td><td>0.020±0.000</td><td>33.956±2.648</td><td>0.782±0.008</td><td>0.679±0.015</td><td>0.568±0.014</td><td>0.454±0.009</td><td>12.387±0.437</td></tr><tr><td>GAL-30B(zero-shot)</td><td>0.004±0.000</td><td>0.000±0.000</td><td>2738.136±166.093</td><td>0.956±0.011</td><td>0.233±0.011</td><td>0.109±0.006</td><td>0.053±0.002</td><td>35.091±0.774</td></tr></table>
374
+
375
+ ![](images/ca284e19c4b677cdf69846a4689e50ea1a79846abaae44669bd995ac48d81c14.jpg)
376
+ Figure 11: An ICL prompt example for Text-Based Molecule Design
377
+
378
+ Results. The results are reported in Table 14. We can observe that the best ICL prompting GPT models (GPT-4 and Davinci-003) can achieve competitive performance or even outperform the baseline in some metrics (BLEU, Levenshtein). Although the GPT models significantly underperform the baseline in terms of exact match and Morgan FTS metrics, it's important to note that we only utilize a maximum of 10 examples, which is substantially less than the training set (comprising 26,407 training examples) used for the baseline. These results demonstrate the strong few-shot text-based molecule design ability of GPT models. Last, not being exactly the same as the ground truth doesn't necessarily mean it's incorrect, especially in the context of molecular design. The molecules generated by GPT models may still be useful and can serve as alternatives to the ground truth, given they fulfill the requirements described in the input text and a majority (over $89\%$ ) are chemically valid.
379
+
380
+ Case studies. We select three different types of molecules (organic molecule without rings, organic molecule with ring, and metal atom) as examples, and show the generated molecules in Figure 12. We observe that the structure of molecules generated by the GPT-4 (Scaffold, $k = 10$ ) method is more similar to the ground truth compared to Davinci-003, GPT-4 (zero-shot), and even the baseline. Additionally, for metal atom design, GPT models outperform the baseline, which wrongly generates a SMILES string instead of the metal atom. These cases show promising results of the molecule design ability of GPT models. However, evaluating whether the generated molecules are helpful in real-world scenarios, such as assessing molecule novelty, is still a difficult problem. Thus we conclude that GPT models have excellent potential in molecule design and there are prospects for investigating this ability.
381
+
382
+ # H Molecule Captioning
383
+
384
+ Molecule captioning is an important task in computational chemistry, offering valuable insights and applications in various areas such as drug discovery, materials science, and chemical synthesis. Given a molecule as input, the goal of this task is to generate a textual description that accurately describes the key features, properties, and functional groups of the molecule. We also use the ChEBI-20 dataset(CC BY 4.0) and the training set of it as the ICL candidates as discussed in the Text-Based Molecule Design Section. We use traditional captioning metrics including BLEU, ROUGE, and METEOR for evaluation.
385
+
386
+ ICL Prompt. One example of our ICL prompt for molecule captioning is shown in Figure 13.
387
+
388
+ ![](images/7325edeece6b47b7f4928c782f5b936ec1a6eb03fff8848c6ef0e44154afee27.jpg)
389
+ Figure 12: Examples of molecules generated by different models.
390
+
391
+ Results. The results are reported in Table 15. We can observe that the best ICL prompting GPT models (GPT-4 and Davinci-003) can achieve competitive performance or even outperform the baseline in some metrics (BLEU-2 and BLEU-4). This indicates the promising capability of the GPT models in the molecule captioning task.
392
+
393
+ ![](images/af62850db125a98deda6dd946c71b9b66668fb6271ca7f9491db936c8124af4f.jpg)
394
+ Figure 13: An ICL prompt example for molecule captioning
395
+
396
+ Case studies. As in the case studies for the Text-Based Molecule Design task, we also select three different types of molecules as examples, and the captions are shown in Figure 14. We observe that although the performance of the baseline is close to that of the GPT models, the captions generated by the baseline contain more descriptions that violate chemical facts. In contrast, the captions generated by GPT-4 models contain only a few inaccurate descriptions, highlighting the strong explanatory ability of GPT models. This also exposes the limitations of applying traditional Natural Language Processing (NLP) evaluation metrics to this task. Therefore, it is necessary to create more suitable evaluation metrics for chemistry-related generation tasks.
397
+
398
+ Table 15: The performance of LLMs and baseline in the molecule captioning task. The best LLM is in bold font and the baseline is underlined.
399
+
400
+ <table><tr><td>Method</td><td>BLEU-2 (↑)</td><td>BLEU-4 (↑)</td><td>ROUGE-1 (↑)</td><td>ROUGE-2 (↑)</td><td>ROUGE-L (↑)</td><td>METEOR (↑)</td></tr><tr><td>MolT5-Large [17]</td><td>0.482</td><td>0.383</td><td>0.574</td><td>0.410</td><td>0.516</td><td>0.530</td></tr><tr><td>GPT-4(zero-shot)</td><td>0.062±0.001</td><td>0.013±0.001</td><td>0.192±0.002</td><td>0.040±0.002</td><td>0.125±0.002</td><td>0.209±0.002</td></tr><tr><td>GPT-4(Scaffold, k=10)</td><td>0.464±0.008</td><td>0.365±0.008</td><td>0.545±0.003</td><td>0.362±0.003</td><td>0.459±0.007</td><td>0.519±0.005</td></tr><tr><td>GPT-4(Scaffold, k=5)</td><td>0.456±0.003</td><td>0.357±0.004</td><td>0.540±0.005</td><td>0.355±0.007</td><td>0.455±0.005</td><td>0.505±0.005</td></tr><tr><td>GPT-4(Random, k=10)</td><td>0.260±0.007</td><td>0.140±0.007</td><td>0.393±0.004</td><td>0.180±0.006</td><td>0.309±0.004</td><td>0.320±0.007</td></tr><tr><td>GPT-3.5(Scaffold, k=10)</td><td>0.468±0.010</td><td>0.368±0.010</td><td>0.534±0.005</td><td>0.355±0.007</td><td>0.457±0.006</td><td>0.497±0.005</td></tr><tr><td>Davinci-003(Scaffold, k=10)</td><td>0.488±0.011</td><td>0.391±0.012</td><td>0.532±0.008</td><td>0.359±0.010</td><td>0.465±0.008</td><td>0.478±0.011</td></tr><tr><td>Llama2-13B-chat(Scaffold, k=10)</td><td>0.197±0.005</td><td>0.140±0.004</td><td>0.331±0.005</td><td>0.193±0.005</td><td>0.265±0.005</td><td>0.372±0.006</td></tr><tr><td>GAL-30B(zero-shot)</td><td>0.008±0.000</td><td>0.002 ± 0.000</td><td>0.019±0.002</td><td>0.004±0.000</td><td>0.015±0.002</td><td>0.043±0.002</td></tr></table>
401
+
402
+ ![](images/d68434188f1469168eb961fbfaf089b0f1c7c4bb9d5ec2dcef24062896ff8854.jpg)
403
+ Figure 14: Example captions generated by different models. Descriptions that violate chemical facts are marked in grey.
404
+
405
+ # I The comparison of SMILES and SELFIES
406
+
407
+ Table 16: F1 (↑) score of SMILES and SELFIES of GPT-4 model in molecular property prediction tasks.
408
+
409
+ <table><tr><td></td><td>BBBP</td><td>BACE</td><td>HIV</td><td>Tox21</td><td>ClinTox</td></tr><tr><td>SMILES</td><td>0.587 ± 0.018</td><td>0.666 ± 0.023</td><td>0.797 ± 0.021</td><td>0.563 ± 0.008</td><td>0.736 ± 0.033</td></tr><tr><td>SELFIES</td><td>0.541 ± 0.001</td><td>0.601 ± 0.036</td><td>0.784 ± 0.014</td><td>0.478 ± 0.011</td><td>0.654 ± 0.025</td></tr></table>
410
+
411
+ Table 17: Performance of SMILES and SELFIES of GPT-4 model in reaction prediction task.
412
+
413
+ <table><tr><td></td><td>Top-1 Accuracy (↑)</td><td>Invalid SMILES/SELFIES (↓)</td></tr><tr><td>SMILES</td><td>0.230 ± 0.022</td><td>7.0% ± 1.6%</td></tr><tr><td>SELFIES</td><td>0.110 ± 0.007</td><td>1.0% ± 0.0%</td></tr></table>
414
+
415
+ Table 18: Performance of SMILES and SELFIES of GPT-4 model in molecule design task.
416
+
417
+ <table><tr><td></td><td>BLEU (↑)</td><td>Exact (↑)</td><td>Levenshtein (↓)</td><td>Validity (↑)</td><td>MACCS FTS (↑)</td><td>RDK FTS (↑)</td><td>Morgan FTS (↑)</td><td>FCD (↓)</td></tr><tr><td>SMILES</td><td>0.816 ± 0.004</td><td>0.174 ± 0.029</td><td>21.160 ± 0.600</td><td>0.888 ± 0.023</td><td>0.867 ± 0.005</td><td>0.738 ± 0.010</td><td>0.672 ± 0.013</td><td>6.224 ± 0.449</td></tr><tr><td>SELFIES</td><td>0.277 ± 0.009</td><td>0.100 ± 0.016</td><td>76.162 ± 2.229</td><td>0.804 ± 0.022</td><td>0.619 ± 0.010</td><td>0.467 ± 0.018</td><td>0.399 ± 0.017</td><td>13.557 ± 0.224</td></tr></table>
418
+
419
+ Table 19: Performance of SMILES and SELFIES of GPT-4 model in molecule captioning task.
420
+
421
+ <table><tr><td></td><td>BLEU-2 (↑)</td><td>BLEU-4 (↑)</td><td>ROUGE-1 (↑)</td><td>ROUGE-2 (↑)</td><td>ROUGE-L (↑)</td><td>METEOR (↑)</td></tr><tr><td>SMILES</td><td>0.464 ± 0.008</td><td>0.365 ± 0.008</td><td>0.545 ± 0.003</td><td>0.362 ± 0.003</td><td>0.459 ± 0.007</td><td>0.519 ± 0.005</td></tr><tr><td>SELFIES</td><td>0.459 ± 0.012</td><td>0.367 ± 0.010</td><td>0.530 ± 0.007</td><td>0.360 ± 0.005</td><td>0.456 ± 0.005</td><td>0.490 ± 0.007</td></tr></table>
whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f70a3aa00a72d8024417bf807c25d214c5f6e1ec2b55c9c9c1b0904796284bfb
3
+ size 2152728
whatcanlargelanguagemodelsdoinchemistryacomprehensivebenchmarkoneighttasks/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c317b5a0fbe7cc5013893c83181c794649cca87a328b7248a5be68d9bf58813
3
+ size 518318
whatcanwelearnfromunlearnabledatasets/94126f6c-48a3-4232-a5d9-f65f5096c8e7_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bf07a73dc36de62c7c8b41b399955197711010b9f79389dede5cacb4e12aa8c
3
+ size 101975
whatcanwelearnfromunlearnabledatasets/94126f6c-48a3-4232-a5d9-f65f5096c8e7_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1bc1334ec23c95f9b26bcd207017795bf6985b6138fd3634e62189f8bfd3869
3
+ size 119490