SlowGuess committed on
Commit
bc1c735
·
verified ·
1 Parent(s): 0b24f85

Add Batch 76316c23-d7ba-41b2-8503-b1e162b94c58

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/b48b3244-119a-4fd6-bc8d-c5f30e313002_content_list.json +3 -0
  2. fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/b48b3244-119a-4fd6-bc8d-c5f30e313002_model.json +3 -0
  3. fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/b48b3244-119a-4fd6-bc8d-c5f30e313002_origin.pdf +3 -0
  4. fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/full.md +742 -0
  5. fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/images.zip +3 -0
  6. fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/layout.json +3 -0
  7. iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/025dad01-183e-408f-b811-9bb744cef25c_content_list.json +3 -0
  8. iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/025dad01-183e-408f-b811-9bb744cef25c_model.json +3 -0
  9. iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/025dad01-183e-408f-b811-9bb744cef25c_origin.pdf +3 -0
  10. iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/full.md +0 -0
  11. iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/images.zip +3 -0
  12. iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/layout.json +3 -0
  13. kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/2dd01f7a-eb35-4d5f-8538-975c25eab59c_content_list.json +3 -0
  14. kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/2dd01f7a-eb35-4d5f-8538-975c25eab59c_model.json +3 -0
  15. kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/2dd01f7a-eb35-4d5f-8538-975c25eab59c_origin.pdf +3 -0
  16. kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/full.md +669 -0
  17. kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/images.zip +3 -0
  18. kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/layout.json +3 -0
  19. rppgtoolboxdeepremoteppgtoolbox/ccf1513f-6339-48d5-896d-284ac3142418_content_list.json +3 -0
  20. rppgtoolboxdeepremoteppgtoolbox/ccf1513f-6339-48d5-896d-284ac3142418_model.json +3 -0
  21. rppgtoolboxdeepremoteppgtoolbox/ccf1513f-6339-48d5-896d-284ac3142418_origin.pdf +3 -0
  22. rppgtoolboxdeepremoteppgtoolbox/full.md +0 -0
  23. rppgtoolboxdeepremoteppgtoolbox/images.zip +3 -0
  24. rppgtoolboxdeepremoteppgtoolbox/layout.json +3 -0
  25. trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/ab71f100-7bbb-444d-9ce5-a23368f05f0c_content_list.json +3 -0
  26. trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/ab71f100-7bbb-444d-9ce5-a23368f05f0c_model.json +3 -0
  27. trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/ab71f100-7bbb-444d-9ce5-a23368f05f0c_origin.pdf +3 -0
  28. trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/full.md +264 -0
  29. trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/images.zip +3 -0
  30. trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/layout.json +3 -0
  31. whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/87c002e3-bc86-4340-8296-e744e39e0d3b_content_list.json +3 -0
  32. whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/87c002e3-bc86-4340-8296-e744e39e0d3b_model.json +3 -0
  33. whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/87c002e3-bc86-4340-8296-e744e39e0d3b_origin.pdf +3 -0
  34. whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/full.md +402 -0
  35. whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/images.zip +3 -0
  36. whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/layout.json +3 -0
  37. xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/6f8bef3c-7b38-4b28-b2b8-1565385fb36c_content_list.json +3 -0
  38. xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/6f8bef3c-7b38-4b28-b2b8-1565385fb36c_model.json +3 -0
  39. xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/6f8bef3c-7b38-4b28-b2b8-1565385fb36c_origin.pdf +3 -0
  40. xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/full.md +262 -0
  41. xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/images.zip +3 -0
  42. xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/layout.json +3 -0
  43. youonlycondenseoncetworulesforpruningcondenseddatasets/dbfde00c-7c4b-48aa-b68d-8b0f07b73866_content_list.json +3 -0
  44. youonlycondenseoncetworulesforpruningcondenseddatasets/dbfde00c-7c4b-48aa-b68d-8b0f07b73866_model.json +3 -0
  45. youonlycondenseoncetworulesforpruningcondenseddatasets/dbfde00c-7c4b-48aa-b68d-8b0f07b73866_origin.pdf +3 -0
  46. youonlycondenseoncetworulesforpruningcondenseddatasets/full.md +387 -0
  47. youonlycondenseoncetworulesforpruningcondenseddatasets/images.zip +3 -0
  48. youonlycondenseoncetworulesforpruningcondenseddatasets/layout.json +3 -0
  49. yourrepresentationsareinthenetworkcomposableandparalleladaptationforlargescalemodels/e5ad109c-cd83-439a-9c26-a0e687ebd28a_content_list.json +3 -0
  50. yourrepresentationsareinthenetworkcomposableandparalleladaptationforlargescalemodels/e5ad109c-cd83-439a-9c26-a0e687ebd28a_model.json +3 -0
fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/b48b3244-119a-4fd6-bc8d-c5f30e313002_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c8344c6b301a25ba30e72f92985d6e7f6f4189b85f2b6b49fb35d7713d7e9f2
3
+ size 155023
fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/b48b3244-119a-4fd6-bc8d-c5f30e313002_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:195324d6946bce2fe6c536bc8e571c70ef3057a082d02ed68fc9a570d8152d46
3
+ size 181382
fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/b48b3244-119a-4fd6-bc8d-c5f30e313002_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c660440b833d38bfc3baab760a848facaceed7adb9526c2135213a66d0f8c06d
3
+ size 3672518
fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/full.md ADDED
@@ -0,0 +1,742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # $f$ -Policy Gradients: A General Framework for Goal Conditioned RL using $f$ -Divergences
2
+
3
+ Siddhant Agarwal
4
+
5
+ The University of Texas at Austin siddhant@cs.utexas.edu
6
+
7
+ Peter Stone
8
+
9
+ The University of Texas at Austin
10
+ Sony AI
11
+ pstone@cs.utexas.edu
12
+
13
+ Ishan Durugkar
14
+
15
+ Sony AI
16
+
17
+ ishan.durugkar@sony.com
18
+
19
+ Amy Zhang
20
+
21
+ The University of Texas at Austin amy.zhang@austin.utexas.edu
22
+
23
+ # Abstract
24
+
25
+ Goal-Conditioned Reinforcement Learning (RL) problems often have access to sparse rewards where the agent receives a reward signal only when it has achieved the goal, making policy optimization a difficult problem. Several works augment this sparse reward with a learned dense reward function, but this can lead to sub-optimal policies if the reward is misaligned. Moreover, recent works have demonstrated that effective shaping rewards for a particular problem can depend on the underlying learning algorithm. This paper introduces a novel way to encourage exploration called $f$ -Policy Gradients, or $f$ -PG. $f$ -PG minimizes the f-divergence between the agent's state visitation distribution and the goal, which we show can lead to an optimal policy. We derive gradients for various f-divergences to optimize this objective. Our learning paradigm provides dense learning signals for exploration in sparse reward settings. We further introduce an entropy-regularized policy optimization objective, that we call state-MaxEnt RL (or $s$ -MaxEnt RL) as a special case of our objective. We show that several metric-based shaping rewards like L2 can be used with $s$ -MaxEnt RL, providing a common ground to study such metric-based shaping rewards with efficient exploration. We find that $f$ -PG has better performance compared to standard policy gradient methods on a challenging gridworld as well as the Point Maze and FetchReach environments. More information on our website https://agarwalsiddhant10.github.io/projects/fpg.html.
26
+
27
+ # 1 Introduction
28
+
29
+ Reinforcement Learning (RL) algorithms aim to identify the optimal behavior (policy) for solving a task by interacting with the environment. The field of RL has made large strides in recent years (Mnih et al., 2013; Silver et al., 2017; Haarnoja et al., 2018; Ouyang et al., 2022; Wurman et al., 2022) and has been applied to complex tasks ranging from robotics (Gupta et al., 2019), protein synthesis (Jumper et al., 2021), computer architecture (Fawzi et al., 2022) and finance (Liu et al., 2021). Goal-Conditioned RL (GCRL) is a generalized form of the standard RL paradigm for learning a policy that can solve many tasks, as long as each task can be defined by a single rewarding goal state. Common examples of goal-conditioned tasks arise in robotics where the goal states can be a target object configuration for manipulation-based tasks (Kim et al., 2022; Gupta et al., 2019; OpenAI et al., 2021) or a target location for navigation-based tasks (Shah et al., 2020; Gervet et al., 2023).
30
+
31
+ In any reinforcement learning setup, the task is conveyed to the agent using rewards (Silver et al., 2021). In goal-conditioned RL settings, a common reward function used is 1 when the goal is
32
+
33
+ achieved and 0 everywhere else. This reward function is sparse and poses a huge learning challenge to obtain the optimal policy without any intermediate learning signal. Prior works (Ng et al., 1999; Ni et al., 2020; Durugkar et al., 2021; Arjona-Medina et al., 2019; Goyal et al., 2019) have augmented the reward function to provide some dense signal for policy optimization. A major issue with augmenting reward functions is that the optimal policy for the new reward function may no longer be optimal under the original, true reward function (Ng et al., 1999). Moreover, it has been shown (Booth et al., 2023) that shaping rewards that improve learning for one learning algorithm may not be optimal for another learning algorithm. Algorithms that learn reward functions (Ni et al., 2020; Durugkar et al., 2021; Zheng et al., 2018) are inefficient because the reward function must first be learned before it can be used for policy optimization. These challenges lead to the following research question: Is there another way to provide dense learning signals for policy optimization other than through dense shaping rewards?
34
+
35
+ In this work, we look at using divergence minimization between the agent's state visitation and the goal distribution (we assume that each goal can be represented as a distribution, Dirac distribution being the simplest) as an objective to provide additional learning signals. Similar perspectives on policy learning have been explored in prior works (Ziebart et al., 2008; Haarnoja et al., 2017, 2018; Ho & Ermon, 2016; Ni et al., 2020; Ghasemipour et al., 2019; Fu et al., 2017), but they reduce their methods to a reward-centric view. MaxEnt RL methods (Ziebart et al., 2008; Haarnoja et al., 2017, 2018) use the distribution over trajectories rather than state visitations and still suffer from sparsity if the task rewards are sparse. Imitation learning works like those of Ho & Ermon (2016); Fu et al. (2017); Ghasemipour et al. (2019) use a variational lower bound to obtain min-max objectives that require discriminators. These objectives suffer from mathematical instabilities and often require coverage assumptions, i.e., abundant overlap between the agent's state visitation distribution and goal distribution. Our method does not rely on discriminators nor does it assume state coverage. It provides dense signals to update the policy even when the agent has not seen the goal. These signals push the policy towards higher entropy state visitations until the goal is discovered.
36
+
37
+ Our method, $f$ -PG or $f$ -Policy Gradient, introduces a novel GCRL framework that aims to minimize a general measure of mismatch (the $f$ -divergence) between the agent's state visitation distribution and the goal distribution. We prove that minimizing the $f$ -divergence (for some divergences) recovers the optimal policy. The analytical gradient for the objective looks very similar to a policy gradient which allows us to leverage established methods from the policy gradient literature to come up with an efficient algorithm for goal-conditioned RL. We show the connection of our method to the commonly used metric-based shaping rewards for GCRL like L2 rewards. We show that a special case of $f$ -PG jointly optimizes for maximization of a reward and the entropy of the state-visitation distribution thus introducing state-MaxEnt RL (or s-MaxEnt RL). Using a sparse gridworld, we establish the benefits of using $f$ -PG as a dense signal to explore when the agent has not seen the goal. We also demonstrate that our framework can be extended to continuous state spaces and scale to larger and higher-dimensional state spaces in maze navigation and manipulation tasks.
38
+
39
+ Our key contributions are 1) developing a novel algorithm for goal-conditioned RL that provably produces the optimal policy, 2) connecting our framework to commonly known metric-based shaping rewards, 3) providing a new perspective to RL (s-MaxEnt RL) that focuses on maximizing the entropy of the state-visitation distribution and 4) empirical evidence demonstrating its ability to provide dense learning signals and scale to larger domains.
40
+
41
+ # 2 Background
42
+
43
+ This section goes over the standard goal-conditioned reinforcement learning formulation and the f-divergences that will be used in the rest of the paper.
44
+
45
+ Goal-conditioned reinforcement learning. This paper considers an agent in a goal-conditioned MDP (Puterman, 1990; Kaelbling, 1993). A goal-conditioned MDP is defined as a tuple $\langle S, \mathcal{G}, \mathcal{A}, P, r, \gamma, \mu_0, \rho_g \rangle$ where $S$ is the state space, $\mathcal{A}$ is the action space, $P: S \times \mathcal{A} \longmapsto \Delta(S)$ is the transition probability ( $\Delta(\cdot)$ denotes a probability distribution over a set), $\gamma \in [0,1)$ is the discount factor, $\mu_0$ is the distribution over initial states, $\mathcal{G} \subset S$ is the set of goals, and $\rho_g: \Delta(\mathcal{G})$ is the distribution over goals. At the beginning of an episode, the initial state $s_0$ and the goal $g$ are sampled from the distributions $\mu_0$ and $\rho_g$ . The rewards $r: S \times \mathcal{G} \longmapsto \mathbb{R}$ are based on the state the agent visits and conditioned on the goal specified during that episode. This work focuses on sparse rewards, where $r(s', g) = 1$ when $s' = g$ , and is $r(s', g) = 0$ otherwise. In continuous domains, the equality is relaxed to $s' \in \mathcal{B}(g, r)$ where $\mathcal{B}(g, r)$ represents a ball around the goal $g$ with radius $r$ .
46
+
47
+ A trajectory $\tau$ is defined as the sequence $(s_0, a_0, s_1, \ldots, s_{T-1}, a_{T-1}, s_T)$ . The return $H_g(s)$ is defined as the cumulative undiscounted rewards $H_g(s) := \sum_{t=0}^{T} [r(s_{t+1}, g) | s_0 = s]$ , where $T$ is the length of a trajectory. We will assume the trajectory ends when a maximum number of policy steps $(T)$ have been executed. The agent aims to learn a policy $\pi: \mathcal{S} \times \mathcal{G} \longmapsto \Delta(\mathcal{A})$ that maximises the expected return $\mathbb{E}_{\pi, s_0}[H_g(s_0)]$ . The optimal policy $\pi^* = \arg \max_{\pi_\theta \in \Pi} \mathbb{E}_{\pi, s_0}[H_g(s_0)]$ , where the space of policies $\Pi$ is defined by a set of parameters $\theta \in \Theta$ .
48
+
49
+ Distribution matching approach to goal-conditioned RL. The distribution over goal-conditioned trajectories is defined as $p_{\theta}(\tau; g) = p(s_0)\Pi_{t=0}^{T-1}\pi_{\theta}(a_t|s_t; g)\,p(s_{t+1}|s_t, a_t)$. The trajectory-dependent state visitation distribution is defined as $\eta_{\tau}(s)$. It is the number of times the state $s$ is visited in the trajectory $\tau$. The agent's goal-conditioned state visitation can then be defined as:
50
+
51
+ $$
52
+ \begin{array}{ll} p_{\theta}(s; g) = \dfrac{\int p_{\theta}(\tau; g)\, \eta_{\tau}(s)\, d\tau}{Z} & (1) \\[2mm] \phantom{p_{\theta}(s; g)} = \dfrac{\int \prod_{t} p\left(s_{t+1} \mid s_{t}, a_{t}\right) \pi_{\theta}\left(a_{t} \mid s_{t}; g\right) \eta_{\tau}(s)\, d\tau}{\int\!\int \prod_{t} p\left(s_{t+1} \mid s_{t}, a_{t}\right) \pi_{\theta}\left(a_{t} \mid s_{t}; g\right) \eta_{\tau}(s)\, d\tau\, ds}. & (2) \end{array}
53
+ $$
54
+
55
+ The goal $g$ defines an idealized target distribution $p_{g} : \Delta(S)$ , considered here as a Dirac distribution which places all the probability mass at the goal state $p_{g} = \delta(g)$ . Such a formulation has been used previously in approaches to learn goal-conditioned policies (Durugkar et al., 2021). This work focuses on minimizing the mismatch of an agent's goal-conditioned state visitation distribution $p_{\theta}(s; g)$ to this target distribution $p_{g}$ . In this paper, we will be using $p_{\theta}$ and $p_{\pi}$ interchangeably i.e., $p_{\theta}$ corresponds to the visitation distribution induced by policy $\pi$ that is parameterized by $\theta$ .
56
+
57
+ To do so, this paper considers a family of methods that compare the state-visitation distribution induced by a goal-conditioned policy and the ideal target distribution for that goal $g$ , called $f$ -divergences. $f$ -divergences are defined as (Polyanskiy & Wu, 2022),
58
+
59
+ $$
60
+ D _ {f} (P | | Q) = \int_ {P > 0} P (x) f \left(\frac {Q (x)}{P (x)}\right) d x + f ^ {\prime} (\infty)\, Q \left(\{ x : P (x) = 0 \}\right), \tag {3}
61
+ $$
62
+
63
+ where $f$ is a convex function with $f(1) = 0$ . $f^{\prime}(\infty)$ is not defined (is $\infty$ ) for several $f$ -divergences and so it is a common assumption that $Q = 0$ wherever $P = 0$ . Table 1 shows a list of commonly used $f$ -divergences with corresponding $f$ and $f^{\prime}(\infty)$ .
64
+
65
+ <table><tr><td>f-divergence</td><td>Df(P||Q)</td><td>f(u)</td><td>f&#x27;(u)</td><td>f&#x27;(∞)</td></tr><tr><td>FKL</td><td>∫P(x) log P(x)/Q(x)dx</td><td>u log u</td><td>1 + log u</td><td>Undefined</td></tr><tr><td>RKL</td><td>∫Q(x) log Q(x)/P(x)dx</td><td>- log u</td><td>- 1/u</td><td>0</td></tr><tr><td>JS</td><td>1/2 ∫[P(x) log 2P(x)/(P(x)+Q(x)) + Q(x) log 2Q(x)/(P(x)+Q(x))]dx</td><td>u log u - (1+u) log (1+u)/2</td><td>log 2u/(1+u)</td><td>log 2</td></tr><tr><td>χ²</td><td>1/2 ∫Q(x)(P(x)/Q(x) - 1)²dx</td><td>1/2(u - 1)²</td><td>u - 1</td><td>Undefined</td></tr></table>
66
+
67
+ Table 1: Selected list of $f$ -divergences $D_{f}(P||Q)$ with generator functions $f$ and their derivatives $f'$ , where $f$ is convex, lower-semicontinuous and $f(1) = 0$ .
68
+
69
+ # 3 Related Work
70
+
71
+ Shaping Rewards. Our work is related to a separate class of techniques that augment the sparse reward function with dense signals. Ng et al. (1999) proposes a way to augment reward functions without changing the optimal behavior. Intrinsic Motivation (Durugkar et al., 2021; Bellemare et al., 2016; Singh et al., 2010; Barto, 2013) has been an active research area for providing shaping rewards. Some work (Niekum, 2010; Zheng et al., 2018) learn intrinsic or alternate reward functions for the underlying task that aim to improve agent learning performance while others (Durugkar et al., 2021; Ni et al., 2020; Goyal et al., 2019) learn augmented rewards based on distribution matching. AIM (Durugkar et al., 2021) learns a potential-based shaping reward to capture the time-step distance but requires a restrictive assumption about state coverage, especially around the goal while we do not make any such assumption. Recursive classification methods (Eysenbach et al., 2021, 2020) use future state densities as rewards. However, these methods will fail when the agent has never seen the goal. Moreover, in most of these works, the reward is not stationary (is dependent on the policy)
72
+
73
+ which can lead to instabilities during policy optimization. GoFAR (Ma et al., 2022) is an offline goal-conditioned RL algorithm that minimizes a lower bound to the KL divergence between $p_{\theta}(s)$ and the $p_g(s)$ . It computes rewards using a discriminator and uses the dual formulation utilized by the DICE family (Nachum et al., 2019), but reduces to GAIL (Ho & Ermon, 2016) in the online setting, requiring coverage assumptions. Our work also minimizes the divergence between the agent's visitation distribution and the goal distribution, but we provide a new formulation for on-policy goal-conditioned RL that does not require a discriminator or the same coverage assumptions.
74
+
75
+ Policy Learning through State Matching. We first focus on imitation learning where the expert distribution $p_E(s, a)$ is directly inferred from the expert data. GAIL (Ho & Ermon, 2016) showed that the inverse RL objective is the dual of state-matching. f-MAX (Ghasemipour et al., 2019) uses f-divergence as a metric to match the agent's state-action visitation distribution $p_{\pi}(s, a)$ and $p_E(s, a)$ . Ke et al. (2019); Ghasemipour et al. (2019) shows how several commonly used imitation learning methods can be reduced to a divergence minimization. But all of these methods optimize a lower bound of the divergence which is essentially a min-max bilevel optimization objective. They break the min-max into two parts, fitting the density model to obtain a reward that can be used for policy optimization. But these rewards depend on the policy, and should not be used by RL algorithms that assume stationary rewards. f-IRL (Ni et al., 2020) escapes the min-max objective but learns a reward function that can be used for policy optimization. We do not aim to learn a reward function but rather directly optimize for a policy using dense signals from an $f$ -divergence objective.
76
+
77
+ In reinforcement learning, the connections between entropy regularized MaxEnt RL and the minimization of reverse KL between the agent's trajectory distribution, $p_{\pi}(\tau)$, and the "optimal" trajectory distribution, $p^{*}(\tau) \propto e^{r(\tau)}$, have been extensively studied (Ziebart, 2010; Ziebart et al., 2008; Kappen et al., 2012; Levine, 2018; Haarnoja et al., 2018). MaxEnt RL optimizes for a policy with maximum entropy but such a policy does not guarantee maximum coverage of the state space. Hazan et al. (2018) discusses an objective for maximum exploration that focuses on maximizing the entropy of the state-visitation distribution or KL divergence between the state-visitation distribution and a uniform distribution. A few works like Durugkar et al. (2023, 2021); Ma et al. (2022), that have explored state-matching for reinforcement learning, have been discussed above. Several works (Belousov & Peters, 2018, 2019; Touati et al., 2020) have used divergences to constrain the policy improvement steps, making the updates more stable.
78
+
79
+ Limitations of Markov Rewards. Our work looks beyond the maximization of a Markov reward for policy optimization. The learning signals that we use are non-stationary. We thus discuss the limitations of using Markov rewards for obtaining the optimal policy. There have been works (Abel et al., 2021; Clark & Amodei, 2016; Icarte et al., 2018, 2021) that express the difficulty in using Markov rewards. Abel et al. (2021) proves that there always exist environment-task pairs that cannot be described using Markov rewards. Reward Machines (Icarte et al., 2018) create finite automata to specify reward functions and can specify Non-Markov rewards as well but these are hand-crafted.
80
+
81
+ # 4 $f$ -Policy Gradient
82
+
83
+ In this paper, we derive an algorithm where the agents learn by minimizing the following $f$ -divergence:
84
+
85
+ $$
86
+ J (\theta) = D _ {f} \left(p _ {\theta} (s) \mid \mid p _ {g} (s)\right) \tag {4}
87
+ $$
88
+
89
+ In this section, we shall derive an algorithm to minimize $J(\theta)$ and analyze the objective more closely in the subsequent section. Unlike f-max (Ghasemipour et al., 2019), we directly optimize $J(\theta)$ . We differentiate $J(\theta)$ with respect to $\theta$ to get this gradient. We follow a similar technique as Ni et al. (2020) to obtain the analytical gradient of $J(\theta)$ .
90
+
91
+ Theorem 4.1. The gradient of $J(\theta)$ as defined in Equation 4 is given by,
92
+
93
+ $$
94
+ \nabla_ {\theta} J (\theta) = \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \right] \left[ \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} \left(s _ {t}\right)}{p _ {g} \left(s _ {t}\right)}\right) \right] \right]. \tag {5}
95
+ $$
96
+
97
+ The gradient looks exactly like policy gradient with rewards $-f'\left(\frac{p_{\theta}(s_t)}{p_g(s_t)}\right)$ . However, this does not mean that we are maximizing $J^{RL}(\theta) = \mathbb{E}_{\tau \sim p_{\theta}(\tau)}\left[-f'\left(\frac{p_{\theta}(s_t)}{p_g(s_t)}\right)\right]$ . This is because the gradient of $J^{RL}(\theta)$ is not the same as $\nabla_{\theta}J(\theta)$ . For Dirac goal distributions, the gradient in Equation 5 cannot be
98
+
99
+ used (as $f' \left( \frac{p_{\theta}(s_t)}{p_g(s_t)} \right)$ will not be defined when $p_g(s_t) = 0$ ). We can use the definition of $f$ -divergence in Equation 3 to derive a gradient for such distributions.
100
+
101
+ The gradient is obtained in terms of the state visitation frequencies $\eta_{\tau}(s)$ . Further examination of the gradient leads to the following theorem,
102
+
103
+ Theorem 4.2. Updating the policy using the gradient (Equation 5) maximizes $\mathbb{E}_{p_{\theta}}[\eta_{\tau}(g)]$.
104
+
105
+ Theorem 4.2 provides another perspective for $f$ -Policy Gradient - $\eta_{\tau}(g)$ is equivalent to the expected return for a goal-based sparse reward, hence optimizing the true goal-conditioned RL objective. We shall prove the optimality of the policy obtained from minimizing $J(\theta)$ in the next section.
106
+
107
+ In practice, a Dirac goal distribution can be approximated by clipping off the zero probabilities at $\epsilon$ , similar to Laplace correction. Doing so, we will be able to use dense signals from the gradient in Equation 5 while still producing the optimal policy. This approximation is different from simply adding an $\epsilon$ reward at every state. This is because the gradients are still weighed by $f'\left(\frac{p_{\theta}(s_t)}{\epsilon}\right)$ which depends on $p_{\theta}(s_t)$ .
108
+
109
+ Simply optimizing $J(\theta)$ is difficult because it faces similar issues to REINFORCE (Williams & Peng, 1991). A major shortcoming of the above gradient computation is that it requires completely on-policy updates. This requirement will make learning sample inefficient, especially when dealing with any complex environments. However, there have been a number of improvements to naïve policy gradients that can be used. One approach is to use importance sampling (Precup, 2000), allowing samples collected from a previous policy $\pi_{\theta'}$ to be used for learning. To reap the benefits of importance sampling, we need the previous state-visitation distributions to compute $f' \left( \frac{p_{\theta}(s)}{p_g(s)} \right)$ . Hence, we need to ensure that the current policy does not diverge much from the previous policy. This condition is ensured by constraining the KL divergence between the current policy and the previous policy. We use the clipped objective similar to Proximal Policy Optimization (Schulman et al., 2017), which has been shown to work well with policy gradients. PPO has shown that the clipped loss works well even without an explicit KL constraint in the objective. The gradient used in practice is,
110
+
111
+ $$
112
+ \nabla_ {\theta} J (\theta) = \mathbb {E} _ {s _ {t}, a _ {t} \sim p _ {\theta^ {\prime}} (s _ {t}, a _ {t})} \left[ \min \left(r _ {\theta} \left(s _ {t}\right) F _ {\theta^ {\prime}} \left(s _ {t}\right), \mathrm{clip} \left(r _ {\theta} \left(s _ {t}\right), 1 - \epsilon , 1 + \epsilon\right) F _ {\theta^ {\prime}} \left(s _ {t}\right)\right) \right] \tag {6}
113
+ $$
114
+
115
+ where $r_{\theta}(s_t) = \frac{\pi_{\theta}(a_t|s_t)}{\pi_{\theta'}(a_t|s_t)}$ and $F_{\theta '}(s_t) = \sum_{t' = t}^{T}\gamma^{t'}f'\left(\frac{p_{\theta'}(s_{t'})}{p_g(s_{t'})}\right)$. The derivation for this objective is provided in Appendix B. $\gamma$ is added to improve the stability of gradients and to prevent the sum of $f^{\prime}\left(\frac{p_{\theta^{\prime}}(s_{t'})}{p_g(s_{t'})}\right)$ from exploding.
116
+
117
+ For the purpose of this paper, we use kernel density estimators to estimate the goal distribution and the agent's state visitation distribution. We may also use discriminators to estimate the ratio of these densities like Ho & Ermon (2016); Fu et al. (2017); Ghasemipour et al. (2019). But unlike these methods, we will not be incorrectly breaking a min-max objective. In our case, the estimate of the gradient requires the value of the ratio of the two distributions and does not make any assumptions about the stationarity of these values. In contrast, the adversarial methods break the min-max objective and assume the discriminator
118
+
119
+ to be fixed (and rewards stationary) during policy optimization.
120
+
121
+ # Algorithm 1 $f$ -PG
122
+
123
+ Let, $\pi_{\theta}$ be the policy, $G$ be the set of goals, $B$ be a buffer
124
+ for $i = 1$ to num_iter do
125
+ $B \gets []$
126
+ for $j = 1$ to num_traj_per_iter do
127
+ Sample $g$ , set $p_g(s)$
128
+ Collect goal conditioned trajectories, $\tau : g$
129
+ Fit $p_{\theta}(s)$ using KDE on $\tau$
130
+ Store $f' \left( \frac{p_{\theta}(s)}{p_g(s)} \right)$ for each $s$ in $\tau$ $B \gets B + \{\tau : g\}$
131
+ end for
132
+ for $j = 1$ to num_policy_updates do
133
+ $\theta \gets \theta - \alpha \nabla_{\theta} J(\theta)$ (Equation 6)
134
+ end for
+ end for
135
+
136
+ # 5 Theoretical analysis of $f$ -PG
137
+
138
+ In this section, we will first show that minimizing the f-divergence between the agent's state visitation distribution and goal distribution yields the optimal policy. We will further analyze the connections to metric based shaping rewards and implicit exploration boost from the learning signals. For the rest
139
+
140
+ of the paper, we will refer to $f$ -PG using FKL divergence as $fkl$ -PG, $f$ -PG using RKL divergence as $rkl$ -PG and so on.
141
+
142
+ # 5.1 Analysis of $J(\theta)$
143
+
144
+ This section shows that the policy obtained by minimizing an $f$ -divergence between the agent's state visitation distribution and the goal distribution is the optimal policy.
145
+
146
+ Theorem 5.1. The policy that minimizes $D_{f}(p_{\pi}||p_{g})$ for a convex function $f$ with $f(1) = 0$ and $f'(\infty)$ being defined, is the optimal policy.
147
+
148
+ The proof for Theorem 5.1 is provided in Appendix A. The Theorem states that the policy obtained by minimizing the $f$ -divergence between the agent's state-visitation distribution and the goal distribution is the optimal policy for a class of convex functions defining the $f$ -divergence with $f'(\infty)$ defined. It thus makes sense to minimize the $f$ -divergence between the agent's visitation and the goal distribution. It must be noted that the objective does not involve maximizing a reward function. Note that the condition that $f'(\infty)$ is defined is not true for all $f$ -divergences. The common $f$ -divergences like RKL, TV, and JS have $f'(\infty)$ defined, so $rkl$ -PG, $tv$ -PG, and $js$ -PG will produce the optimal policy.
149
+
150
+ Forward KL divergence (FKL) has $f = u\log u$ and so does not have $f^{\prime}(\infty)$ defined. Does this mean that the policy obtained by minimizing the FKL divergence is not optimal? Lemma 5.1 (proof in Appendix A) shows that the policy obtained maximizes the entropy of the agent's state-visitation distribution along with maximizing a reward of $\log p_{g}(s)$ .
151
+
152
+ Lemma 5.1. $fkl$ -PG produces a policy that maximizes the reward $\log p_{g}(s)$ along with the entropy of the state-visitation distribution.
153
+
154
+ A similar result can be shown for $\chi^2$ -divergence as well. It must be understood that Lemma 5.1 does not mean that $fkl$ -PG is the same as the commonly studied MaxEnt RL.
155
+
156
+ Differences from MaxEnt RL: MaxEnt RL, as studied in Haarnoja et al. (2017, 2018), maximizes the entropy of the policy along with the task reward to achieve better exploration. However, maximizing the entropy of the policy does not imply maximum exploration. Hazan et al. (2018) shows that maximizing the entropy of the state-visitation distribution provably provides maximum exploration. Lemma 5.1 shows that $fkl$ -PG maximizes the entropy of the state-visitation distribution along with the reward making it better suited for exploration. To distinguish our work, we call the MaxEnt RL, as discussed in works like Haarnoja et al. (2017, 2018), as $\pi$ -MaxEnt RL because it only focuses on the entropy of the policy. On the other hand, $fkl$ -PG maximizes the entropy of the state-visitation distribution so we call it state-MaxEnt RL or s-MaxEnt RL. Similarly, sa-MaxEnt RL can be defined to maximize the entropy of the state-action visitation distribution.
157
+
158
+ ![](images/f4ab49e02ec7bb7ead8b18708210b2ea656b5878b3b8e4371dc0d400e0a59e69.jpg)
159
+ (a) $s$ -MaxEnt RL
160
+
161
+ ![](images/c042f77c726f32b6785659633cdf835953f66990a3ce47ce8b1bc5dc5be44c22.jpg)
162
+ (b) $\pi$ -MaxEnt RL
163
+ Figure 1: Comparison of the evolution of the state-visitation distributions with training for $\pi$ -MaxEnt RL and $s$ -MaxEnt RL. The darker regions imply lower visitation while the bright regions imply higher visitations.
164
+
165
+ Since the agent's state visitation distribution depends on both the policy and the dynamics, simply increasing the entropy of the policy (without considering the dynamics) will not ensure that the agent will visit most of the states or will have a state-visitation distribution with high entropy. In Figure 1, we compare the efficiencies of $\pi$ -MaxEnt RL and $s$ -MaxEnt RL to explore around a wall in a discrete gridworld. The initial and the goal distributions (highlighted in green and red respectively)
166
+
167
+ ![](images/01e8b0b1f015728e9c545d3c060a5dff22934c7948b15cda5a26a19d136f0afe.jpg)
168
+
169
+ ![](images/1cdd1ed0787d08427ecce06b69859e09d8577c33cb38935085b84a03049f95d1.jpg)
170
+
171
+ ![](images/352bf7cd2b8f29ffec3475b6b34df49904121a6c6db879cc186449e601568701.jpg)
172
+
173
+ ![](images/8ff0d14020f4c9c8a5f0a6adce48c277c7800f9fae7ee6fd2e7f404b3d185b8f.jpg)
174
+
175
+ ![](images/764aba6d4265e9b12240b7375caac36ce50bc2eb3b2b413c5aa89ca887667cb5.jpg)
176
+
177
+ ![](images/12e7ae56e38619324d47cabd4ac96456c6e846072677d886ec0e1580e46c3879.jpg)
178
+ Figure 2: Evolution of $-f^{\prime}(\frac{p_{\theta}(s)}{p_g(s)})$ for $f = u\log u$ through policy learning. Top: $f^{\prime}(\frac{p_{\theta}(s)}{p_g(s)})$ , darker blue are relatively lower values (higher values for the learning signal) while red corresponds to high values (lower values for the learning signal). Bottom: Corresponding state-visitation of the policy.
179
+
180
+ ![](images/b8f477da074dcb3231b8a63801ecbab3f6b236825538a3021524080208301296.jpg)
181
+
182
+ ![](images/53fbeecd60c4de0fed73f5975f6295fcda2d13ec9aa55eae1066058e518746ff.jpg)
183
+
184
+ ![](images/481b50bc8c99376fd43f776841cf7f250bc8d35ba8af471998a4b682a892bf9a.jpg)
185
+
186
+ ![](images/896449534a02e3010b613c0468770ab1aea32fbb546a441395a97b423eacf4f4.jpg)
187
+
188
+ are separated by a wall. This environment is further discussed in Section 6.1 and Appendix C. Figure 1 shows the evolution of the agent's state-visitation distribution with training for $s$ -MaxEnt RL (fkl-PG) and $\pi$ -MaxEnt RL (Soft Q Learning (Haarnoja et al., 2017)).
189
+
190
+ Metric-based Shaping Reward: A deeper look into Lemma 5.1 shows that an appropriate choice of $p_g(s)$ can lead to entropy maximizing policy optimization with metric-based shaping rewards. Define the goal distribution as $p_g(s) = e^{f(s;g)}$ where $f(s;g)$ captures the metric of the underlying space. Then the $fkl$ -PG objective becomes,
191
+
192
+ $$
193
+ \min D _ {\mathrm{FKL}} \left(p _ {\theta} \| p _ {g}\right) = \max \mathbb {E} _ {p _ {\theta}} [ f (s; g) ] - \mathbb {E} _ {p _ {\theta}} [ \log p _ {\theta} ]. \tag {7}
194
+ $$
195
+
196
+ The above objective maximizes the reward $f(s; g)$ along with the entropy of the agent's state visitation distribution. For an L2 Euclidean metric, $f(s; g)$ will be $-||s - g||_2^2$ which is the L2 shaping reward, and the goal distribution will be Gaussian. If the goal distribution is Laplacian, the corresponding shaping reward will be the L1 norm.
197
+
198
+ AIM (Durugkar et al., 2021) used a potential-based shaping reward based on a time step quasimetric. If we define $f(s;g)$ as a Lipschitz function for the time step metric maximizing at $s = g$ , we end up optimizing for the AIM reward along with maximizing the entropy of the state-visitation distribution.
199
+
200
+ # 5.2 Analysis of the learning signals
201
+
202
+ $f$ -PG involves a learning signal $f^{\prime}\left(\frac{p_{\theta}(s)}{p_g(s)}\right)$ to weigh the gradients of log probabilities of the policy. Since we are minimizing the objective (in contrast to policy gradients) the visitation will be pushed towards states with lower values of the learning signal. It is thus important to understand how $f^{\prime}\left(\frac{p_{\theta}(s)}{p_g(s)}\right)$ behaves for goal-conditioned RL settings. During the initial stages of training, the agent visits regions with very low $p_{g}$ . For such states, the signal has a higher value compared to the states that have lower $p_{\theta}$ , i.e., the unexplored states. This is because for any convex function $f$ , $f^{\prime}(x)$ is an increasing function, so minimizing $f^{\prime}\left(\frac{p_{\theta}(s)}{p_g(s)}\right)$ will imply minimizing $p_{\theta}(s)$ for the states with low $p_{g}(s)$ . The only way to do this is to increase the entropy of the state-visitation distribution, directly making the agent explore new states. As long as there is no significant overlap between the two distributions, it will push $p_{\theta}$ down to a flatter distribution until there is enough overlap with the goal distribution when it will pull back the agent's visitation again to be closer to the goal distribution.
203
+
204
+ This learning signal should not be confused with reward in reinforcement learning. It is non-stationary and non-Markovian as it depends on the policy. More importantly, we are not maximizing this signal, just using it to weigh the gradients of the policy.
205
+
206
+ In the following example, we shall use the Reacher environment (Todorov et al., 2012) to illustrate how our learning signal $(f'(\frac{p_{\theta}(s)}{p_g(s)}))$ varies as the agent learns. We will also show how this signal can push for exploration when the agent has not seen the goal yet. We fix the goal at $(-0.21, 0)$ and show how the learning signal evolves with the policy. While Figure 2 shows the evolution of $-f'(\frac{p_{\theta}(s)}{p_g(s)})$ (note the negation) for $fkl$ -PG, the rest can be found in Appendix D.
207
+
208
+ The value of $f^{\prime}\left(\frac{p_{\theta}(s)}{p_{g}(s)}\right)$ is highest where the agent's visitation is high and lower where the agent is not visiting. $f^{\prime}\left(\frac{p_{\theta}(s)}{p_{g}(s)}\right)$ has the lowest value at the goal. As the policy converges to the optimal policy, the
209
+
210
+ regions where the state-visitation distribution is considerably high (towards the bottom-right in the figure), the value for $f^{\prime}\left(\frac{p_{\theta}(s)}{p_{g}(s)}\right)$ decreases for those states (to still push for exploration) but its value at the goal is low enough for the policy to converge.
211
+
212
+ # 6 Experiments
213
+
214
+ Our experiments evaluate our new framework ( $f$ -PG) as an alternative to conventional reward maximization for goal-conditional RL. We pose the following questions:
215
+
216
+ 1. Does $f$ -PG provide sufficient signals to explore in otherwise challenging sparse reward settings?
217
+ 2. How well does our framework perform compared to discriminator-based approaches?
218
+ 3. Can our framework scale to larger domains with continuous state spaces and randomly generated goals?
219
+ 4. How do different $f$ -divergences affect learning?
220
+
221
+ The first two questions are answered using a toy gridworld environment. The gridworld has a goal contained in a room which poses a significant exploration challenge. We also show how the dense signal to the gradients of the policy evolves during training on a continuous domain like Reacher. To answer the third question, our framework is compared with several baselines on a 2D Maze solving task (Point Maze). Additionally, we scale to more complex tasks such as FetchReach (Plappert et al., 2018) and an exploration-heavy PointMaze.
222
+
223
+ # 6.1 Gridworld
224
+
225
+ We use a gridworld environment to compare and visualize the effects of using different shaping rewards for exploration. We discussed this environment briefly in Section 5.1. The task is for the agent to reach the goal contained in a room. The only way to reach the goal is to go around the wall. The task reward is 1 when the agent reaches the room otherwise it is 0. The state space is simply the $(x, y)$ coordinates of the grid and the goal is fixed. A detailed description of the task is provided in Appendix C. Although the environment seems simple, exploration here is very difficult as there is no incentive for the agent to go around the wall.
226
+
227
+ ![](images/1dd4dc87eee0ad0a988a6e3479f454fb12ae857a9b5505f744d6751877482774.jpg)
228
+ (a) $fkl$ -PG
229
+
230
+ ![](images/3bff903566b5eb9d25d42573e0bb50b43e7bb98d920047befc12bdff47fdde88.jpg)
231
+ (b) $rkl$ -PG
232
+
233
+ ![](images/d070ef6db05ff2eed27b0545edc092cc202edc2bf99a3f35c92e5b718e2b709f.jpg)
234
+ (c) AIM
235
+ Figure 3: Gridworld: The agent needs to move from the green circle to the red circle. The state visitations of the policies (after 500 policy updates) are shown when using our framework for training $(fkl, rkl)$ compared with AIM and GAIL trained on top of soft Q learning.
236
+
237
+ ![](images/94ba524a7e9989ee0c3b390729560b53eecde9e1b33346d62264baefb145e825.jpg)
238
+ (d) GAIL
239
+
240
+ Our framework is compared against AIM (Durugkar et al., 2021), which initially introduced this environment and uses a shaping reward obtained from state-matching to solve it, and GAIL (Ho & Ermon, 2016), which uses a discriminator to learn the probability of a state being the goal state. We provide a comparison to other recent methods in Appendix C. All the baselines are implemented on top of Soft Q Learning (Haarnoja et al., 2017) which along with maximizing the augmented rewards, also maximizes the entropy of the policy while $f$ -PG is implemented as an on-policy algorithm without any extrinsic entropy maximization objective. It can be seen from Figure 3 that, $f$ -PG can explore enough to find the way around the room which is difficult for methods like GAIL even after the entropy boost. AIM learns a potential function and can also find its way across the wall. As expected, $fkl$ -PG converges to the policy maximizing the entropy of the state visitation while $rkl$ -PG produces the optimal state visitation as expected from Theorem 5.1. This simple experiment clearly illustrates two things: (1) $f$ -PG can generate dense signals to explore the state space and search for the goal and (2) although discriminator-based methods like GAIL try to perform state-matching, they fail to explore the space well.
241
+
242
+ # 6.2 Point Maze
243
+
244
+ While the gridworld poses an exploration challenge, the environment is simple and has only one goal. This experiment shows that $f$ -PG scales to larger domains with continuous state space and a large set of goals. We use the Point Maze environments (Fu et al., 2020) which are a set of offline RL environments, and modify it to support our online algorithms. The state space is continuous and consists of the position and velocity of the agent and the goal. The action is the force applied in each direction. There are three variations of the environment namely PointMazeU, PointMazeMedium, PointMazeLarge. For the details of the three environments, please refer to Appendix E.
245
+
246
+ We compare $f$ -PG with several goal-based shaping reward, (used alongside the task reward as described in Ng et al. (1999)) to optimize a PPO policy<sup>1</sup>. The rewards tried (along with their abbreviations in the plots) are AIM (Durugkar et al., 2021)(aim), GAIL (Ho & Ermon, 2016)(gail), AIRL (Fu et al., 2017)(airl) and F-AIRL (Ghasemipour et al., 2019)(fairl). All these methods employ a state-matching objective. AIM uses Wasserstein's distance while the rest use some form of $f$ -divergence. But, all of them rely on discriminators. Along with these baselines, we experiment using our learning signal as a shaping reward (fkl-rew). Additionally, we also compare with PPO being optimized by only the task reward (none). For our method, we have only shown results for fkl-PG. For the rest of the possible $f$ -divergences, refer to Section 6.4.
247
+
248
+ ![](images/75f9fc58c3a57fe7c4dda5430303ebdcba57784f5a10577cfe9c14c331a24b51.jpg)
249
+ Figure 4: Success rates (averaged over 100 episodes and 3 seeds) of $fkl$ -PG and all the baselines. $fkl$ -PG performs well in all three environments and better than the baseline shaping rewards in the two tougher environments.
250
+
251
+ ![](images/d2c4c0e5f648a8a9038e08cb549ab7a2da70a29bbc336014183a97f0e3e8d6ef.jpg)
252
+
253
+ ![](images/3aaf0819754835d723cab221d3cf2d08e71b8816efbc2fee7c50fca6ff88a0cf.jpg)
254
+
255
+ Figure 4 (plotting mean and std-dev for 3 seeds) clearly illustrates that $fkl$ -PG is able to perform well in all three environments. In fact, it performs better than the baselines in the more difficult environments. It can also be seen that shaping rewards can often lead to suboptimal performance as none is higher than a few of the shaping rewards. As expected, the curve $fkl$ -rew performs poorly. In the simpler PointMazeU environment, the performance for most of the shaping rewards is similar (along with none) but in more complex PointMazeMedium and PointMazeLarge, a lot of these shaping rewards fail.
256
+
257
+ # 6.3 Scaling to Complex Tasks
258
+
259
+ We scale our method to more complex tasks such as FetchReach (Plappert et al., 2018) and a difficult version of PointMaze. In the PointMaze environments used in the previous section, distributions from which the initial state and the goal are sampled, have a significant overlap easing the exploration. We modify these environments to ensure a significant distance between the sampled goal distributions and the agent's state-visitation distribution as shown in Figure 5 (top), making exploration highly challenging. Figure 5 (bottom) shows the comparison of $fkl$ -PG with GAIL (Ho & Ermon, 2016) and AIM (Durugkar et al., 2021).
260
+
261
+ The following can be concluded from these experiments: (1) The discriminative-based methods heavily depend on coverage assumptions and fail in situations where there is no significant overlap between the goal distribution and the agent's state visitation distribution. $fkl$ -PG does not depend on any such assumptions. (2) $f$ -PG is considerably more stable than these baselines (as indicated by the variance of these methods).
262
+
263
+ # 6.4 Comparing different $f$ -divergences
264
+
265
+ We perform an ablation to compare different $f$ -divergences on their performances on the three Point Maze environments. Figure 6 (plotting mean and std-dev for 3 seeds) shows that, empirically, $fkl$ -PG performs the best followed by $\chi^2$ -PG. Interestingly, both of these do not guarantee optimal policies
266
+
267
+ ![](images/7d06561586ab9f02129fdff6a6ad97269e0af2ce0d1fe61e7b7bd9ec3608bf61.jpg)
268
+
269
+ ![](images/434f22b831123e9bae68f9a5f9103e6675816aee662d4d4d99684381d122a6e7.jpg)
270
+
271
+ ![](images/0197bfa08cebe69d60f5e42fce32919bd7e6b73167436ebf2cd0172c4fc2685b.jpg)
272
+
273
+ ![](images/2c67a28ef77b225aef746fd00af57c3732df4007e2ca3b2ebce57079492bc5b8.jpg)
274
+
275
+ ![](images/9e06b38757c6572d745295b7773c97e086a9aff3bc6cf3d416770c2183a3cfe9.jpg)
276
+
277
+ ![](images/5c3c2836a6618381d15a0ac92c6413fc72b2732986fc2fc1cf724e080290e597.jpg)
278
+
279
+ ![](images/447f4cc59ed618790700d5cb029fc5edeee82f3fe8a20f59a6615536a30049e7.jpg)
280
+ Figure 5: (top): Description of the environments. In the PointMaze environments, the green and red shades represent the distributions from which the initial state and goal states are sampled. (bottom): Success rates (averaged over 100 episodes and 3 seeds) of $fkl$ -PG, GAIL and AIM. $fkl$ -PG outperforms these baselines with considerably lower variance.
281
+
282
+ ![](images/f7245cd3546045a9fd7199c8f234298eda00dac766fb671c4d9a6ca76a699d61.jpg)
283
+ Figure 6: Success rates (averaged over 100 episodes and 3 seeds) of $f$ -PG for different $f$ . $fkl$ -PG performs the best followed by $\chi^2$ -PG.
284
+
285
+ ![](images/6735080bfcabd0c392f8ad4d385a0f947195102bc9c6f2a5d449dcaa35dce27a.jpg)
286
+
287
+ but it can be shown from Lemma 5.1 that $fkl$ -PG converges to the policy that along with maximizing for a "reward", maximizes the entropy of the state-visitation. A similar result can be shown for $\chi^2$ as well (proof in the Appendix A). This result can be explained by the need for exploration in the larger mazes, hence learning policies to keep the entropy of the state visitation high.
288
+
289
+ # 7 Discussion
290
+
291
+ This paper derives a novel framework for goal-conditioned RL in the form of an on-policy algorithm $f$ -policy gradients which minimizes the $f$ -divergence between the agent's state visitation and the goal distribution. It proves that for certain $f$ -divergences, we can recover the optimal policy while for some, we obtain a policy maximizing the entropy of the state-visitation. Entropy-regularized policy optimization (s-MaxEnt RL) for metric-based shaping rewards can be shown as a special case of $f$ -PG where $f$ is $fkl$ . $f$ -PG can provide an exploration bonus when the agent has not yet seen the goal. We demonstrated that $f$ -PG can scale up to complex domains.
292
+
293
+ Through this work, we introduce a new perspective for goal-conditioned RL. By circumventing rewards, $f$ -PG can avoid issues that arise with reward misspecification (Knox et al., 2021). There are several avenues to focus on for future work. First, the current framework is on-policy and poses an exploration challenge. An avenue for future work could be to develop an off-policy way to solve the objective. Second, this paper does not tackle goal distributions with several modes. Such a target distribution would be interesting to tackle in future work.
294
+
295
+ # 8 Acknowledgements
296
+
297
+ This work was in part supported by Cisco Research. Any opinions, findings and conclusions, or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of Cisco Research.
298
+ This work has partially taken place in the Learning Agents Research Group (LARG) at UT Austin. LARG research is supported in part by NSF (FAIN-2019844, NRT-2125858), ONR (N00014-18-2243), ARO (E2061621), Bosch, Lockheed Martin, and UT Austin's Good Systems grand challenge. Peter Stone serves as the Executive Director of Sony AI America and receives financial compensation for this work. The terms of this arrangement have been reviewed and approved by the University of Texas at Austin in accordance with its policy on objectivity in research.
299
+
300
+ # References
301
+
302
+ David Abel, Will Dabney, Anna Harutyunyan, Mark K. Ho, Michael L. Littman, Doina Precup, and Satinder Singh. On the expressivity of markov reward. CoRR, abs/2111.00876, 2021. URL https://arxiv.org/abs/2111.00876.
303
+ Jose A. Arjona-Medina, Michael Gillhofer, Michael Widrich, Thomas Unterthiner, Johannes Brandstetter, and Sepp Hochreiter. Rudder: Return decomposition for delayed rewards, 2019.
304
+ Andrew G. Barto. Intrinsic Motivation and Reinforcement Learning, pp. 17-47. Springer Berlin Heidelberg, Berlin, Heidelberg, 2013. ISBN 978-3-642-32375-1. doi: 10.1007/978-3-642-32375-1_2. URL https://doi.org/10.1007/978-3-642-32375-1_2.
305
+ Marc G. Bellemare, Sriram Srinivasan, Georg Ostrovski, Tom Schaul, David Saxton, and Remi Munos. Unifying count-based exploration and intrinsic motivation, 2016.
306
+ Boris Belousov and Jan Peters. f-divergence constrained policy improvement. CoRR, abs/1801.00056, 2018. URL http://arxiv.org/abs/1801.00056.
307
+ Boris Belousov and Jan Peters. Entropic regularization of markov decision processes. CoRR, abs/1907.04214, 2019. URL http://arxiv.org/abs/1907.04214.
308
+ Serena Booth, Julie Shah, Scott Niekum, Peter Stone, and Alessandro Allievi. The perils of trial-and-error reward design: misdesign through overfitting and invalid task specifications. 2023.
309
+ Jack Clark and Dario Amodei. Faulty reward functions in the wild, 2016. URL https://openai.com/research/faulty-reward-functions.
310
+ Ishan Durugkar, Maurizio Tec, Scott Niekum, and Peter Stone. Adversarial intrinsic motivation for reinforcement learning. CoRR, abs/2105.13345, 2021. URL https://arxiv.org/abs/2105.13345.
311
+ Ishan Durugkar et al. Estimation and control of visitation distributions for reinforcement learning. PhD thesis, 2023.
312
+ Benjamin Eysenbach, Ruslan Salakhutdinov, and Sergey Levine. C-learning: Learning to achieve goals via recursive classification. CoRR, abs/2011.08909, 2020. URL https://arxiv.org/abs/2011.08909.
313
+ Benjamin Eysenbach, Sergey Levine, and Ruslan Salakhutdinov. Replacing rewards with examples: Example-based policy search via recursive classification. CoRR, abs/2103.12656, 2021. URL https://arxiv.org/abs/2103.12656.
314
+ Alhussein Fawzi, Matej Balog, Aja Huang, Thomas Hubert, Bernardino Romera-Paredes, Mohammadamin Barekatain, Alexander Novikov, Francisco J. R. Ruiz, Julian Schrittwieser, Grzegorz Swirszcz, David Silver, Demis Hassabis, and Pushmeet Kohli. Discovering faster matrix multiplication algorithms with reinforcement learning. Nature, 610(7930):47-53, 2022. doi: 10.1038/s41586-022-05172-4.
315
+ Justin Fu, Katie Luo, and Sergey Levine. Learning robust rewards with adversarial inverse reinforcement learning. CoRR, abs/1710.11248, 2017. URL http://arxiv.org/abs/1710.11248.
316
+
317
+ Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4RL: datasets for deep data-driven reinforcement learning. CoRR, abs/2004.07219, 2020. URL https://arxiv.org/abs/2004.07219.
318
+ Theophile Gervet, Soumith Chintala, Dhruv Batra, Jitendra Malik, and Devendra Singh Chaplot. Navigating to objects in the real world. Science Robotics, 8(79):eadf6991, 2023. doi: 10.1126/scirobotics.adf6991. URL https://www.science.org/doi/abs/10.1126/scirobotics.adf6991.
319
+ Seyed Kamyar Seyed Ghasemipour, Richard S. Zemel, and Shixiang Gu. A divergence minimization perspective on imitation learning methods. CoRR, abs/1911.02256, 2019. URL http://arxiv.org/abs/1911.02256.
320
+ Prasoon Goyal, Scott Niekum, and Raymond J. Mooney. Using natural language for reward shaping in reinforcement learning. CoRR, abs/1903.02020, 2019. URL http://arxiv.org/abs/1903.02020.
321
+ Abhishek Gupta, Vikash Kumar, Corey Lynch, Sergey Levine, and Karol Hausman. Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning. CoRR, abs/1910.11956, 2019. URL http://arxiv.org/abs/1910.11956.
322
+ Tuomas Haarnoja, Haoran Tang, Pieter Abbeel, and Sergey Levine. Reinforcement learning with deep energy-based policies. CoRR, abs/1702.08165, 2017. URL http://arxiv.org/abs/1702.08165.
323
+ Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. CoRR, abs/1801.01290, 2018. URL http://arxiv.org/abs/1801.01290.
324
+ Elad Hazan, Sham M. Kakade, Karan Singh, and Abby Van Soest. Provably efficient maximum entropy exploration. CoRR, abs/1812.02690, 2018. URL http://arxiv.org/abs/1812.02690.
325
+ Jonathan Ho and Stefano Ermon. Generative adversarial imitation learning. CoRR, abs/1606.03476, 2016. URL http://arxiv.org/abs/1606.03476.
326
+ Rodrigo Toro Icarte, Toryn Klassen, Richard Valenzano, and Sheila McIlraith. Using reward machines for high-level task specification and decomposition in reinforcement learning. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 2107-2116. PMLR, 10-15 Jul 2018. URL https://proceedings.mlr.press/v80/icarte18a.html.
327
+ Rodrigo Toro Icarte, Ethan Waldie, Toryn Q. Klassen, Richard Anthony Valenzano, Margarita P. Castro, and Sheila A. McIlraith. Learning reward machines: A study in partially observable reinforcement learning. CoRR, abs/2112.09477, 2021. URL https://arxiv.org/abs/2112.09477.
328
+ John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin Žídek, Anna Potapenko, Alex Bridgland, Clemens Meyer, Simon Kohl, Andrew Ballard, Andrew Cowie, Bernardino Romera-Paredes, Stanislav Nikolov, Rishub Jain, Jonas Adler, and Demis Hassabis. Highly accurate protein structure prediction with alphafold. Nature, 596:1-11, 08 2021. doi: 10.1038/s41586-021-03819-2.
329
+ Leslie Pack Kaelbling. Learning to achieve goals. In *IJCAI*, pp. 1094–1099. Citeseer, 1993.
330
+ Hilbert J. Kappen, Vicenç Gómez, and Manfred Opper. Optimal control as a graphical model inference problem. Machine Learning, 87(2):159-182, feb 2012. doi: 10.1007/s10994-012-5278-7. URL https://doi.org/10.1007%2Fs10994-012-5278-7.
331
+ Liyiming Ke, Matt Barnes, Wen Sun, Gilwoo Lee, Sanjiban Choudhury, and Siddhartha S. Srinivasa. Imitation learning as f-divergence minimization. CoRR, abs/1905.12888, 2019. URL http://arxiv.org/abs/1905.12888.
332
+ Heecheol Kim, Yoshiyuki Ohmura, and Yasuo Kuniyoshi. Robot peels banana with goal-conditioned dual-action deep imitation learning, 2022.
333
+
334
+ W. Bradley Knox, Alessandro Allievi, Holger Banzhaf, Felix Schmitt, and Peter Stone. Reward (mis)design for autonomous driving. CoRR, abs/2104.13906, 2021. URL https://arxiv.org/abs/2104.13906.
335
+ Sergey Levine. Reinforcement learning and control as probabilistic inference: Tutorial and review. CoRR, abs/1805.00909, 2018. URL http://arxiv.org/abs/1805.00909.
336
+ Xiao-Yang Liu, Hongyang Yang, Jiechao Gao, and Christina Dan Wang. FinRL. In Proceedings of the Second ACM International Conference on AI in Finance. ACM, nov 2021. doi: 10.1145/3490354.3494366. URL https://doi.org/10.1145/3490354.3494366.
337
+ Yecheng Jason Ma, Jason Yan, Dinesh Jayaraman, and Osbert Bastani. How far i'll go: Offline goal-conditioned reinforcement learning via $f$ -advantage regression, 2022. URL https://arxiv.org/abs/2206.03023.
338
+ Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Alex Graves, Ioannis Antonoglou, Daan Wierstra, and Martin A. Riedmiller. Playing atari with deep reinforcement learning. CoRR, abs/1312.5602, 2013. URL http://arxiv.org/abs/1312.5602.
339
+ Ofir Nachum, Yinlam Chow, Bo Dai, and Lihong Li. Dualdice: Behavior-agnostic estimation of discounted stationary distribution corrections. ArXiv, abs/1906.04733, 2019.
340
+ A. Ng, Daishi Harada, and Stuart J. Russell. Policy invariance under reward transformations: Theory and application to reward shaping. In International Conference on Machine Learning, 1999.
341
+ Tianwei Ni, Harshit S. Sikchi, Yufei Wang, Tejus Gupta, Lisa Lee, and Benjamin Eysenbach. f-irl: Inverse reinforcement learning via state marginal matching. CoRR, abs/2011.04709, 2020. URL https://arxiv.org/abs/2011.04709.
342
+ Scott Niekum. Evolved intrinsic reward functions for reinforcement learning. Proceedings of the AAAI Conference on Artificial Intelligence, 24(1):1955-1956, Jul. 2010. doi: 10.1609/aaai.v24i1.7772. URL https://ojs.aaai.org/index.php/AAAI/article/view/7772.
343
+ OpenAI, Matthias Plappert, Raul Sampedro, Tao Xu, Ilge Akkaya, Vineet Kosaraju, Peter Welinder, Ruben D'Sa, Arthur Petron, Henrique Ponde de Oliveira Pinto, Alex Paino, Hyeonwoo Noh, Lilian Weng, Qiming Yuan, Casey Chu, and Wojciech Zaremba. Asymmetric self-play for automatic goal discovery in robotic manipulation. CoRR, abs/2101.04882, 2021. URL https://arxiv.org/abs/2101.04882.
344
+ Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback, 2022.
345
+ Matthias Plappert, Marcin Andrychowicz, Alex Ray, Bob McGrew, Bowen Baker, Glenn Powell, Jonas Schneider, Josh Tobin, Maciek Chociej, Peter Welinder, Vikash Kumar, and Wojciech Zaremba. Multi-goal reinforcement learning: Challenging robotics environments and request for research, 2018.
346
+ Yury Polyanskiy and Yihong Wu. "Information Theory From Coding to Learning". Cambridge University Press, 2022. URL https://people.lids.mit.edu/yp/homepage/data/itbook-export.pdf.
347
+ Doina Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, pp. 80, 2000.
348
+ Martin L Puterman. Markov decision processes. *Handbooks in operations research and management science*, 2:331-434, 1990.
349
+ John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. URL http://arxiv.org/abs/1707.06347.
350
+
351
+ Dhruv Shah, Benjamin Eysenbach, Gregory Kahn, Nicholas Rhinehart, and Sergey Levine. Ving: Learning open-world navigation with visual goals. CoRR, abs/2012.09812, 2020. URL https://arxiv.org/abs/2012.09812.
352
+ David Silver, Thomas Hubert, Julian Schrittwieser, Ioannis Antonoglou, Matthew Lai, Arthur Guez, Marc Lanctot, Laurent Sifre, Dharshan Kumaran, Thore Graepel, Timothy P. Lillicrap, Karen Simonyan, and Demis Hassabis. Mastering chess and shogi by self-play with a general reinforcement learning algorithm. CoRR, abs/1712.01815, 2017. URL http://arxiv.org/abs/1712.01815.
353
+ David Silver, Satinder Singh, Doina Precup, and Richard S. Sutton. Reward is enough. Artificial Intelligence, 299:103535, 2021. ISSN 0004-3702. doi: https://doi.org/10.1016/j.artint.2021.103535. URL https://www.sciencedirect.com/science/article/pii/S0004370221000862.
354
+ Satinder Singh, Richard L. Lewis, Andrew G. Barto, and Jonathan Sorg. Intrinsically motivated reinforcement learning: An evolutionary perspective. IEEE Transactions on Autonomous Mental Development, 2(2):70-82, 2010. doi: 10.1109/TAMD.2010.2051031.
355
+ Emanuel Todorov, Tom Erez, and Yuval Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 5026-5033. IEEE, 2012. doi: 10.1109/IROS.2012.6386109.
356
+ Ahmed Touati, Amy Zhang, Joelle Pineau, and Pascal Vincent. Stable policy optimization via off-policy divergence regularization. CoRR, abs/2003.04108, 2020. URL https://arxiv.org/abs/2003.04108.
357
+ Ronald J Williams and Jing Peng. Function optimization using connectionist reinforcement learning algorithms. Connection Science, 3(3):241-268, 1991.
358
+ Peter R Wurman, Samuel Barrett, Kenta Kawamoto, James MacGlashan, Kaushik Subramanian, Thomas J Walsh, Roberto Capobianco, Alisa Devlic, Franziska Eckert, Florian Fuchs, et al. Outracing champion gran turismo drivers with deep reinforcement learning. Nature, 602(7896): 223-228, 2022.
359
+ Zeyu Zheng, Junhyuk Oh, and Satinder Singh. On learning intrinsic rewards for policy gradient methods. CoRR, abs/1804.06459, 2018. URL http://arxiv.org/abs/1804.06459.
360
+ Brian D. Ziebart. Modeling purposeful adaptive behavior with the principle of maximum causal entropy. 2010.
361
+ Brian D. Ziebart, Andrew L. Maas, J. Andrew Bagnell, and Anind K. Dey. Maximum entropy inverse reinforcement learning. In Dieter Fox and Carla P. Gomes (eds.), AAAI, pp. 1433-1438. AAAI Press, 2008. ISBN 978-1-57735-368-3. URL http://dblp.uni-trier.de/db/conf/aaai/aaai2008.html#ZiebartMBD08.
362
+
363
+ # Appendix
364
+
365
+ A Analysis of $J(\theta)$ 16
366
+
367
+ A.1 Proof for Theorem 5.1 16
368
+ A.2 Proof for Lemma 5.1 17
369
+
370
+ B Gradient based optimization 18
371
+
372
+ B.1 Derivation of gradients 18
373
+ B.2 Practical Algorithm 20
374
+ B.3 Discounted State-Visitations 21
375
+
376
+ C Gridworld Experiments 21
377
+
378
+ C.1 Description of the task 21
379
+ C.2 Performance of $f$ -PG 21
380
+
381
+ D Visualizing the learning signals 21
382
+
383
+ D.1 Description of the task 21
384
+ D.2 Comparing different $f$ -PG 22
385
+
386
+ E PointMaze experiments 23
387
+
388
+ # A Analysis of $J(\theta)$
389
+
390
+ In this section, we will present the proofs for all the Lemmas and Theorems stated in Section 5.1.
391
+
392
+ # A.1 Proof for Theorem 5.1
393
+
394
+ To prove Theorem 5.1, we need the following Lemmas. Lemma A.1 states that among all policies, the optimal policy has the highest state visitation at the goal.
395
+
396
+ Lemma A.1. Let $\mathcal{D}$ be the set of all possible state visitations for the agent following some policy $\pi \in \Pi$ . Let $\pi^{*}$ be the optimal goal-conditioned policy. This optimal policy's state-visitation distribution will have the most measure at the goal for all $p_{\pi} \in \mathcal{D}$ i.e., $\pi^{*} \Longrightarrow p_{\pi^{*}}(g) \geq p_{\pi}(g), \forall p_{\pi} \in \mathcal{D}$ .
397
+
398
+ Proof. Let $\pi^{*}$ be the optimal policy and $p_{\pi^{*}}$ be the corresponding state visitation distribution. The reward for the sparse setting is designed as,
399
+
400
+ $$
401
+ r (s) = \left\{ \begin{array}{l l} 1 & s = g, \\ 0 & \text {o t h e r w i s e}. \end{array} \right.
402
+ $$
403
+
404
+ Hence the expected return $R_{\pi}$ for a policy $\pi$ is
405
+
406
+ $$
407
+ \begin{array}{l} R _ {\pi} = \mathbb {E} _ {p _ {\pi}} [ r (s) ] \\ = p _ {\pi} (g). \\ \end{array}
408
+ $$
409
+
410
+ The return for the optimal policy is maximum among all policies so $R_{\pi^*} \geq R_{\pi}, \forall \pi \in \Pi$ . This implies $p_{\pi^*}(g) \geq p_{\pi}(g), \forall p_{\pi} \in \mathcal{D}$ .
411
+
412
+ Lemma A.2 states that the f-divergence between $p_{\pi}(s)$ and $p_g(s)$ is a decreasing function with respect to $p_{\pi}(g)$ . This means that the objective $J(\theta)$ obtains its minimum value when $p_{\pi}(g)$ is highest.
413
+
414
+ Lemma A.2. $D_{f}(p_{\pi}(\cdot)||p_{g}(\cdot))$ is a decreasing function with respect to $p_{\pi}(g)$ for all $f$ for which $f^{\prime}(\infty)$ is defined.
415
+
416
+ Proof. The goal distribution is assumed to be a Dirac distribution i.e., $p_{g}(s) = 1$ if $s = g$ and 0 everywhere else. The $f$ -divergence between the agent state-visitation distribution, $p_{\pi}$ and the goal distribution, $p_{g}$ can be defined as,
417
+
418
+ $$
419
+ \begin{array}{l} D _ {f} \left(p _ {\pi} \mid \mid p _ {g}\right) = \sum_ {p _ {g} > 0} \left[ p _ {g} (s) f \left(\frac {p _ {\pi} (s)}{p _ {g} (s)}\right) \right] + f ^ {\prime} (\infty) p _ {\pi} [ p _ {g} = 0 ] \\ = f \left(p _ {\pi} (g)\right) + f ^ {\prime} (\infty) \left(1 - p _ {\pi} (g)\right). \\ \end{array}
420
+ $$
421
+
422
+ Let $\mathcal{F} = D_f(p_\pi ||p_g)$ . Differentiating $\mathcal{F}$ w.r.t. $p_{\pi}(g)$ , we get $\mathcal{F}' = f'(p_{\pi}(g)) - f'(\infty)$ . Since $f$ is a convex function (by the definition of $f$ -divergence), $f'(x) \leq f'(y), \forall x \leq y$ .
423
+
424
+ Hence, if $f'(\infty)$ is defined, $\mathcal{F}' \leq 0$ . Hence $\mathcal{F} = D_f(p_\pi || p_g)$ is a decreasing function with respect to $p_\pi(g)$ .
425
+
426
+ Additionally, we need Lemma A.3 and Corollary 1 to complete the proof of Theorem 5.1.
427
+
428
+ Lemma A.3. If any two policies $\pi_1$ and $\pi_2$ have the same state visitation at a given goal, they have the same returns for that goal.
429
+
430
+ Proof. Follows directly from the definition of returns. $R_{\pi} = \mathbb{E}_{p_{\pi}}[r(s)] = p_{\pi}(g)$ . Hence two policies $\pi_1$ and $\pi_2$ with the same state visitation at the goal will have the same returns.
431
+
432
+ Corollary 1. Any policy that can lead to the state-visitation distribution of the optimal policy $p_{\pi^*}$ is optimal.
433
+
434
+ Proof. Directly follows from Lemma A.3. $\square$
435
+
436
+ Theorem 5.1. The policy that minimizes $D_{f}(p_{\pi}||p_{g})$ for a convex function $f$ with $f(1) = 0$ and $f'(\infty)$ being defined, is the optimal policy.
437
+
438
+ Proof. Lemma A.1 proves that the optimal policy has the maximum state-visitation probability. Lemma A.2 proves that the $f$ -divergence objective decreases with increasing the state-visitation probability at the goal. In other words, to minimize the $f$ -divergence, we need to maximize the state visitation at goal. Corollary 1 further indicates that any policy that can lead to the state-visitation distribution of the optimal policy i.e., any policy that maximizes the state-visitation distribution at the goal state is an optimal policy.
439
+
440
+ # A.2 Proof for Lemma 5.1
441
+
442
+ Lemma 5.1. $fkl$ -PG produces a policy that maximizes the reward $\log p_{g}(s)$ along with the entropy of the state-visitation distribution.
443
+
444
+ Proof. For $fkl$ -PG, $f = u\log u$ . Hence, $J(\theta) = D_{f}(p_{\pi}||p_{g})$ can be written as,
445
+
446
+ $$
447
+ \begin{array}{l} D _ {f} (p _ {\pi} | | p _ {g}) = \mathbb {E} _ {p _ {\pi}} \left[ \log \frac {p _ {\pi}}{p _ {g}} \right] \\ = - \left[ \mathbb {E} _ {p _ {\pi}} \left[ \log p _ {g} \right] - \mathbb {E} _ {p _ {\pi}} \left[ \log p _ {\pi} \right] \right] \\ = - \left[ \mathbb {E} _ {p _ {\pi}} [ \log p _ {g} ] + \mathcal {H} (p _ {\pi}) \right] \\ \end{array}
448
+ $$
449
+
450
+ where $\mathcal{H}(p_{\pi})$ is the entropy of the agent's state visitation distribution. Minimizing $D_{f}(p_{\pi}||p_{g})$ will correspond to maximizing the reward $r(s) = \log p_g(s)$ and the entropy of $p_{\pi}$ .
451
+
452
+ A similar result could be proved for $\chi^2$ divergence:
453
+
454
+ Lemma A.4. If $f(u) = (u - 1)^2$ ($\chi^2$ divergence), $D_f(p_\pi || p_g)$ is an upper bound of $D_{FKL}(p_\pi || p_g) - 1$ . Hence minimizing $D_{\chi^2}$ will also minimize $D_{FKL}$ , recovering the entropy-regularized policy.
455
+
456
+ Proof. With $f = (u - 1)^2$ , $D_f(p_\pi || p_g)$ can be written as,
457
+
458
+ $$
459
+ \begin{array}{l} D _ {f} \left(p _ {\pi} \mid \mid p _ {g}\right) = \int p _ {g} (s) \left(\frac {p _ {\pi} (s)}{p _ {g} (s)} - 1\right) ^ {2} d s \\ = \int p _ {g} (s) \left(\left(\frac {p _ {\pi} (s)}{p _ {g} (s)}\right) ^ {2} - 2 \frac {p _ {\pi} (s)}{p _ {g} (s)} + 1\right) d s \\ = \int p _ {\pi} (s) \frac {p _ {\pi} (s)}{p _ {g} (s)} - 2 p _ {\pi} (s) + p _ {g} (s) d s \\ = \int p _ {\pi} (s) \frac {p _ {\pi} (s)}{p _ {g} (s)} d s - 1 \\ = \mathbb {E} _ {p _ {\pi} (s)} \left[ \frac {p _ {\pi} (s)}{p _ {g} (s)} \right] - 1 \\ \end{array}
460
+ $$
461
+
462
+ Since $x > \log x$ for all $x > 0$ ,
463
+
464
+ $$
465
+ \begin{array}{l} \Longrightarrow \mathbb {E} _ {p _ {\pi} (s)} [ x ] > \mathbb {E} _ {p _ {\pi} (s)} [ \log x ] \\ \Longrightarrow \mathbb {E} _ {p _ {\pi} (s)} \left[ \frac {p _ {\pi} (s)}{p _ {g} (s)} \right] > \mathbb {E} _ {p _ {\pi} (s)} \left[ \log \frac {p _ {\pi} (s)}{p _ {g} (s)} \right] \\ \Longrightarrow \mathbb {E} _ {p _ {\pi} (s)} \left[ \frac {p _ {\pi} (s)}{p _ {g} (s)} \right] - 1 > \mathbb {E} _ {p _ {\pi} (s)} \left[ \log \frac {p _ {\pi} (s)}{p _ {g} (s)} \right] - 1 \\ \end{array}
466
+ $$
467
+
468
+ Minimizing the LHS will also minimize the RHS. The RHS is essentially $D_{KL}(p_{\pi}||p_g) - 1$ . The constant $-1$ will not have any effect on the minimization of $D_{KL}(p_{\pi}||p_g)$ .
469
+
470
+ # B Gradient based optimization
471
+
472
+ # B.1 Derivation of gradients
473
+
474
+ Theorem 4.1. The gradient of $J(\theta)$ as defined in Equation 4 is given by,
475
+
476
+ $$
477
+ \nabla_ {\theta} J (\theta) = \frac {1}{T} \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \right] \left[ \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} \left(s _ {t}\right)}{p _ {g} \left(s _ {t}\right)}\right) \right] \right]. \tag {8}
478
+ $$
479
+
480
+ Proof. We follow the proof from Ni et al. (2020). Let's start with the state-visitation distribution. In Section 2, it was shown that the state-visitation distribution can be written as,
481
+
482
+ $$
483
+ \begin{array}{l} p _ {\theta} (s) \propto \int p (\tau) \Pi_ {t = 1} ^ {T} \pi_ {\theta} (s _ {t}) \eta_ {\tau} (s) d \tau \\ \Rightarrow p _ {\theta} (s) \propto \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) d \tau \\ \Longrightarrow p _ {\theta} (s) = \frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} \left(s _ {t}\right)} \eta_ {\tau} (s) d \tau}{\int \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} \left(s _ {t}\right)} \eta_ {\tau} (s) d \tau d s} \\ \Longrightarrow p _ {\theta} (s) = \frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} \left(s _ {t}\right)} \eta_ {\tau} (s) d \tau}{\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} \left(s _ {t}\right)} \int \eta_ {\tau} (s) d s d \tau} \\ \Longrightarrow p _ {\theta} (s) = \frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) d \tau}{T \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} d \tau} \\ \Longrightarrow p _ {\theta} (s) = \frac {f (s)}{Z} \\ \end{array}
484
+ $$
485
+
486
+ where $f(s) = \int p(\tau)e^{\sum_{t=1}^{T}\log\pi_{\theta}(s_t)}\eta_{\tau}(s)d\tau$ and $Z = T\int p(\tau)e^{\sum_{t=1}^{T}\log\pi_{\theta}(s_t)}d\tau$ .
487
+
488
+ Differentiating w.r.t. $\pi_{\theta}(s^{*})$
489
+
490
+ $$
491
+ \frac {d f (s)}{d \pi_ {\theta} (s ^ {*})} = \frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \eta_ {\tau} (s ^ {*}) d \tau}{\pi_ {\theta} (s ^ {*})}
492
+ $$
493
+
494
+ and,
495
+
496
+ $$
497
+ \begin{array}{l} \frac {d Z}{d \pi_ {\theta} (s ^ {*})} = \frac {T \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s ^ {*}) d \tau}{\pi_ {\theta} (s ^ {*})} \\ = \frac {T f (s ^ {*})}{\pi_ {\theta} (s ^ {*})} \\ \end{array}
498
+ $$
499
+
500
+ Computing $\frac{dp_{\theta}(s)}{d\pi_{\theta}(s^{*})}$ using $\frac{df(s)}{d\pi_{\theta}(s^{*})}$ and $\frac{dZ}{d\pi_{\theta}(s^{*})}$ ,
501
+
502
+ $$
503
+ \begin{array}{l} \frac {d p _ {\theta} (s)}{d \pi_ {\theta} \left(s ^ {*}\right)} = \frac {Z \frac {d f (s)}{d \pi_ {\theta} \left(s ^ {*}\right)} - f (s) \frac {d Z}{d \pi_ {\theta} \left(s ^ {*}\right)}}{Z ^ {2}} \\ = \frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \eta_ {\tau} (s ^ {*}) d \tau}{Z \pi_ {\theta} (s ^ {*})} - \frac {f (s)}{Z} T \frac {f (s ^ {*})}{Z \pi_ {\theta} (s ^ {*})} \\ = \frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \eta_ {\tau} (s ^ {*}) d \tau}{Z \pi_ {\theta} (s ^ {*})} - \frac {T}{\pi_ {\theta} (s ^ {*})} p _ {\theta} (s) p _ {\theta} (s ^ {*}) \\ \end{array}
504
+ $$
505
+
506
+ Now we can compute $\frac{dp_{\theta}(s)}{d\theta}$ ,
507
+
508
+ $$
509
+ \begin{array}{l} \frac {d p _ {\theta} (s)}{d \theta} = \int \frac {d p _ {\theta} (s)}{d \pi_ {\theta} (s ^ {*})} \frac {d \pi_ {\theta} (s ^ {*})}{d \theta} d s ^ {*} \\ = \int \left(\frac {\int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \eta_ {\tau} (s ^ {*}) d \tau}{Z \pi_ {\theta} (s ^ {*})} - \frac {T}{\pi_ {\theta} (s ^ {*})} p _ {\theta} (s) p _ {\theta} (s ^ {*})\right) \frac {d \pi_ {\theta} (s ^ {*})}{d \theta} d s ^ {*} \\ \end{array}
510
+ $$
511
+
512
+ $$
513
+ \begin{array}{l} = \frac {1}{Z} \iint p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \eta_ {\tau} (s ^ {*}) \frac {\nabla_ {\theta} \pi_ {\theta} (s ^ {*})}{\pi_ {\theta} (s ^ {*})} d s ^ {*} d \tau - T p _ {\theta} (s) \int p _ {\theta} (s ^ {*}) \frac {\nabla_ {\theta} \pi_ {\theta} (s ^ {*})}{\pi_ {\theta} (s ^ {*})} d s ^ {*} \\ = \frac {1}{Z} \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) d \tau - T p _ {\theta} (s) \mathbb {E} _ {s \sim p _ {\theta} (s)} [ \nabla_ {\theta} \log \pi_ {\theta} (s) ] \\ \end{array}
514
+ $$
515
+
516
+ The objective $L(\theta) = D_{f}(p_{\theta}(s)||p_{g}(s)) = \int p_{g}(s)f\left(\frac{p_{\theta}(s)}{p_{g}(s)}\right)ds$
517
+
518
+ The gradient for $L(\theta)$ will be given by,
519
+
520
+ $$
521
+ \begin{array}{l} \nabla_ {\theta} L (\theta) = \int p _ {g} (s) f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) \left(\frac {\nabla_ {\theta} p _ {\theta} (s)}{p _ {g} (s)}\right) d s \\ = \int \nabla p _ {\theta} (s) f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) d s \\ = \int f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) \left(\frac {1}{Z} \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \eta_ {\tau} (s) \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) d \tau \right. \\ \left. - T p _ {\theta} (s) \mathbb {E} _ {s \sim p _ {\theta} (s)} \left[ \nabla_ {\theta} \log \pi_ {\theta} (s) \right]\right) d s \\ = \frac {1}{Z} \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \int \eta_ {\tau} (s) f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) d s d \tau \\ - \int T p _ {\theta} (s) f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) \mathbb {E} _ {s \sim p _ {\theta} (s)} [ \nabla_ {\theta} \log \pi_ {\theta} (s) ] d s \\ = \frac {1}{Z} \int p (\tau) e ^ {\sum_ {t = 1} ^ {T} \log \pi_ {\theta} (s _ {t})} \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) d \tau \\ - T \int p _ {\theta} (s) f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) \mathbb {E} _ {s \sim p _ {\theta} (s)} [ \nabla_ {\theta} \log \pi_ {\theta} (s) ] d s \\ = \frac {1}{T} \int p _ {\theta} (\tau) \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) d \tau \\ - T \mathbb {E} _ {s \sim p _ {\theta} (s)} \left[ f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) \right] \mathbb {E} _ {s \sim p _ {\theta} (s)} \left[ \nabla_ {\theta} \log \pi_ {\theta} (s) 
\right] \\ = \frac {1}{T} \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) \right] \\ - T \mathbb {E} _ {s \sim p _ {\theta} (s)} \left[ f ^ {\prime} \left(\frac {p _ {\theta} (s)}{p _ {g} (s)}\right) \right] \mathbb {E} _ {s \sim p _ {\theta} (s)} [ \nabla_ {\theta} \log \pi_ {\theta} (s) ] \\ = \frac {1}{T} \left[ \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) \right] \right. \\ \left. - \mathbb {E} _ {s \sim p _ {\theta} (s)} \left[ \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) \right] \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \right] \right] \\ = \frac {1}{T} \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s _ {t}) \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) \right] \\ \end{array}
522
+ $$
523
+
524
+ ![](images/6e9e3102d2b17f7969e8ce939f178ddf5301df7728205409d1b9da628221447b.jpg)
525
+
526
+ Theorem 4.2. Updating the policy using the gradient maximizes $\mathbb{E}_{p_{\theta}}[\eta_{\tau}(g)]$
527
+
528
+ Proof. In the goal-based setting, $p_g$ is sparse, so we need to use the full definition of $f$ -divergence, $D_f(p_\theta ||p_g) = \sum_{p_g > 0} [p_g(s)f(\frac{p_\theta(s)}{p_g(s)})] + f'(\infty)p_\theta[p_g = 0] = \sum_{p_g > 0} [p_g(s)f(\frac{p_\theta(s)}{p_g(s)})] + f'(\infty)(1 - p_\theta(g))$ . Differentiating with respect to $\theta$ gives,
529
+
530
+ $$
531
+ \begin{array}{l} \nabla_ {\theta} L (\theta) = \left(f ^ {\prime} \left(p _ {\theta} (g)\right) - f ^ {\prime} (\infty)\right) \nabla_ {\theta} p _ {\theta} (s) \\ = \left(f ^ {\prime} \left(p _ {\theta} (g)\right) - f ^ {\prime} (\infty)\right) \left(\frac {1}{T} \int p _ {\theta} (\tau) \eta_ {\tau} (g) \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} \left(s _ {t}\right) d \tau - T p _ {\theta} (s) \mathbb {E} _ {s \sim p _ {\theta} (s)} [ \nabla_ {\theta} l o g \pi_ {\theta} (s) ]\right) \\ = \left(f ^ {\prime} \left(p _ {\theta} (g)\right) - f ^ {\prime} (\infty)\right) \left(\frac {1}{T} \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \eta_ {\tau} (g) \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} \left(s _ {t}\right) \right] \right. \\ \left. - p _ {\theta} (g) \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (s) \right]\right) \\ = \frac {1}{T} \left(f ^ {\prime} \left(p _ {\theta} (g)\right) - f ^ {\prime} (\infty)\right) \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} \left(s _ {t}\right) \eta_ {\tau} (g) \right] \\ \end{array}
532
+ $$
533
+
534
+ The gradient has two terms, the first term $\left(f'(p_{\theta}(g)) - f'(\infty)\right)$ weighs the gradient based on the value of $p_{\theta}(g)$ and is always negative. It acts as an adaptive learning schedule, reducing its magnitude as the $p_{\theta}(g)$ increases. The second term is the gradient of $\mathbb{E}_{p_{\theta}}[\eta_{\tau}(g)]$ . Hence using $\nabla_{\theta}L(\theta)$ , we minimize $L(\theta)$ which would imply maximizing $\mathbb{E}_{p_{\theta}}[\eta_{\tau}(g)]$ .
535
+
536
+ # B.2 Practical Algorithm
537
+
538
+ As mentioned in Section 4, the derived gradient is highly sample inefficient. We employ established methods to improve the performance of policy gradients like importance sampling.
539
+
540
+ The first modification is to use importance sampling weights to allow sampling from previous policy $\theta^{\prime}$ . The gradient now looks like,
541
+
542
+ $$
543
+ \nabla_ {\theta} J (\theta) = \mathbb {E} _ {\tau \sim p _ {\theta^ {\prime}} (\tau)} \left[ \frac {\pi_ {\theta} (\tau)}{\pi_ {\theta^ {\prime}} (\tau)} \left[ \sum_ {t = 1} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \right] \left[ \sum_ {t = 1} ^ {T} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) \right] \right]. \tag {9}
544
+ $$
545
+
546
+ To reduce the variance in the gradients, the objective can be modified to use the causal structure of the MDP and ensure that the action taken at step $t$ only affects rewards at times $t' \in [t, T]$ . Moreover, a discount factor $\gamma$ is used to prevent the sum $\sum_{t'=t}^{T} f'\left(\frac{p_{\theta}(s_t)}{p_g(s_t)}\right)$ from exploding.
547
+
548
+ Additionally, the expectation is modified to be over states rather than trajectories,
549
+
550
+ $$
551
+ \nabla_ {\theta} J (\theta) = \mathbb {E} _ {s _ {t}, a _ {t} \sim p _ {\theta^ {\prime}} \left(s _ {t}, a _ {t}\right)} \left[ \frac {\pi_ {\theta} \left(a _ {t} \mid s _ {t}\right)}{\pi_ {\theta^ {\prime}} \left(a _ {t} \mid s _ {t}\right)} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \sum_ {t ^ {\prime} = t} ^ {T} \gamma^ {t ^ {\prime}} f ^ {\prime} \left(\frac {p _ {\theta} \left(s _ {t}\right)}{p _ {g} \left(s _ {t}\right)}\right) \right]. \tag {10}
552
+ $$
553
+
554
+ This gradient computation is still inefficient, because even though the samples are from a previous policy $\pi_{\theta'}$ , it still needs to compute $\sum_{t'=t}^{T} \gamma^{t'} f'\left(\frac{p_{\theta}(s_t)}{p_g(s_t)}\right)$ , requiring iteration through full trajectories. We can add a bias to the gradient by modifying $f'\left(\frac{p_{\theta}(s_t)}{p_g(s_t)}\right)$ to $f'\left(\frac{p_{\theta'}(s_t)}{p_g(s_t)}\right)$ in the objective. To ensure the bias is small, an additional constraint needs to be added to keep $\theta'$ close to $\theta$ . Following the literature from natural gradients, the constraint we add is $D_{KL}(p_{\theta'}||p_{\theta})$ . Proximal Policy Optimization (Schulman et al., 2017) showed that in practical scenarios, clipped objective can be enough to do away with the KL regularization term. The final objective that we use is,
555
+
556
+ $$
557
+ \nabla_ {\theta} J (\theta) = \mathbb {E} _ {s _ {t}, a _ {t} \sim p _ {\theta^ {\prime}} \left(s _ {t}, a _ {t}\right)} \left[ \min \left(r _ {\theta} \left(s _ {t}\right) F _ {\theta^ {\prime}} \left(s _ {t}\right), c l i p \left(r _ {\theta} \left(s _ {t}\right), 1 - \epsilon , 1 + \epsilon\right) F _ {\theta^ {\prime}} \left(s _ {t}\right)\right) \right], \tag {11}
558
+ $$
559
+
560
+ where $r_{\theta}(s_t) = \frac{\pi_\theta(a_t|s_t)}{\pi_{\theta'}(a_t|s_t)}$ and $F_{\theta^{\prime}}(s_{t}) = \sum_{t^{\prime} = t}^{T}\gamma^{t^{\prime}}f^{\prime}\left(\frac{p_{\theta^{\prime}}(s_{t})}{p_{g}(s_{t})}\right)$ .
561
+
562
+ # B.3 Discounted State-Visitations
563
+
564
+ The state-visitation distribution defined so far has not considered a discount factor. To include discounting, the state-visitation frequency gets modified to $\eta_{\tau}(s) = \sum_{t=1}^{T} \gamma^t \mathbb{1}_{s_t=s}$ . Throughout the derivation of the gradient, we used $\int \eta_{\tau}(s) f(s) ds = \sum_{t=1}^{T} f(s_t)$ but this will be modified to $\int \eta_{\tau}(s) f(s) ds = \sum_{t=1}^{T} \gamma^t f(s_t)$ . The corresponding gradient will be,
565
+
566
+ $$
567
+ \nabla_ {\theta} J (\theta) = \frac {1}{T} \mathbb {E} _ {\tau \sim p _ {\theta} (\tau)} \left[ \left[ \sum_ {t = 1} ^ {T} \gamma^ {t} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \right] \left[ \sum_ {t = 1} ^ {T} \gamma^ {t} f ^ {\prime} \left(\frac {p _ {\theta} \left(s _ {t}\right)}{p _ {g} \left(s _ {t}\right)}\right) \right] \right]. \tag {12}
568
+ $$
569
+
570
+ This gradient can be modified as before,
571
+
572
+ $$
573
+ \nabla_ {\theta} J (\theta) = \mathbb {E} _ {s _ {t}, a _ {t} \sim p _ {\theta} (s _ {t}, a _ {t})} \left[ \gamma^ {t} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \sum_ {t ^ {\prime} = t} ^ {T} \gamma^ {t ^ {\prime}} f ^ {\prime} \left(\frac {p _ {\theta} (s _ {t})}{p _ {g} (s _ {t})}\right) \right]. \tag {13}
574
+ $$
575
+
576
+ Adding importance sampling to the gradient in Equation 13 will give a gradient very similar to Equation 10. In fact, Equation 10 is a biased estimate for the gradient of the $f$ -divergence between the discounted state-visitation distribution and the goal distribution. We can use either of the two gradients but Equation 10 will be preferred for long horizon tasks.
577
+
578
+ # C Gridworld Experiments
579
+
580
+ # C.1 Description of the task
581
+
582
+ The task involves navigating a gridworld to reach the goal state which is enclosed in a room. The agent can move in any of the four directions and has no idea where the goal is. It needs to explore the gridworld to find the path around the room to reach the goal. The task is further elaborated in Figure 7. The green square represents the agent position while the red square represents the goal.
583
+
584
+ State: The state visible to the policy is simply the normalized $x$ and $y$ coordinates.
585
+
586
+ Action: The action is a discrete categorical distribution with four categories, one for each direction - left, top, right and bottom.
587
+
588
+ **Reward:** The task reward is 1 at the goal and 0 everywhere else. $f$ -PG does not require rewards but the baselines use task rewards.
589
+
590
+ ![](images/635824a1f85f64f72b654f56d66fbaaa2f5a5c6819f6d84e036fdc1ff5180edd.jpg)
591
+ Figure 7: Description of the gridworld: The bold lines show the walls, green square is the start position and the red square is the goal.
592
+
593
+ # C.2 Performance of $f$ -PG
594
+
595
+ In Section 5.1, we had compared $fkl$ -PG and $rkl$ -PG with AIM (Durugkar et al., 2021) and GAIL (Ho & Ermon, 2016). In Figure 8 we present additional baselines AIRL (Fu et al., 2017) and f-AIRL (Ghasemipour et al., 2019).
596
+
597
+ # D Visualizing the learning signals
598
+
599
+ # D.1 Description of the task
600
+
601
+ To visualize the learning signals, we use the Reacher environment (Figure 9) (Todorov et al., 2012). The task involves rotating a reacher arm (two joints with one end fixed). The applied actions (torques) would rotate the arm so that the free end reaches the goal. The goal is fixed to be at $(-0.21, 0)$ and the goal distribution is a normal centred at the goal with a standard deviation of 0.02.
602
+
603
+ ![](images/99298086fc3996ed5829e6cd23541e99fd131d543dafcd690ea6d639903a1a0e.jpg)
604
+ Figure 9: The Reacher environment with fixed goal
605
+
606
+ ![](images/7a9001c64f1016b2ad4a1c1d0ef3647f53ee46a150ebfad2bfb959bd389b9b34.jpg)
607
+
608
+ ![](images/ad907b18fb615cd8f273b8d31e15f12215000bf4664513d4f4b124a962532acd.jpg)
609
+
610
+ ![](images/4b2d2dfd42ca60610aaa255b8f8303833776d1ba8c52a6b5e3e2975b8727e670.jpg)
611
+
612
+ ![](images/1a6473e131147dbefbdf096534ea35ed01835aeffc83f28c809be2cc6f4a19a4.jpg)
613
+ (a) FKL
614
+ (d) GAIL
615
+ Figure 8: Gridworld: The agent needs to move from the green circle to the red circle. The state visitations of the final policies are shown when using our framework for training (fkl, rkl) compared with AIM and GAIL trained on top of soft Q learning.
616
+
617
+ ![](images/6142918e1d80c39c033f002c4a1592080de021bb599996bf0d1a915dd90feeaa.jpg)
618
+ (b) RKL
619
+ (e) AIRL
620
+
621
+ ![](images/3a0172d2e57c71918ccf9ead1ee762397727190e0f40bba3922ab370eece252b.jpg)
622
+ (c) AIM
623
+ (f) FAIRL
624
+
625
+ State: The state of the original environment contains several things but here we simplify the state space to simply be the position of the free end and the target or the goal position.
626
+
627
+ Actions: The actions are two dimensional real numbers in $[-1, 1]$ which correspond to the torques applied on the two joints respectively.
628
+
629
+ Reward: The reward is sparse i.e., 1 when the goal is reached by the tip of the arm. But $f$ -PG does not use rewards for training policies.
630
+
631
+ # D.2 Comparing different $f$ -PG
632
+
633
+ Figure 10 shows the evolution of $-f^{\prime}\left(\frac{p_{\theta}(s)}{p_{g}(s)}\right)$ for the environment. The red regions correspond to signals having a lower value while the darker blue regions correspond to signals with high value. For $fkl$ , the scale of these signals generally varies from 10 to $-5$ while for $\chi^2$ , the scale varies from 600 to 50. Also, for the same objective, as the policy trains, these scales generally get smaller in magnitude.
634
+
635
+ The following can be observed from these plots:
636
+
637
+ 1. In all the cases, the signals are maximum at the goal, pulling the state-visitations towards the goal.
638
+ 2. All of these also push for exploration. This is most pronounced in $fkl$ and $\chi^2$ . These provide significant push towards the unexplored regions which show their inclination towards entropy-maximization, confirming the theory (Lemma 5.1).
639
+
640
+ # E PointMaze experiments
641
+
642
+ PointMaze (Fu et al., 2020) are continuous state-space domains where the agent needs to navigate to the goal in the 2D maze. The agent and the goal are spawned at a random location in the maze for every episode. There are three levels based on the difficulty of the maze as shown in Figure 11.
643
+
644
+ State: The state consists of the agent's 2D position and the velocity in the maze. The goal position is appended to the state.
645
+
646
+ Action: The actions are 2D real numbers in the range $[-1, 1]$ corresponding to the force applied to the agent in each of the two directions.
647
+
648
+ Reward: Although $f$ -PG does not use rewards, the baselines use the task reward which is sparse (1 when the goal is reached and 0 everywhere else).
649
+
650
+ ![](images/3a59a58de37d6d6f469f360be1b4a0bae60b0bae1ce1857d6125d2790e882dbd.jpg)
651
+ Figure 11: Description of PointMaze environments: PointMaze-U (left), PointMaze-Medium (middle), PointMaze-Large(right).
652
+
653
+ ![](images/c9ea6328680673b8ca5e5a846e7ed6800c7e9da48bb65f270fe5b641870e0561.jpg)
654
+
655
+ ![](images/2416a3bc4b1429812c027d0a961831d8f4d90a67f531425d2545c805bfa0ea91.jpg)
656
+
657
+ For the experiments in Section 6.2, the initial and goal states are sampled uniformly over all the "VALID" states i.e., states that can be reached by the agent. Such an initialization allows discriminator-based methods to fulfill their coverage assumptions. In Section 6.3, the initialization procedure is modified so that the initial state and the goal state are considerably far. This is done by restricting the sampling of the initial and goal states from disjoint (considerably far away) distributions as shown in Figure 5.
658
+
659
+ ![](images/74b3df634441c53ec55960e4cb7872c2274db755be5e0c2b934c057f244d5ebd.jpg)
660
+
661
+ ![](images/f4762df4382138536f637a62e26130734a761d9b76a94689924aa4b651b28bbd.jpg)
662
+
663
+ ![](images/e20a7cd9ba3c5ef1458d4012a3fb4f57843f0526eeab3338272cb37bc61d052a.jpg)
664
+
665
+ ![](images/0eebcc7ed8a740f661352f98afaa1c6c855b9ac05a9d8f4225b3951902c5bd55.jpg)
666
+
667
+ ![](images/4936362145d567c04489e61d644ec97f23a200c54f75376fe3c0e0137040377e.jpg)
668
+
669
+ ![](images/772e1bfa2d12392642cc7f8efe569f19536f8fced875f85c8d390f2419a88c7c.jpg)
670
+
671
+ ![](images/0a13ab17e2782417456c70757356bcfc950ab3abf3e15cc87426c5831798bcaa.jpg)
672
+
673
+ ![](images/c6dec45eeb87c69de79003b3af15b03e901b1b21e268d2887c10eb2c7e49e7f3.jpg)
674
+ (a) FKL
675
+
676
+ ![](images/79ce5f4100c6a924d9fc788dab757a54b4abfd1570ceed49733c3765a1b639a8.jpg)
677
+
678
+ ![](images/da66f89c5e281a25d7ce31038791a76ab9f997826c259518db0a9c7463ccd400.jpg)
679
+
680
+ ![](images/236ee0608314771462f67132acf2f0332706ed1a30f6e6f7e314b4149e879d90.jpg)
681
+
682
+ ![](images/6ca214b7f4e1504c2f9f4ba17914ed34e1482da605837744ac0d5ce2540b351d.jpg)
683
+
684
+ ![](images/6bfd44751aebac5d686eb2c2b82169860857d2fa9169c8fdb1f8e9adc3a7efab.jpg)
685
+
686
+ ![](images/2f9c45bc4c5d21453eab449dd0776c86c3a195862757ad491ef00b91c1f88fc8.jpg)
687
+
688
+ ![](images/582ca8e3b2cd7832b985c4be662be7a63633e5aee022f689ea56efd5567934fd.jpg)
689
+
690
+ ![](images/d76a79a8b908be2f96e533988c9cf628dffcbb62aeca72af0e48212106f17100.jpg)
691
+
692
+ ![](images/a7381eb8476ff9452ee79bc113907f4f498c4ee7a307ecc521411aedf1a5f91b.jpg)
693
+
694
+ ![](images/81e00a78cab481c7af5544804fd8a87649e1fd39b526f6fe42f2e877d666b740.jpg)
695
+ (b) RKL
696
+
697
+ ![](images/243e64bab2c283440b4c2c0e00506b29f5f13c9db3137fd0ca9af778bdebc95e.jpg)
698
+
699
+ ![](images/467742850d5e6b21ef931a945d46792223e61a738542867da86cf7974b6db0fc.jpg)
700
+
701
+ ![](images/48963379f8661f373bdf01c29be6288272c9cfb554d984561680f4d4eea2ab52.jpg)
702
+
703
+ ![](images/cc193a4e1240cb9dd937222ebdb82cb13e5a1eea147ab1903d10136a2feb95c3.jpg)
704
+
705
+ ![](images/9e866783a63d8f02ad87527d88c1e18e184165453f31ae85671017f3806e803f.jpg)
706
+
707
+ ![](images/bcfafa87c5d2d3f0930533e13d4396e25ce0dc4bd64d046399e92c3f8ff7b0fb.jpg)
708
+
709
+ ![](images/27b965f8557d98b6b88ad8e8ba38017b9be2646dbd4e3aa37cd70889b3928b3c.jpg)
710
+
711
+ ![](images/830928d7d0ee462596d4af957c73089326e8f9e0ee36e9693f24a7e35fb90603.jpg)
712
+
713
+ ![](images/ff5c3848fda31230b3424c94418e4522a4d7ad75f5853aed8c9c3448ce583c65.jpg)
714
+
715
+ ![](images/780508ba85b0ea646af86fc9f0a03a1e340330630d6d39c4c659f4e8f53d95da.jpg)
716
+ (c) JS
717
+
718
+ ![](images/08d1268596abde29b04c47fc15f68828239533743d85848106081e9e3546bae7.jpg)
719
+
720
+ ![](images/90edb6f8b2d8e9570108ec0f3fc3f42991d8e39754bf544594f5c30243e6d6a4.jpg)
721
+
722
+ ![](images/f59c1f735b0ea2f6829e753c9b8d832cb7ba5e3f93737cc478c3b86823a94fab.jpg)
723
+
724
+ ![](images/112cc7790532ecffb964adf04488d710d1d9b53017521423180540e762ccf216.jpg)
725
+
726
+ ![](images/2932600303d44ca5bb0b8ba915919c27e440013a9f22dcdae606a003c97e3b71.jpg)
727
+
728
+ ![](images/a34494807c1fd6652f0d1e4d2614289c5656b39d697acdbda3252ec4b4a8a670.jpg)
729
+
730
+ ![](images/cc4b69211433a622f29188b3a4b334a069f8775fa5b5bb58240f1ac09676a489.jpg)
731
+
732
+ ![](images/d77963471e5b15dda5a1b8f51d8c4a308f5328edd25d619c9736bf1d8e81ea07.jpg)
733
+ Figure 10: Evolution of $-f^{\prime}(\frac{p_{\theta}(s)}{p_g(s)})$ along with the corresponding state-visitations for different $f$ -divergences - $fkl$ , $rkl$ , $js$ and $\chi^2$ . The scales for the value of these signals are not shown but they vary as the policy converges. $f$ -PG provides dense signals for pushing towards exploration.
734
+
735
+ ![](images/0ffc863b6273fe5c54440dc804078a226da75c14b7ea3cdba8bac7d52fb5d3fe.jpg)
736
+
737
+ ![](images/ae076c3305596e860e804597cdf1675eceeb75f1ea6a8afaec90a097fff44ac8.jpg)
738
+ (d) $\chi^2$
739
+
740
+ ![](images/959dbfdb49a763e44bb6042067e51583ca6c64bba4be2b097edc32e09112c441.jpg)
741
+
742
+ ![](images/fac9c4a46a3a6815fd4bff39faab36dce8c1c9cd13f41e4c6b59c53370e429e9.jpg)
fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:611a02903614b90485539f9bfc5011a485225d9a2e933e4b144c9d06a905b651
3
+ size 1174255
fpolicygradientsageneralframeworkforgoalconditionedrlusingfdivergences/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09acadd88a5830c3c78bd8b1f087e2e2f61127bfd69b4f51f9045352ec4e4cf1
3
+ size 986017
iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/025dad01-183e-408f-b811-9bb744cef25c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7a6492c49b1158acaafb1adb87bb3327be9d842bb7274b9cdb98121611230c2
3
+ size 200186
iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/025dad01-183e-408f-b811-9bb744cef25c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94695964932a25379e08ee57fd2706ecfc8ed2e2dd52407f464244164d682883
3
+ size 241322
iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/025dad01-183e-408f-b811-9bb744cef25c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7f27f37295af3ec1848694be0a66e7f996142dab20618be7685e989428b72b0
3
+ size 3459036
iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/full.md ADDED
The diff for this file is too large to render. See raw diff
 
iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdf581681dd78a590c739e041856beffe369f9ea3d270111be4c8434dd3e744d
3
+ size 2656618
iscanidentifyingcausalmechanismshiftsamongnonlinearadditivenoisemodels/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ce6ba9ce747a79db1809ce887ddc63873b9bc9e36c16075f6d0016216390288
3
+ size 1428199
kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/2dd01f7a-eb35-4d5f-8538-975c25eab59c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec9ddf1bf445cf618a6620a533b2f22a64cadcd2aa9226ca8bcf73ea2d0e6370
3
+ size 132107
kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/2dd01f7a-eb35-4d5f-8538-975c25eab59c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:584c4f853dd7a41ecc3ded17df48c2cf9160100ca5afcbdc031ccfb81a5bbae0
3
+ size 158892
kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/2dd01f7a-eb35-4d5f-8538-975c25eab59c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:782f2ec970d3e1b81475ef67b947913c5b622c7c642c94b8968c06c1ae24d3ff
3
+ size 720966
kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/full.md ADDED
@@ -0,0 +1,669 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # $k$ -Median Clustering via Metric Embedding: Towards Better Initialization with Differential Privacy
2
+
3
+ Chenglin Fan, Ping Li, Xiaoyun Li
4
+
5
+ Cognitive Computing Lab
6
+
7
+ Baidu Research
8
+
9
+ 10900 NE 8th St. Bellevue, WA 98004, USA
10
+
11
+ {fanchenglin, pingli98, lixiaoyun996}@gmail.com
12
+
13
+ # Abstract
14
+
15
+ In clustering, the choice of initial centers is crucial for the convergence speed of the algorithms. We propose a new initialization scheme for the $k$ -median problem in the general metric space (e.g., discrete space induced by graphs), based on the construction of metric embedding tree structure of the data. We propose a novel and efficient search algorithm which finds initial centers that can be used subsequently for the local search algorithm. The so-called HST initialization method can produce initial centers achieving lower error than those from another popular method $k$ -median++, also with higher efficiency when $k$ is not too small. Our HST initialization is then extended to the setting of differential privacy (DP) to generate private initial centers. We show that the error of applying DP local search followed by our private HST initialization improves prior results on the approximation error, and approaches the lower bound within a small factor. Experiments demonstrate the effectiveness of our proposed methods.
16
+
17
+ # 1 Introduction
18
+
19
+ Clustering is an important classic problem in unsupervised learning that has been widely studied in statistics, data mining, machine learning, network analysis, etc. (Punj and Stewart, 1983; Dhillon and Modha, 2001; Banerjee et al., 2005; Berkhin, 2006; Abbasi and Younis, 2007). The objective of clustering is to divide a set of data points into clusters, such that items within the same cluster exhibit similarities, while those in different clusters distinctly differ. This is concretely measured by the sum of distances (or squared distances) between each point to its nearest cluster center. One conventional notion to evaluate a clustering algorithms is: with high probability, $cost(C, D) \leq \gamma OPT_k(D) + \xi$ , where $C$ is the centers output by the algorithm and $cost(C, D)$ is a cost function defined for $C$ on dataset $D$ . $OPT_k(D)$ is the cost of optimal clustering solution on $D$ . When everything is clear from context, we will use $OPT$ for short. Here, $\gamma$ is called multiplicative error and $\xi$ is called additive error. Alternatively, we may also use the notion of expected cost.
20
+
21
+ Two popularly studied clustering problems are 1) the $k$ -median problem, and 2) the $k$ -means problem. The origin of $k$ -median dates back to the 1970's (e.g., Kaufman et al. (1977)), where one tries to find the best location of facilities that minimizes the cost measured by the distance between clients and facilities. Formally, given a set of points $D$ and a distance measure, the goal is to find $k$ center points minimizing the sum of absolute distances of each sample point to its nearest center. In $k$ -means, the objective is to minimize the sum of squared distances instead. There are two general frameworks for clustering. One heuristic is the Lloyd's algorithm (Lloyd, 1982), which is built upon an iterative distortion minimization approach. In most cases, this method can only be applied to numerical data,
22
+
23
+ typically in the (continuous) Euclidean space. Clustering in general metric spaces (discrete spaces) is also important and useful when dealing with, for example, the graph data, where Lloyd's method is no longer applicable. A more generally applicable approach, the local search method (Kanungo et al., 2002; Arya et al., 2004), has also been widely studied. It iteratively finds the optimal swap between the center set and non-center data points to keep lowering the cost. Local search can achieve a constant approximation (i.e., $\gamma = O(1)$ ) to the optimal solution (Arya et al., 2004). For general metric spaces, the state of the art approximation ratio is 2.675 for $k$ -median Byrka et al. (2015) and 6.357 for $k$ -means Ahmadian et al. (2017).
24
+
25
+ Initialization of cluster centers. It is well-known that the performance of clustering can be highly sensitive to initialization. If clustering starts with good initial centers with small approximation error, the algorithm may use fewer iterations to find a better solution. The $k$ -median++ algorithm (Arthur and Vassilvitskii, 2007) iteratively selects $k$ data points as initial centers, favoring distant points in a probabilistic way, such that the initial centers tend to be well spread over the data points (i.e., over different clusters). The produced initial centers are proved to have $O(\log k)$ multiplicative error. Follow-up works further improved its efficiency and scalability, e.g., Bahmani et al. (2012); Bachem et al. (2016); Lattanzi and Sohler (2019); Choo et al. (2020); Cohen-Addad et al. (2021); Grunau et al. (2023); Fan et al. (2023). In this work, we propose a new initialization framework, called HST initialization, which is built upon a novel search algorithm on metric embedding trees constructed from the data. Our method achieves improved approximation error compared with $k$ -median++. Moreover, importantly, our initialization scheme can be conveniently combined with the notion of differential privacy (DP) to protect the data privacy.
26
+
27
+ Clustering with Differential Privacy. The concept of differential privacy (Dwork, 2006; McSherry and Talwar, 2007) has been popular to rigorously define and resolve the problem of keeping useful information for machine learning models, while protecting privacy for each individual. DP has been adopted to a variety of algorithms and tasks, such as regression, classification, principle component analysis, graph distance release, matrix completion, optimization, and deep learning (Chaudhuri and Monteleoni, 2008; Chaudhuri et al., 2011; Abadi et al., 2016; Ge et al., 2018; Wei et al., 2020; Dong et al., 2022; Fan and Li, 2022; Fan et al., 2022; Fang et al., 2023; Li and Li, 2023a,b). Private $k$ -means clustering has also been widely studied, e.g., Feldman et al. (2009); Nock et al. (2016); Feldman et al. (2017), mostly in the continuous Euclidean space. Balcan et al. (2017) considered identifying a good candidate set (in a private manner) of centers before applying private local search, which yields $O(\log^3 n)$ multiplicative error and $O((k^2 + d)\log^5 n)$ additive error. Later on, the private Euclidean $k$ -means error was further improved by Stemmer and Kaplan (2018), with more advanced candidate set selection. Huang and Liu (2018) gave an optimal algorithm in terms of minimizing Wasserstein distance under some data separability condition.
28
+
29
+ For private $k$ -median clustering, Feldman et al. (2009); Ghazi et al. (2020) considered the problem in high dimensional Euclidean space. However, it is rather difficult to extend their analysis to more general metrics in discrete spaces (e.g., on graphs). The strategy of Balcan et al. (2017) to form a candidate center set could as well be adopted to $k$ -median, which leads to $O(\log^{3/2} n)$ multiplicative error and $O((k^2 + d) \log^3 n)$ additive error in the Euclidean space where $n$ is the sample size. In discrete space, Gupta et al. (2010) proposed a private method for the classical local search heuristic, which applies to both $k$ -medians and $k$ -means. To cast privacy on each swapping step, the authors applied the exponential mechanism of McSherry and Talwar (2007). Their method produced an $\epsilon$ -differentially private solution with cost $6OPT + O(\triangle k^2 \log^2 n / \epsilon)$ , where $\triangle$ is the diameter of the point set. In this work, we will show that our proposed HST initialization can improve the DP local search for $k$ -median of Gupta et al. (2010) in terms of both approximation error and efficiency. Stemmer and Kaplan (2018); Jones et al. (2021) proposed $(\epsilon, \delta)$ -differentially private solution also with constant multiplicative error but smaller additive error.
30
+
31
+ The main contributions of this work include the following:
32
+
33
+ - We introduce the Hierarchically Well-Separated Tree (HST) as an initialization tool for the $k$ -median clustering problem. We design an efficient sampling strategy to select the initial center set from the tree, with an approximation factor $O(\log \min \{k,\triangle \})$ in the non-private setting, which is $O(\log \min \{k,d\})$ when $\log \triangle = O(\log d)$ . This improves the $O(\log k)$ error of $k$ -median++. Moreover, the complexity of our HST based method can be smaller than that of $k$ -median++ when the number of clusters $k$ is not too small ( $k\geq \log n$ ), which is a common scenario in practical applications.
34
+
35
+ - We propose a differentially private version of HST initialization under the setting of Gupta et al. (2010) in discrete metric space. The so-called DP-HST algorithm finds initial centers with $O(\log n)$ multiplicative error and $O(\epsilon^{-1}\triangle k^2\log^2 n)$ additive error. Moreover, running DP-local search starting from this initialization gives $O(1)$ multiplicative error and $O(\epsilon^{-1}\triangle k^2 (\log \log n)\log n)$ additive error, which improves previous results towards the well-known lower bound $O(\epsilon^{-1}\triangle k\log (n / k))$ on the additive error of DP $k$ -median (Gupta et al., 2010) within a small $O(k\log \log n)$ factor. This is the first clustering initialization method with $\epsilon$ -differential privacy guarantee and improved error rate in general metric space.
36
+ - We conduct experiments on simulated and real-world datasets to demonstrate the effectiveness of our methods. In both non-private and private settings, our proposed HST-based approach achieves smaller cost at initialization than $k$ -median++, which may also lead to improvements in the final clustering quality.
37
+
38
+ # 2 Background and Setup
39
+
40
+ The definition of differential privacy (DP) is as follows.
41
+
42
+ Definition 2.1 (Differential Privacy (DP) (Dwork, 2006)). If for any two adjacent datasets $D$ and $D'$ with symmetric difference of size one and any $O \subset \operatorname{Range}(\mathbb{A})$ , an algorithm $\mathbb{A}$ with map $f$ satisfies
43
+
44
+ $$
45
+ P r [ \mathbb {A} (D) \in O ] \leq e ^ {\epsilon} P r [ \mathbb {A} (D ^ {\prime}) \in O ],
46
+ $$
47
+
48
+ then algorithm $\mathbb{A}$ is said to be $\epsilon$ -differentially private ( $\epsilon$ -DP).
49
+
50
+ Intuitively, DP requires that after removing any data point from $D$ (e.g., a node in a graph), the output of $D'$ should not be too different from that of the original dataset $D$ . The Laplace mechanism adds Laplace $(\eta(f)/\epsilon)$ noise to the output where $\eta(f) = \sup_{|D - D'| = 1} |f(D) - f(D')|$ is the sensitivity of $f$ , which is known to achieve $\epsilon$ -DP. The exponential mechanism is also a tool for many DP algorithms with discrete outputs. Let $O$ be the output domain. The utility function $q: D \times O \to \mathbb{R}$ is what we aim to maximize. The exponential mechanism outputs an element $o \in O$ with probability $P[\mathbb{A}(D) = o] \propto \exp(\frac{\epsilon q(D, o)}{2\eta(q)})$ . Both mechanisms will be used in our paper.
51
+
52
+ # 2.1 $k$ -Median Clustering and Local Search
53
+
54
+ In this paper, we follow the classic problem setting in the metric clustering literature, e.g. Arya et al. (2004); Gupta et al. (2010). Specifically, the definitions of metric $k$ -median clustering problem (DP and non-DP) are stated as follow.
55
+
56
+ Definition 2.2 ( $k$ -median). Given a universe point set $U$ and a metric $\rho: U \times U \to \mathbb{R}$ , the goal of $k$ -median is to pick $F \subseteq U$ with $|F| = k$ to minimize
57
+
58
+ $$
59
+ k \text {m e d i a n :} \quad \operatorname {c o s t} _ {k} (F, U) = \sum_ {v \in U} \min _ {f \in F} \rho (v, f). \tag {1}
60
+ $$
61
+
62
+ Let $D \subseteq U$ be a set of "demand points". The goal of DP $k$ -median is to minimize
63
+
64
+ $$
65
+ D P k \text {m e d i a n}: \quad \operatorname {c o s t} _ {k} (F, D) = \sum_ {v \in D} \min _ {f \in F} \rho (v, f), \tag {2}
66
+ $$
67
+
68
+ and the output $F$ is required to be $\epsilon$ -differentially private with respect to $D$ . We may drop "F" and use "cost $_k(U)$ " or "cost $_k(D)$ " if there is no risk of ambiguity.
69
+
70
+ Note that in Definition 2.2, our aim is to protect the privacy of a subset $D \subset U$ . To better understand the motivation and application scenario, we provide a real-world example as below.
71
+
72
+ Example 2.3. Consider $U$ to be the universe of all users in a social network (e.g., Facebook, LinkedIn, etc.). Each user (account) has some public information (e.g., name, gender, interests, etc.), but also has some private personal data that can only be seen by the data server. Let $D$ be a set of users grouped by some feature that might be set as private. Suppose a third party plans to collaborate with the most influential users in $D$ for e.g., commercial purposes, thus requesting the cluster centers of $D$ . In this case, we need a differentially private algorithm to safely release the centers, while protecting the individuals in $D$ from being identified (since the membership of $D$ is private).
73
+
74
+ Algorithm 1: Local search for $k$ -median clustering (Arya et al., 2004)
75
+ Input: Data points $U$ , parameter $k$ , constant $\alpha$
76
+ Initialization: Randomly select $k$ points from $U$ as initial center set $F$
77
+ while $\exists x\in F,y\in U$ s.t. cost $(F - \{x\} +\{y\})\leq (1 - \alpha /k)$ cost $(F)$ do Select $(x,y)\in F_i\times (D\setminus F_i)$ with arg $\min_{x,y}\{\mathrm{cost}(F - \{x\} +\{y\})\}$ Swap operation: $F\gets F - \{x\} +\{y\}$
78
+ Output: Center set $F$
79
+
80
+ The (non-private) local search procedure for $k$ -median proposed by Arya et al. (2004) is summarized in Algorithm 1. First, we randomly pick $k$ points in $U$ as the initial centers. In each iteration, we search over all $x \in F$ and $y \in U$ , and do the swap $F \gets F - \{x\} + \{y\}$ such that the new centers improve the cost the most, and if the improvement is more than $(1 - \alpha / k)$ for some $\alpha > 0$ . We repeat the procedure until no such swap exists. Arya et al. (2004) showed that the output center set $F$ achieves 5 approximation error to the optimal solution, i.e., $cost(F) \leq 5OPT$ .
81
+
82
+ # 2.2 $k$ -median++ Initialization
83
+
84
+ Although local search is able to find a solution with constant error, it takes $O(n^{2})$ per iteration (Resende and Werneck, 2007) in expected $O(k\log n)$ steps (which gives total complexity $O(kn^{2}\log n)$ ) when started from a random center set, which would be slow for large datasets. Indeed, we do not need such complicated/meticulous algorithm to reduce the cost at the beginning, i.e., when the cost is large. To accelerate the process, efficient initialization methods find a "roughly" good center set as the starting point for local search. In the paper, we compare our new initialization scheme mainly with a popular (and perhaps the most well-known) initialization method, the $k$ -median++ (Arthur and Vassilvitskii, 2007)<sup>3</sup> as presented in Algorithm 2. The output centers $C$ by $k$ -median++ achieve $O(\log k)$ approximation error with time complexity $O(nk)$ . Starting from the initialization, we only need to run $O(k\log \log k)$ steps of the computationally heavy local search to reach a constant error solution. Thus, initialization may greatly improve the clustering efficiency.
85
+
86
+ Algorithm 2: $k$ -median++ initialization (Arthur and Vassilvitskii, 2007)
87
+ Input: Data points $U$ , number of centers $k$ Randomly pick a point $c_{1}\in U$ and set $F = \{c_1\}$ for $i = 2,\dots,k$ do Select $c_{i} = u\in U$ with probability $\frac{\rho(u,F)}{\sum_{u^{\prime}\in U}\rho(u^{\prime},F)}$ $F = F\cup \{c_i\}$ Output: $k$ -median++ initial center set $F$
88
+
89
+ # 3 Initialization via Hierarchical Well-Separated Tree (HST)
90
+
91
+ In this section, we propose our new initialization scheme for $k$ -median clustering, and provide our analysis in the non-private case solving (1). The idea is based on the metric embedding theory. We will start with an introduction to the main tool used in our approach.
92
+
93
+ # 3.1 Hierarchically Well-Separated Tree (HST)
94
+
95
+ In this paper, for an $L$ -level tree, we will count levels in a descending order down the tree. We use $h_v$ to denote the level of $v$ , and let $n_i$ be the number of nodes at level $i$ . The Hierarchically Well-Separated Tree (HST) is based on the padded decompositions of a general metric space in a hierarchical manner (Fakcharoenphol et al., 2004). Let $(U,\rho)$ be a metric space with $|U| = n$ , and we will refer to this metric space without specific clarification. A $\beta$ -padded decomposition of $U$
96
+
97
+ ![](images/1087cef9e52419bcedb4ea64d12df934c9ce7c59e2d433eee2a11367f0999773.jpg)
98
+ Figure 1: An example of a 3-level padded decomposition and the corresponding 2-HST. Left: The thickness of the ball represents the level. The colors correspond to different levels in the HST in the right panel. “ $\triangle$ ”s are the center nodes of partitions (balls), and “ $\times$ ”s are the non-center data points. Right: The 2-HST generated from the padded decomposition. Bold indices represent the centers.
99
+
100
+ ![](images/61d4e39ea494bc74d44a7e253933ea0e62f2b51e105f25697192e4557da68fd1.jpg)
101
+
102
+ is a probabilistic partition of $U$ such that the diameter of each cluster $U_{i} \in U$ is at most $\beta$ , i.e., $\rho(u, v) \leq \beta, \forall u, v \in U_{i}, i = 1, \dots, k$ . The formal definition of HST is given as below.
103
+
104
+ Definition 3.1. Assume $\min_{u,v\in U}\rho (u,v) = 1$ and denote the diameter $\triangle = \max_{u,v\in U}\rho (u,v)$ . An $\alpha$ -Hierarchically Well-Separated Tree ( $\alpha$ -HST) with depth $L$ is an edge-weighted rooted tree $T$ , such that an edge between any pair of two nodes of level $i - 1$ and level $i$ has length at most $\triangle/\alpha^{L - i}$ .
105
+
106
+ Our analysis will consider $\alpha = 2$ -HST for conciseness, since $\alpha$ only affects the constants in our theoretical analysis. Figure 1 is an example 2-HST (right panel) with $L = 3$ levels, along with its underlying padded decompositions (left panel). Using Algorithm 3, a 2-HST can be built as follows: we first find a padded decomposition $P_{L} = \{P_{L,1},\dots,P_{L,n_{L}}\}$ of $U$ with parameter $\beta = \triangle /2$ . The center of each partition in $P_{L,j}$ serves as a root node in level $L$ . Then, we re-do a padded decomposition for each partition $P_{L,j}$ , to find sub-partitions with diameter $\beta = \triangle /4$ , and set the corresponding centers as the nodes in level $L - 1$ , and so on. Each partition at level $i$ is obtained with $\beta = \triangle /2^{L - i}$ . This process proceeds until a node has a single point (leaf), or a pre-specified tree depth is reached. It is worth mentioning that, Blelloch et al. (2017) proposed an efficient HST construction in $O(m\log n)$ time, where $n$ and $m$ are the number of nodes and edges in a graph, respectively. Therefore, the construction of HST can be very efficient in practice.
107
+
108
+ The first step of our method is to embed the data points into an HST (see Algorithm 4). Next, we will describe our new strategy to search for the initial centers on the tree (w.r.t. the tree metric). Before moving on, it is worth mentioning that, there are polynomial time algorithms for computing an exact $k$ -median solution in the tree metric (Tamir (1996); Shah (2003)). However, the dynamic programming algorithms have high complexity (e.g., $O(kn^2)$ ), making them unsuitable for the purpose of fast initialization. Moreover, it is unknown how to apply them effectively to the private case. The three key merits of our new algorithm are: (1) It is more efficient than $k$ -median++ when $k$ is not too small, which is a very common scenario in practice; (2) It achieves $O(1)$ approximation error in the tree metric; (3) It can be easily extended to incorporating differential privacy (DP).
109
+
110
+ Algorithm 3: Build 2-HST(U,L)
111
+ Input: Data points $U$ with diameter $\triangle, L$
112
+ Randomly pick a point in $U$ as the root node of $T$
113
+ Let $r = \triangle / 2$
114
+ Apply a permutation $\pi$ on $U$ // so points will be chosen in a random sequence for each $v \in U$ do
115
+ Set $C_v = [v]$
116
+ for each $u \in U$ do
117
+ | Add $u \in U$ to $C_v$ if $d(v, u) \leq r$ and $u \notin \bigcup_{v' \neq v} C_{v'}$
118
+ Set the non-empty clusters $C_v$ as the children nodes of $T$
119
+ for each non-empty cluster $C_v$ do
120
+ | Run 2-HST $(C_v, L - 1)$ to extend the tree $T$ ; stop until $L$ levels or reaching a leaf node
121
+ Output: 2-HST $T$
122
+
123
+ # 3.2 HST Initialization Algorithm
124
+
125
+ Let $L = \log \Delta$ and suppose $T$ is a level- $L$ 2-HST in $(U, \rho)$ , where we assume $L$ is an integer. For a node $v$ at level $i$ , we use $T(v)$ to denote the subtree rooted at $v$ . Let $N_v = |T(v)|$ be the number of data points in $T(v)$ . The search strategy for the initial centers, NDP-HST initialization ("NDP" stands for "Non-Differentially Private"), is presented in Algorithm 4 with two phases.
126
+
127
+ Subtree search. The first step is to identify the subtrees that contain the $k$ centers. To begin with, $k$ initial centers $C_1$ are picked from $T$ who have the largest score $(v) = N(v)\cdot 2^{h_v}$ . This is intuitive, since to get a good clustering, we typically want the ball surrounding each center to include more data points. Next, we do a screening over $C_1$ : if there is any ancestor-descendant pair of nodes, we remove the ancestor from $C_1$ . If the current size of $C_1$ is smaller than $k$ , we repeat the process until $k$ centers are chosen (we do not re-select nodes in $C_1$ and their ancestors). This way, $C_1$ contains $k$ root nodes of $k$ disjoint subtrees.
128
+
129
+ Algorithm 4: NDP-HST initialization
130
+ Input: $U,\triangle ,k$
131
+ Initialization: $L = \log \triangle ,C_0 = \emptyset ,C_1 = \emptyset$
132
+ Call Algorithm 3 to build a level- $L2\mathrm{-HST}T$ using $U$
133
+ for each node $v$ in $T$ do $N_{v}\gets |U\cap T(v)|$ score $(v)\leftarrow N_v\cdot 2^{h_v}$
134
+ while $|C_1| < k$ do Add top $(k - |C_1|)$ nodes with highest score to $C_1$ for each $v\in C_1$ do $C_1 = C_1\backslash \{v\}$ if $\exists v^{\prime}\in C_{1}$ such that $v^{\prime}$ is a descendant of $v$ $C_0 =$ FIND-LEAF(T,C1)
135
+ Output: Initial center set $C_0\subseteq U$
136
+
137
+ Leaf search. After we find $C_1$ the set of $k$ subtrees, the next step is to find the center in each subtree using Algorithm 5 ("FIND-LEAF"). We employ a greedy search strategy, by finding the child node with largest score level by level, until a leaf is found. This approach is intuitive since the diameter of the partition ball exponentially decays with the level. Therefore, we are in a sense focusing more and more on the region with higher density (i.e., with more data points).
138
+
139
+ The complexity of our search algorithm is given as follows. All proofs are placed in Appendix B.
140
+
141
+ Proposition 3.2 (Complexity). Algorithm 4 takes $O(dn\log n)$ time in the Euclidean space.
142
+
143
+ Remark 3.3 (Comparison with $k$ -median++). The complexity of $k$ -median++ is $O(dnk)$ in the Euclidean space (Arthur and Vassilvitskii, 2007). Our algorithm would be faster when $k > \log n$ , which is a common scenario. Similar comparison also holds for general metrics.
144
+
145
+ # 3.3 Approximation Error of HST Initialization
146
+
147
+ We provide the error analysis of our algorithm. Firstly, we show that the initial center set produced by NDP-HST is already a good approximation to the optimal $k$ -median solution. Let $\rho^T(x,y) = d_T(x,y)$ denote the "2-HST metric" between $x$ and $y$ in the 2-HST $T$ , where $d_T(x,y)$ is the tree distance between nodes $x$ and $y$ in $T$ . By Definition 3.1 and since $\triangle = 2^L$ , in the analysis we assume
148
+
149
+ Algorithm 5: FIND-LEAF $(T,C_1)$
150
+ Input: $T,C_1$
151
+ Initialization: $C_0 = \emptyset$
152
+ for each node $v$ in $C_1$ do while $v$ is not a leaf node do $\begin{array}{rl} & v\gets \arg \max_{w\in ch(v)} N_{w},\mathrm{~where~}ch(v)\mathrm{~denotes~the~children~nodes~of~}v\\ & \mathrm{Add~}v\mathrm{~to~}C_{0} \end{array}$
153
+ Output: Initial center set $C_0\subseteq U$
154
+
155
+ equivalently that the edge weight of the $i$ -th level is $2^{i - 1}$ . The crucial step of our analysis is to examine the approximation error in terms of the 2-HST metric, after which the error can be adapted to the general metrics by the following well-known result.
156
+
157
+ Lemma 3.4 (Bartal (1996)). In a metric space $(U,\rho)$ with $|U| = n$ and diameter $\triangle$ , it holds that $\forall x,y\in U$ , $E[\rho^T (x,y)] = O(\min \{\log n,\log \triangle \})\rho (x,y)$ . In the Euclidean space $\mathbb{R}^d$ , $E[\rho^T (x,y)] = O(d)\rho (x,y)$ , $\forall x,y\in U$ .
158
+
159
+ Recall $C_0, C_1$ from Algorithm 4. We define
160
+
161
+ $$
162
+ \operatorname {c o s t} _ {k} ^ {T} (U) = \sum_ {y \in U} \min _ {x \in C _ {0}} \rho^ {T} (x, y), \tag {3}
163
+ $$
164
+
165
+ $$
166
+ \operatorname {c o s t} _ {k} ^ {T ^ {\prime}} (U, C _ {1}) = \min _ {| F \cap T (v) | = 1, \atop \forall v \in C _ {1}} \sum_ {y \in U} \min _ {x \in F} \rho^ {T} (x, y), \tag {4}
167
+ $$
168
+
169
+ $$
170
+ O P T _ {k} ^ {T} (U) = \min _ {F \subset U, | F | = k} \sum_ {y \in U} \min _ {x \in F} \rho^ {T} (x, y) \equiv \min _ {C _ {1} ^ {\prime}} c o s t _ {k} ^ {T \prime} (U, C _ {1} ^ {\prime}). \tag {5}
171
+ $$
172
+
173
+ For simplicity, we will use $cost_k^{T'}(U)$ to denote $cost_k^{T'}(U, C_1)$ . Here, $OPT_k^T$ (5) is the cost of the global optimal solution with the 2-HST metric. The last equivalence in (5) holds because the optimal centers can always be located in $k$ disjoint subtrees, as each leaf only contains one point. (3) is the $k$ -median cost with 2-HST metric of the output $C_0$ of Algorithm 4. (4) is the optimal cost after the subtrees are chosen. That is, it represents the minimal cost to pick one center from each subtree in $C_1$ . We first bound the error of the subtree search step and the leaf search step, respectively.
174
+
175
+ Lemma 3.5 (Subtree search). $cost_{k}^{T^{\prime}}(U)\leq 5OPT_{k}^{T}(U)$
176
+
177
+ Lemma 3.6 (Leaf search). $\text{cost}_k^T(U) \leq 2\text{cost}_k^{T'}(U)$ .
178
+
179
+ Combining Lemma 3.5 and Lemma 3.6, we obtain:
180
+
181
+ Theorem 3.7 (2-HST error). Running Algorithm 4, we have $\text{cost}_k^T(U) \leq 10OPT_k^T(U)$ .
182
+
183
+ Thus, HST-initialization produces an $O(1)$ approximation to the optimal cost in the 2-HST metric. Define $cost_{k}(U)$ as (1) for our HST centers, and the optimal cost w.r.t. $\rho$ as
184
+
185
+ $$
186
+ O P T _ {k} (U) = \min _ {| F | = k} \sum_ {y \in U} \min _ {x \in F} \rho (x, y). \tag {6}
187
+ $$
188
+
189
+ We have the following result based on Lemma 3.4.
190
+
191
+ Theorem 3.8. In the general metric space, the expected $k$ -median cost of NDP-HST (Algorithm 4) is $E[\mathrm{cost}_k(U)] = O(\min\{\log n, \log \Delta\}) OPT_k(U)$ .
192
+
193
+ Remark 3.9. In the Euclidean space, Makarychev et al. (2019) showed that using $O(\log k)$ random projections suffices for $k$ -median to achieve $O(1)$ error. Thus, if $\log \triangle = O(\log d)$ , by Lemma 3.4, HST initialization is able to achieve $O(\log (\min \{d,k\}))$ error, which is better than $O(\log k)$ of $k$ -median $++$ (Arthur and Vassilvitskii, 2007) when $d$ is small.
194
+
195
+ NDP-HST Local Search. We are interested in the approximation quality of standard local search (Algorithm 1), when the initial centers are produced by our NDP-HST.
196
+
197
+ Theorem 3.10. When initialized by NDP-HST, local search achieves $O(1)$ approximation error in expected $O(k\log \log \min \{n,\triangle \})$ number of iterations for input in general metric space.
198
+
199
+ We remark that the initial centers found by NDP-HST can be used for $k$ -means clustering analogously. For general metrics, $E[\text{cost}_{km}(U)] = O((\min\{\log n, \log \triangle\})^2) OPT_{km}(U)$ , where $\text{cost}_{km}(U)$ is the $k$ -means cost of the returned centers and $OPT_{km}(U)$ is the optimal $k$ -means cost. See Appendix C for more details.
200
+
201
+ # 4 HST Initialization with Differential Privacy
202
+
203
+ In this section, we consider initialization and clustering with differential privacy (DP). Recall (2) that in this problem, $U$ is the universe of data points, and $D \subset U$ is a demand set that needs to be clustered with privacy. Since $U$ is public, simply running initialization algorithms on $U$ would
204
+
205
+ Algorithm 6: DP-HST initialization
206
+ Input: $U, D, \Delta, k, \epsilon$
207
+ Build a level- $L$ 2-HST $T$ based on input $U$
208
+ for each node $v$ in $T$ do
209
+ $\mid N_v \leftarrow |D \cap T(v)|$ , $\hat{N}_v \leftarrow N_v + \text{Lap}(2^{(L - h_v)} / \epsilon)$ , $\text{score}(v) \leftarrow \hat{N}_v \cdot 2^{h_v}$
210
+ Based on $\hat{N}_v$ , apply the same strategy as Algorithm 4: find $C_1$ ; $C_0 = \text{FIND-LEAF}(T, C_1)$
211
+ Output: Private initial center set $C_0 \subseteq U$
212
+
213
+ Algorithm 7: DP-HST local search
214
+ Input: $U$ demand points $D\subseteq U$ , parameter $k,\epsilon ,T$
215
+ Initialization: $F_{1}$ the private initial centers generated by Algorithm 6 with privacy $\epsilon /2$
216
+ Set parameter $\epsilon^{\prime} = \frac{\epsilon}{4\Delta(T + 1)}$
217
+ for $i = 1$ to $T$ do Select $(x,y)\in F_i\times (V\setminus F_i)$ with prob. proportional to $\exp (-\epsilon^{\prime}\times cost(F_i - \{x\} +\{y\}))$ Let $F_{i + 1}\gets F_i - \{x\} +\{y\}$
218
+ Select $j$ from $\{1,2,\dots,T + 1\}$ with probability proportional to $\exp (-\epsilon^{\prime}\times cost(F_j))$
219
+ Output: $F = F_{j}$ the private center set
220
+
221
+ preserve the privacy of $D$ . However, 1) this might be too expensive; 2) in many cases one would probably want to incorporate some information about $D$ in the initialization, since $D$ could be a very imbalanced subset of $U$ . For example, $D$ may only contain data points from one cluster, out of tens of clusters in $U$ . In this case, initialization on $U$ is likely to pick initial centers in multiple clusters, which would not be helpful for clustering on $D$ .
222
+
223
+ Next, we show how our proposed HST initialization can be easily combined with differential privacy and at the same time contains useful information about the demand set $D$ , leading to improved approximation error (Theorem 4.3). Again, suppose $T$ is an $L = \log \triangle$ -level 2-HST of universe $U$ in a general metric space. Denote $N_v = |T(v) \cap D|$ for a node point $v$ . Our private HST initialization (DP-HST) is similar to the non-private Algorithm 4. To gain privacy, we perturb $N_v$ by adding i.i.d. Laplace noise: $\hat{N}_v = N_v + \text{Lap}(2^{(L - h_v)} / \epsilon)$ , where $\text{Lap}(2^{(L - h_v)} / \epsilon)$ is a Laplace random number with rate $2^{(L - h_v)} / \epsilon$ . We will use the perturbed $\hat{N}_v$ for node sampling instead of the true value $N_v$ , as described in Algorithm 6. The DP guarantee of this initialization scheme is straightforward by the composition theory (Dwork, 2006).
224
+
225
+ Theorem 4.1. Algorithm 6 is $\epsilon$ -differentially private.
226
+
227
+ Proof. For each level $i$ , the subtrees $T(v,i)$ are disjoint to each other. The privacy budget used in $i$ -th level is $\epsilon / 2^{(L - i)}$ , so by composition the total privacy budget is $\sum_{i}\epsilon / 2^{(L - i)} < \epsilon$ .
228
+
229
+ Theorem 4.2. Algorithm 6 finds initial centers such that
230
+
231
+ $$
232
+ E \left[ \operatorname {c o s t} _ {k} (D) \right] = O (\log n) \left(O P T _ {k} (D) + k \epsilon^ {- 1} \triangle \log n\right).
233
+ $$
234
+
235
+ DP-HST Local Search. Similarly, we can use private HST initialization to improve the performance of private $k$ -median local search, which is presented in Algorithm 7. After DP-HST initialization, the DP local search procedure follows Gupta et al. (2010) using the exponential mechanism.
236
+
237
+ Theorem 4.3. Algorithm 7 achieves $\epsilon$ -differential privacy. The output centers achieve $\mathrm{cost}_k(D) \leq 6OPT_k(D) + O(\epsilon^{-1}k^2\triangle (\log \log n)\log n)$ in $O(k\log \log n)$ iterations with probability $(1 - \frac{1}{poly(n)})$ .
238
+
239
+ In prior literature, the DP local search with random initialization (Gupta et al., 2010) has 6 multiplicative error and $O(\epsilon^{-1} \triangle k^2 \log^2 n)$ additive error. Our result improves the log $n$ term to $\log \log n$ in the additive error. Meanwhile, the number of iterations needed is improved from $T = O(k \log n)$ to $O(k \log \log n)$ (see Appendix A for an empirical justification). Notably, it has been shown in Gupta et al. (2010) that for $k$ -median problem, the lower bounds on the multiplicative and additive error of any $\epsilon$ -DP algorithm are $O(1)$ and $O(\epsilon^{-1} \triangle k \log (n / k))$ , respectively. Our result matches the lower bound on the multiplicative error, and the additive error is only worse than the bound by a factor of $O(k \log \log n)$ . To our knowledge, Theorem 4.3 is the first result in the literature to improve the error of DP local search in general metric space.
240
+
241
+ # 5 Numerical Results
242
+
243
+ # 5.1 Datasets and Algorithms
244
+
245
+ Discrete Euclidean space. Following previous work, we test $k$ -median clustering on the MNIST hand-written digit dataset (LeCun et al., 1998) with 10 natural clusters (digit 0 to 9). We set $U$ as 10000 randomly chosen data points. We choose the demand set $D$ using two strategies: 1) "balance", where we randomly choose 500 samples from $U$ ; 2) "imbalance", where $D$ contains 500 random samples from $U$ only from digit "0" and "8" (two clusters). We note that, the imbalanced $D$ is a very practical setting in real-world scenarios, where data are typically not uniformly distributed. On this dataset, we test clustering with both $l_{1}$ and $l_{2}$ distance as the underlying metric.
246
+
247
+ Metric space induced by graph. Random graphs have been widely considered in testing $k$ -median methods (Balcan et al., 2013; Todo et al., 2019). Our construction of graphs follows a similar approach as the synthetic pmedinfo graphs provided by the popular OR-Library (Beasley, 1990). The metric $\rho$ for this experiment is the (weighted) shortest path distance. To generate a size- $n$ graph, we first randomly split the nodes into 10 clusters. Within each cluster, each pair of nodes is connected with probability 0.2, and with weight drawn from uniform [0, 1]. For every pair of clusters, we randomly connect some nodes from each cluster, with weights following uniform $[0.5, r]$ . A larger $r$ makes the graph more separable, i.e., clusters are farther from each other (see Appendix A for example graphs). For this task, $U$ has 3000 nodes, and the private set $D$ (500 nodes) is chosen using the similar "balanced" and "imbalanced" approaches as described above. In the imbalanced case, we choose the demand set $D$ randomly from only two clusters.
248
+
249
+ Algorithms. We compare the following clustering algorithms in both non-DP and DP setting: (1) NDP-rand: Local search with random initialization; (2) NDP-kmedian++: Local search with $k$ -median++ initialization (Algorithm 2); (3) NDP-HST: Local search with NDP-HST initialization (Algorithm 4), as described in Section 3; (4) DP-rand: Standard DP local search algorithm (Gupta et al., 2010), which is Algorithm 7 with initial centers randomly chosen from $U$ ; (5) DP-kmedian++: DP local search with $k$ -median++ initialization run on $U$ ; (6) DP-HST: DP local search with HST-initialization (Algorithm 7). For non-DP tasks, we set $L = 6$ . For DP clustering, we use $L = 8$ .
250
+
251
+ For non-DP methods, we set $\alpha = 10^{-3}$ in Algorithm 1 and the maximum number of iterations as 20. To examine the quality of initialization as well as the final centers, we report both the cost at initialization and the cost of the final output. For DP methods, we run the algorithms for $T = 20$ steps and report the results with $\epsilon = 1$ (comparisons/results with other $T$ and $\epsilon$ are similar). We test $k\in \{2,5,10,15,20\}$ . The average cost over $T$ iterations is reported for robustness. All the results are averaged over 10 independent repetitions.
252
+
253
+ # 5.2 Results
254
+
255
+ The results on MNIST and graph data are given in Figure 2. Here we present the $l_{2}$ -clustering on MNIST, and the simulated graph with $r = 1$ (clusters are less separable). The comparisons are similar for both $l_{1}$ metric on MNIST and $r = 100$ graph (see Figure 4 in Appendix A):
256
+
257
+ - From the left column, the initial centers found by HST have lower cost than $k$ -median++ and random initialization, for both non-DP and DP setting, and for both balanced and imbalanced demand set $D$ . This confirms that the proposed HST initialization is more powerful than $k$ -median++ in finding good initial centers.
258
+ - From the right column, we also observe lower final cost of HST followed by local search in DP clustering. In the non-DP case, the final cost curves overlap, which means that although HST offers better initial centers, local search can always find a good solution eventually.
259
+ - The advantage of DP-HST, in terms of both the initial and the final cost, is more significant when $D$ is an imbalanced subset of $U$ . As mentioned before, this is because our DP-HST initialization approach also privately incorporates the information of $D$ .
260
+
261
+ To sum up, the proposed HST initialization scheme could perform better with various metrics and data patterns, in both non-private and private setting—in all cases, HST finds better initial centers with smaller cost than $k$ -median++. HST considerably outperforms $k$ -median++ in the private and imbalanced $D$ setting, for MNIST with both $l_{2}$ and $l_{1}$ metric, and for graph with both $r = 100$ (highly separable) and $r = 1$ (less separable).
262
+
263
+ ![](images/c8e70e52d046c72baaa21c754b4855306587b73060e017cccec665d77ab66db6.jpg)
264
+
265
+ ![](images/7aba38444eab5842cdc5493ec28bd9c6cb4f1333539f6fa7ecedc95057d91349.jpg)
266
+
267
+ ![](images/cddd3b2e607c053e22e251f0c8c1ea42c7b6fc60fc05d94fd0891b1b9c1a6747.jpg)
268
+
269
+ ![](images/0a2643d3c7534b0dd44c309d736b91a48bd3025b28739d85ff79109bb44d5854.jpg)
270
+
271
+ ![](images/38f8b987513b19659ef1ab199375a0ecd2805812fda5ce603b2469f95c4391d0.jpg)
272
+
273
+ ![](images/e308379a9a12791b16f6baa74832934e9090761f8f65d73a4e7b503a4db09a8d.jpg)
274
+
275
+ ![](images/087d420688e957b42ffb70e5bd95b08d77bb33c1b9e6a732d8b239ee44379568.jpg)
276
+ Figure 2: $k$ -median cost on the MNIST ( $l_2$ -metric) and graph dataset ( $r = 1$ ). 1st column: initial cost. 2nd column: final output cost.
277
+
278
+ ![](images/24442efe850f5dd8175768926fcd27890f4bbd58567d0fbfde7c8a889afe152b.jpg)
279
+
280
+ # 6 Conclusion
281
+
282
+ We develop a new initialization framework for the $k$ -median problem in the general metric space. Our approach, called HST initialization, is built upon the HST structure from metric embedding theory. We propose a novel and efficient tree search approach which provably improves the approximation error of the $k$ -median++ method, and has lower complexity (higher efficiency) than $k$ -median++ when $k$ is not too small, which is a common practice. Moreover, we propose differentially private (DP) HST initialization algorithm, which adapts to the private demand point set, leading to better clustering performance. When combined with subsequent DP local search heuristic, our algorithm is able to improve the additive error of DP local search, which is close to the theoretical lower bound within a small factor. Experiments with Euclidean metrics and graph metrics verify the effectiveness of our methods, which improve the cost of both the initial centers and the final $k$ -median output.
283
+
284
+ # References
285
+
286
+ Martín Abadi, Andy Chu, Ian J. Goodfellow, H. Brendan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. Deep learning with differential privacy. In Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security (CCS), pages 308-318, Vienna, Austria, 2016.
287
+ Ameer Ahmed Abbasi and Mohamed F. Younis. A survey on clustering algorithms for wireless sensor networks. Comput. Commun., 30(14-15):2826-2841, 2007.
288
+ Sara Ahmadian, Ashkan Norouzi-Fard, Ola Svensson, and Justin Ward. Better guarantees for k-means and euclidean k-median by primal-dual algorithms. In Proceedings of the 58th IEEE Annual Symposium on Foundations of Computer Science (FOCS), pages 61-72, Berkeley, CA, 2017.
289
+ David Arthur and Sergei Vassilitskii. k-means++: the advantages of careful seeding. In Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pages 1027-1035, New Orleans, LA, 2007.
290
+ Vijay Arya, Naveen Garg, Rohit Khandekar, Adam Meyerson, Kamesh Munagala, and Vinayaka Pandit. Local search heuristics for k-median and facility location problems. SIAM J. Comput., 33 (3):544-562, 2004.
291
+ Olivier Bachem, Mario Lucic, S. Hamed Hassani, and Andreas Krause. Approximate k-means++ in sublinear time. In Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence (AAAI), pages 1459-1467, Phoenix, AZ, 2016.
292
+ Bahman Bahmani, Benjamin Moseley, Andrea Vattani, Ravi Kumar, and Sergei Vassilvitskii. Scalable k-means++. Proc. VLDB Endow., 5(7):622-633, 2012.
293
+ Maria-Florina Balcan, Steven Ehrlich, and Yingyu Liang. Distributed k-means and k-median clustering on general communication topologies. In Advances in Neural Information Processing Systems (NIPS), pages 1995-2003, Lake Tahoe, NV, 2013.
294
+ Maria-Florina Balcan, Travis Dick, Yingyu Liang, Wenlong Mou, and Hongyang Zhang. Differentially private clustering in high-dimensional euclidean spaces. In Proceedings of the 34th International Conference on Machine Learning (ICML), pages 322-331, Sydney, Australia, 2017.
295
+ Arindam Banerjee, Srujana Merugu, Inderjit S. Dhillon, and Joydeep Ghosh. Clustering with bregman divergences. J. Mach. Learn. Res., 6:1705-1749, 2005.
296
+ Yair Bartal. Probabilistic approximations of metric spaces and its algorithmic applications. In Proceedings of the 37th Annual Symposium on Foundations of Computer Science (FOCS), pages 184-193, Burlington, VT, 1996.
297
+ John E Beasley. OR-Library: distributing test problems by electronic mail. Journal of the Operational Research Society, 41(11):1069-1072, 1990.
298
+ Pavel Berkhin. A survey of clustering data mining techniques. In Grouping Multidimensional Data, pages 25-71. Springer, 2006.
299
+ Guy E. Blelloch, Yan Gu, and Yihan Sun. Efficient construction of probabilistic tree embeddings. In Proceedings of the 44th International Colloquium on Automata, Languages, and Programming (ICALP), pages 26:1-26:14, Warsaw, Poland, 2017.
300
+ Jaroslaw Byrka, Thomas W. Pensyl, Bartosz Rybicki, Aravind Srinivasan, and Khoa Trinh. An improved approximation for $k$ -median, and positive correlation in budgeted optimization. In Proceedings of the Twenty-Sixth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pages 737-756, San Diego, CA, 2015.
301
+ Kamalika Chaudhuri and Claire Monteleoni. Privacy-preserving logistic regression. In Advances in Neural Information Processing Systems (NIPS), pages 289-296, Vancouver, Canada, 2008.
302
+ Kamalika Chaudhuri, Claire Monteleoni, and Anand D. Sarwate. Differentially private empirical risk minimization. J. Mach. Learn. Res., 12:1069-1109, 2011.
303
+
304
+ Davin Choo, Christoph Grunau, Julian Portmann, and Václav Rozhon. k-means++: few more steps yield constant approximation. In International Conference on Machine Learning, pages 1909-1917, 2020.
305
+ Vincent Cohen-Addad, Silvio Lattanzi, Ashkan Norouzi-Fard, Christian Sohler, and Ola Svensson. Parallel and efficient hierarchical k-median clustering. In Advances in Neural Information Processing Systems (NeurIPS), pages 20333–20345, virtual, 2021.
306
+ Inderjit S. Dhillon and Dharmendra S. Modha. Concept decompositions for large sparse text data using clustering. Mach. Learn., 42(1/2):143-175, 2001.
307
+ Jinshuo Dong, Aaron Roth, and Weijie J Su. Gaussian differential privacy. Journal of the Royal Statistical Society Series B: Statistical Methodology, 84(1):3-37, 2022.
308
+ Cynthia Dwork. Differential privacy. In Proceedings of the 33rd International Colloquium on Automata, Languages and Programming (ICALP), Part II, pages 1-12, Venice, Italy, 2006.
309
+ Jittat Fakcharoenphol, Satish Rao, and Kunal Talwar. A tight bound on approximating arbitrary metrics by tree metrics. J. Comput. Syst. Sci., 69(3):485-497, 2004.
310
+ Chenglin Fan and Ping Li. Distances release with differential privacy in tree and grid graph. In IEEE International Symposium on Information Theory (ISIT), pages 2190-2195, 2022.
311
+ Chenglin Fan, Ping Li, and Xiaoyun Li. Private graph all-pairwise-shortest-path distance release with improved error rate. In Advances in Neural Information Processing Systems (NeurIPS), New Orleans, LA, 2022.
312
+ Chenglin Fan, Ping Li, and Xiaoyun Li. LSDS++: Dual sampling for accelerated k-means++. In Proceedings of the International Conference on Machine Learning (ICML), pages 9640-9649, Honolulu, HI, 2023.
313
+ Huang Fang, Xiaoyun Li, Chenglin Fan, and Ping Li. Improved convergence of differential private sgd with gradient clipping. In Proceedings of the Eleventh International Conference on Learning Representations (ICLR), Kigali, Rwanda, 2023.
314
+ Dan Feldman, Amos Fiat, Haim Kaplan, and Kobbi Nissim. Private coresets. In Proceedings of the 41st Annual ACM Symposium on Theory of Computing (STOC), pages 361-370, Bethesda, MD, 2009.
315
+ Dan Feldman, Chongyuan Xiang, Ruihao Zhu, and Daniela Rus. Coresets for differentially private k-means clustering and applications to privacy in mobile sensor networks. In Proceedings of the 16th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN), pages 3-15, Pittsburgh, PA, 2017.
316
+ Jason Ge, Zhaoran Wang, Mengdi Wang, and Han Liu. Minimax-optimal privacy-preserving sparse PCA in distributed systems. In Proceedings of the International Conference on Artificial Intelligence and Statistics (AISTATS), pages 1589-1598, Playa Blanca, Lanzarote, Canary Islands, Spain, 2018.
317
+ Badih Ghazi, Ravi Kumar, and Pasin Manurangsi. Differentially private clustering: Tight approximation ratios. In Advances in Neural Information Processing Systems (NeurIPS), virtual, 2020.
318
+ Christoph Grunau, Ahmet Alper Özüdogru, Václav Rozhón, and Jakub Tětek. A nearly tight analysis of greedy k-means++. In Proceedings of the 2023 Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pages 1012-1070, Florence, Italy, 2023.
319
+ Anupam Gupta, Katrina Ligett, Frank McSherry, Aaron Roth, and Kunal Talwar. Differentially private combinatorial optimization. In Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pages 1106-1125, Austin, TX, 2010.
320
+ Zhiyi Huang and Jinyan Liu. Optimal differentially private algorithms for k-means clustering. In Proceedings of the 37th ACM SIGMOD-SIGACT-SIGAI Symposium on Principles of Database Systems (PODS), pages 395-408, Houston, TX, 2018.
321
+
322
+ Matthew Jones, Huy L. Nguyen, and Thy D. Nguyen. Differentially private clustering via maximum coverage. In Proceedings of the Thirty-Fifth AAAI Conference on Artificial Intelligence (AAAI), pages 11555-11563, Virtual Event, 2021.
323
+ Tapas Kanungo, David M. Mount, Nathan S. Netanyahu, Christine D. Piatko, Ruth Silverman, and Angela Y. Wu. A local search approximation algorithm for k-means clustering. In Proceedings of the 18th Annual Symposium on Computational Geometry (CG), pages 10-18, Barcelona, Spain, 2002.
324
+ Leon Kaufman, Marc Vanden Eede, and Pierre Hansen. A plant and warehouse location problem. Journal of the Operational Research Society, 28(3):547-554, 1977.
325
+ Silvio Lattanzi and Christian Sohler. A better k-means++ algorithm via local search. In Proceedings of the 36th International Conference on Machine Learning (ICML), pages 3662-3671, Long Beach, CA, 2019.
326
+ Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner. Gradient-based learning applied to document recognition. Proc. IEEE, 86(11):2278-2324, 1998.
327
+ Ping Li and Xiaoyun Li. Differential privacy with random projections and sign random projections. In Advances in Neural Information Processing Systems (NeurIPS), New Orleans, LA, 2023a.
328
+ Xiaoyun Li and Ping Li. Differentially private one permutation hashing and bin-wise consistent weighted sampling. arXiv preprint arXiv:2306.07674, 2023b.
329
+ Stuart P. Lloyd. Least squares quantization in PCM. IEEE Trans. Inf. Theory, 28(2):129-136, 1982.
330
+ Konstantin Makarychev, Yury Makarychev, and Ilya P. Razenshteyn. Performance of johnson-lindenstrauss transform for $k$ -means and $k$ -medians clustering. In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing (STOC), pages 1027-1038, Phoenix, AZ, 2019.
331
+ Frank McSherry and Kunal Talwar. Mechanism design via differential privacy. In Proceedings of the 48th Annual IEEE Symposium on Foundations of Computer Science (FOCS), pages 94-103, Providence, RI, 2007.
332
+ Richard Nock, Raphaël Canyasse, Roksana Boreli, and Frank Nielsen. k-variates++: more pluses in the k-means++. In Proceedings of the 33nd International Conference on Machine Learning (ICML), pages 145-154, New York City, NY, 2016.
333
+ Girish Punj and David W Stewart. Cluster analysis in marketing research: Review and suggestions for application. Journal of Marketing Research, 20(2):134-148, 1983.
334
+ Maurizio G. C. Resende and Renato Fonseca F. Werneck. A fast swap-based local search procedure for location problems. Ann. Oper. Res., 150(1):205-230, 2007.
335
+ Rahul Shah. Faster algorithms for k-median problem on trees with smaller heights. Technical Report, 2003.
336
+ Uri Stemmer and Haim Kaplan. Differentially private k-means with constant multiplicative error. In Advances in Neural Information Processing Systems (NeurIPS), pages 5436-5446, Montréal, Canada, 2018.
337
+ Arie Tamir. An $o(pn^2)$ algorithm for the p-median and related problems on tree graphs. Oper. Res. Lett., 19(2):59-64, 1996.
338
+ Keisuke Todo, Atsuyoshi Nakamura, and Mineichi Kudo. A fast approximate algorithm for k-median problem on a graph. In Proceedings of the 15th International Workshop on Mining and Learning with Graphs (MLG), Anchorage, AK, 2019.
339
+ Kang Wei, Jun Li, Ming Ding, Chuan Ma, Howard H. Yang, Farhad Farokhi, Shi Jin, Tony Q. S. Quek, and H. Vincent Poor. Federated learning with differential privacy: Algorithms and performance analysis. IEEE Trans. Inf. Forensics Secur., 15:3454-3469, 2020.
340
+
341
+ # $k$ -Median Clustering via Metric Embedding: Towards Better Initialization with Differential Privacy (Supplementary Material)
342
+
343
+ # A More Details on Experiments
344
+
345
+ # A.1 Examples of Graph Data
346
+
347
+ In Figure 3, we plot two example graphs (subgraphs of 50 nodes) with $r = 100$ and $r = 1$ . When $r = 100$ , the graph is highly separable (i.e., clusters are far from each other). When $r = 1$ , the clusters are harder to be distinguished from each other.
348
+
349
+ ![](images/7801249386627e1fad5a417576a201a836d54c27083dea07e65f41f4dffc099a.jpg)
350
+
351
+ ![](images/6b767a6ddd348e41d89a0094a3df7ffe6430fd9f21850841fac67bf0ef8d2d50.jpg)
352
+ Figure 3: Example of synthetic graphs: subgraph of 50 nodes. Upper: $r = 1$ . Bottom: $r = 100$ . Darker and thicker edges have smaller distance. When $r = 100$ , the graph is more separable.
353
+
354
+ ![](images/6f0be3444bd405d26f53638f43120cad3eadae21e38febfa1355f9e0163a21ce.jpg)
355
+
356
+ ![](images/97428805cd115f3648cbb93308b38bc82bb6e710ea7b849836d2d8091a22419c.jpg)
357
+
358
+ ![](images/9b98966b244237b67fe5917f0afcc49d547c44526e2c4fcf403f1ac441338e53.jpg)
359
+
360
+ ![](images/7196fff7eed19099060f35682f35d5fb601c4cd40ef692dc7b5bb3e179de9334.jpg)
361
+
362
+ ![](images/86998a040df659ff02376cc79d762c6cb37f8b591668bf53c4dcf88da3e7bdd8.jpg)
363
+
364
+ ![](images/8216df4b93b7e68c6618adbd70486970ceb74f163577e87604ba3f7ef1bde78f.jpg)
365
+
366
+ ![](images/eaf7ee4b9e3aec5423dc3b487d3810d1a7dfa28ec740baa90bc7bd3f8a86fadc.jpg)
367
+ Figure 4: $k$ -median cost on the MNIST ( $l_1$ -metric) and graph dataset ( $r = 100$ ) 1st column: initial cost. 2nd column: final output cost.
368
+
369
+ ![](images/eaa6adf9674ffd4998506431e5281dcb3f083bc6e3b4f532a7d643baf574c432.jpg)
370
+
371
+ # A.2 More Experiments
372
+
373
+ # A.3 Improved Iteration Cost of DP-HST
374
+
375
+ In Theorem 4.3, we show that under differential privacy constraints, the proposed DP-HST (Algorithm 7) improves both the approximation error and the number of iterations required to find a good solution of classical DP local search (Gupta et al., 2010). In this section, we provide some numerical results to justify the theory.
376
+
377
+ First, we need to properly measure the iteration cost of DP local search. This is because, unlike the non-private clustering, the $k$ -median cost after each iteration in DP local search is not decreasing monotonically, due to the probabilistic exponential mechanism. To this end, for the cost sequence with length $T = 20$ , we compute its moving average sequence with window size 5. Attaining the
378
+
379
+ minimal value of the moving average indicates that the algorithm has found a "local optimum", i.e., it has reached a "neighborhood" of solutions with small clustering cost. Thus, we use the number of iterations to reach such local optimum as the measure of iteration cost. The results are provided in Figure 5. We see that on all the tasks (MNIST with $l_{1}$ and $l_{2}$ distance, and graph dataset with $r = 1$ and $r = 100$ ), DP-HST has significantly smaller iterations cost. In Figure 6, we further report the $k$ -median cost of the best solution in $T$ iterations found by each DP algorithm. We see that DP-HST again provide the smallest cost. This additional set of experiments again validates the claims of Theorem 4.3, that DP-HST is able to find better initial centers in fewer iterations.
380
+
381
+ ![](images/71f19bc752e0f7e36459d91a330735a4ecbefb55e737c7fe50eb40b797faeeed.jpg)
382
+
383
+ ![](images/c7a186da5478b1a2e72e88883138c116a51f8092bfac2449dd4b245844ebb10e.jpg)
384
+
385
+ ![](images/b8ce09e8bc8c9dbf78dac473c138102a198cc1494d7c87214e87520dcb939133.jpg)
386
+ Figure 5: Iteration cost to reach a locally optimal solution, on MNIST and graph datasets with different $k$ . The demand set is an imbalanced subset of the universe.
387
+
388
+ ![](images/beddfcc9318965f60234c94ecacef38bf12e2c572f294ec1e8f11deea7ddd7ae.jpg)
389
+
390
+ ![](images/440b79e0a9ad9bb882f61c5dda7e1b1105c17c759a0ed888320cf6f2e1841914.jpg)
391
+ Figure 6: The $k$ -median cost of the best solution found by each differentially private algorithm. The demand set is an imbalanced subset of the universe. Same comparison holds on graph data.
392
+
393
+ ![](images/4116b449c10ee4b8430438ab6ddd1a373c067dc7f8c56024633f48d4940ada99.jpg)
394
+
395
+ # B Technical Proofs
396
+
397
+ The following composition result of differential privacy will be used in our proof.
398
+
399
+ Theorem B.1 (Composition Theorem (Dwork, 2006)). If Algorithms $\mathbb{A}_1,\mathbb{A}_2,\dots,\mathbb{A}_m$ are $\epsilon_1,\epsilon_2,\ldots ,\epsilon_m$ differentially private respectively, then the union $(\mathbb{A}_1(D),\mathbb{A}_2(D),\ldots ,\mathbb{A}_m(D))$ is $\sum_{i = 1}^{m}\epsilon_{i}$ -DP.
400
+
401
+ # B.1 Proof of Lemma 3.5
402
+
403
+ Proof. Consider the intermediate output of Algorithm 4, $C_1 = \{v_1, v_2, \dots, v_k\}$ , which is the set of roots of the minimal subtrees each containing exactly one output center in $C_0$ . Suppose one of the optimal "root sets" that minimizes (4) is $C_1^* = \{v_1', v_2', \dots, v_k'\}$ . If $C_1 = C_1^*$ , the proof is done. Thus, we prove the case for $C_1 \neq C_1^*$ . Note that $T(v), v \in C_1$ are disjoint subtrees. We have the following reasoning.
404
+
405
+ - Case 1: for some $i, j'$ , $v_i$ is a descendant node of $v_j'$ . Since the optimal center point $f^*$ is a leaf node by the definition of (4), we know that there must exist one child node of $v_j'$ that expands a subtree which contains $f^*$ . Therefore, we can always replace $v_j'$ by one of its child nodes. Hence, we can assume that $v_i$ is not a descendant of $v_j'$ .
406
+
407
+ Note that we have $score(v_j') \leq score(v_i)$ if $v_j' \notin C_1^* \cap C_1$ . Algorithm 4 sorts all the nodes based on the score value, and it would give higher priority to picking $v_j'$ than $v_i$ if $score(v_j') > score(v_i)$ and $v_i$ is not a child node of $v_j'$ .
408
+
409
+ - Case 2: for some $i, j'$ , $v_j'$ is a descendant of $v_i$ . In this case, optimal center point $f^*$ , which is a leaf of $T(v_i)$ , must also be a leaf node of $T(v_j')$ . We can simply replace $C_1$ with the swap $C_1 \setminus \{v_i\} + \{v_j'\}$ which does not change $\text{cost}_k^{T'}(U)$ . Hence, we can assume that $v_j'$ is not a descendant of $v_i$ .
410
+ - Case 3: Otherwise. By the construction of $C_1$ , we know that $score(v_j') \leq \min\{score(v_i), i = 1, \dots, k\}$ when $v_j' \in C_1^* \setminus C_1$ . Consider the swap between $C_1$ and $C_1^*$ . By the definition of tree distance, we have $OPT_k^T(U) \geq \sum_{v_i \in C_1 \setminus C_1^*} N_{v_i} 2^{h_{v_i}}$ , since $\{T(v_i), v_i \in C_1 \setminus C_1^*\}$ does not contain any center of the optimal solution determined by $C_1^*$ (which is also the optimal "root set" for $OPT_k^T(U)$ ).
411
+
412
+ Thus, we only need to consider Case 3. Let us consider the optimal clustering with center set be $C^* = \{c_1^*, c_2^*, \dots, c_k^*\}$ (each center $c_j^*$ is a leaf of subtree whose root be $c_j'$ ), and $S_j'$ be the leaves assigned to $c_j^*$ . Let $S_j$ denote the set of leaves in $S_j'$ whose distance to $c_j^*$ is strictly smaller than its distance to any centers in $C_1$ . Let $P_j$ denote the union of paths between leaves of $S_j$ to its closest center in $C_1$ . Let $v_j''$ be the nodes in $P_j$ with highest level satisfying $T(v_j'') \cap C_1 = \emptyset$ . The score of $v_j''$ is $2^{h_{v_j''}} N(v_j'')$ . That means the swap with a center $v_j'$ into $C_1$ can only reduce $4 \cdot 2^{h_{v_j''}} N(v_j'')$ to $cost_k^{T'}(U)$ (the tree distance between any leaf in $S_j$ and its closest center in $C_1$ is at most $4 \cdot 2^{h_{v_j''}}$ ). We just use $v_j'$ to represent $v_j''$ for later part of this proof for simplicity. By our reasoning, summing all the swaps over $C_1^* \setminus C_1$ gives
413
+
414
+ $$
415
+ \operatorname{cost}_{k}^{T^{\prime}}(U) - \operatorname{OPT}_{k}^{T}(U)\leq 4\sum_{v^{\prime}_{j}\in C_{1}^{*}\setminus C_{1}}N_{v^{\prime}_{j}}2^{h_{v^{\prime}_{j}}},
416
+ $$
417
+
418
+ $$
419
+ O P T _ {k} ^ {T} (U) \geq \sum_ {v _ {i} \in C _ {1} \backslash C _ {1} ^ {*}} N _ {v _ {i}} 2 ^ {h _ {v _ {i}}}.
420
+ $$
421
+
422
+ Also, based on our discussion on Case 1, it holds that
423
+
424
+ $$
425
+ N _ {v _ {j} ^ {\prime}} 2 ^ {h _ {v _ {j} ^ {\prime}}} - N _ {v _ {i}} 2 ^ {h _ {v _ {i}}} \leq 0.
426
+ $$
427
+
428
+ Summing them together, we have $\text{cost}_k^{T'}(U) \leq 5OPT_k^T(U)$ .
429
+
430
+ # B.2 Proof of Lemma 3.6
431
+
432
+ Proof. Since the subtrees in $C_1$ are disjoint, it suffices to consider one subtree with root $v$ . With a little abuse of notation, let $cost_1^{T'}(v, U)$ denote the optimal $k$ -median cost within the point set $T(v)$ with one center in 2-HST:
433
+
434
+ $$
435
+ \operatorname {c o s t} _ {1} ^ {T ^ {\prime}} (v, U) = \min _ {x \in T (v)} \sum_ {y \in T (v)} \rho^ {T} (x, y), \tag {7}
436
+ $$
437
+
438
+ which is the optimal cost within the subtree. Suppose $v$ has more than one children $u, w, \ldots$ , otherwise the optimal center is clear. Suppose the optimal solution of $cost_1^{T'}(v, U)$ chooses a leaf node in $T(u)$ , and our HST initialization algorithm picks a leaf of $T(w)$ . If $u = w$ , then HST chooses the optimal one where the argument holds trivially. Thus, we consider $u \neq w$ . We have the following two observations:
439
+
440
+ - Since one needs to pick a leaf of $T(u)$ to minimize $cost_1^{T'}(v, U)$ , we have $cost_1^{T'}(v, U) \geq \sum_{x \in ch(v), x \neq u} N_x \cdot 2^{h_x}$ where $ch(v)$ denotes the children nodes of $v$ .
441
+ - By our greedy strategy, $\text{cost}_1^T(v, U) \leq \sum_{x \in \text{ch}(u)} N_x \cdot 2^{h_x} \leq \text{cost}_1^{T'}(v, U) + N_u \cdot 2^{h_u}$ .
442
+
443
+ Since $h_u = h_w$ , we have
444
+
445
+ $$
446
+ 2 ^ {h _ {u}} \cdot (N _ {u} - N _ {w}) \leq 0,
447
+ $$
448
+
449
+ since our algorithm picks subtree roots with highest scores. Then we have $cost_1^T(v, U) \leq cost_1^{T'}(v, U) + N_w \cdot 2^{h_w} \leq 2cost_1^{T'}(v, U)$ . Since the subtrees in $C_1$ are disjoint, the union of centers for $OPT_1^T(v, U)$ , $v \in C_1$ forms the optimal centers with size $k$ . Note that, for any data point $p \in U \setminus C_1$ , the tree distance $\rho^T(p, f)$ for $\forall f$ that is a leaf node of $T(v)$ , $v \in C_1$ is the same. That is, the choice of leaf in $T(v)$ as the center does not affect the $k$ -median cost under 2-HST metric. Therefore, union bound over $k$ subtree costs completes the proof.
450
+
451
+ # B.3 Proof of Proposition 3.2
452
+
453
+ Proof. It is known that the 2-HST can be constructed in $O(dn\log n)$ (Bartal, 1996). The subtree search in Algorithm 4 involves at most sorting all the nodes in the HST based on the score, which takes $O(n\log n)$ . We use a priority queue to store the nodes in $C_1$ . When we insert a new node $v$ into queue, its parent node (if existing in the queue) would be removed from the queue. The number of nodes is $O(n)$ and each operation (insertion, deletion) in a priority queue based on score has $O(\log n)$ complexity. Lastly, the total time to obtain $C_0$ is $O(n)$ , as the FIND-LEAF only requires a top down scan in $k$ disjoint subtrees of $T$ . Summing parts together proves the claim.
454
+
455
+ # B.4 Proof of Theorem 4.2
456
+
457
+ Similarly, we prove the error in general metric by first analyzing the error in 2-HST metric. Then the result follows from Lemma 3.4. Let $\text{cost}_k^T(D)$ , $\text{cost}_k^{T'}(D)$ and $\text{OPT}_k^T(D)$ be defined analogously to (3), (4) and (5), where " $y \in U$ " in the summation is changed into " $y \in D$ " since $D$ is the demand set. That is,
458
+
459
+ $$
460
+ \operatorname {c o s t} _ {k} ^ {T} (D) = \sum_ {y \in D} \min _ {x \in C _ {0}} \rho^ {T} (x, y), \tag {8}
461
+ $$
462
+
463
+ $$
464
+ \operatorname {c o s t} _ {k} ^ {T ^ {\prime}} (D, C _ {1}) = \min _ {| F \cap T (v) | = 1, \forall v \in C _ {1}} \sum_ {y \in D} \min _ {x \in F} \rho^ {T} (x, y), \tag {9}
465
+ $$
466
+
467
+ $$
468
+ O P T _ {k} ^ {T} (D) = \min _ {F \subset D, | F | = k} \sum_ {y \in D} \min _ {x \in F} \rho^ {T} (x, y) \equiv \min _ {C _ {1} ^ {\prime}} c o s t _ {k} ^ {T \prime} (D, C _ {1} ^ {\prime}). \tag {10}
469
+ $$
470
+
471
+ We have the following.
472
+
473
+ Lemma B.2. $\text{cost}_k^T(D) \leq 10OPT_k^T(D) + 10ck\epsilon^{-1}\triangle \log n$ with probability $1 - 4k/n^c$ .
474
+
475
+ Proof. The result follows by combining the following Lemma B.4, Lemma B.5, and applying union bound. $\square$
476
+
477
+ Lemma B.3. For any node $v$ in $T$ , with probability $1 - 1/n^c$ , $|\hat{N}_v \cdot 2^{h_v} - N_v \cdot 2^{h_v}| \leq c\epsilon^{-1}\triangle \log n$ .
478
+
479
+ Proof. Since $\hat{N}_v = N_v + \text{Lap}(2^{(L - h_v) / 2} / \epsilon)$ , we have
480
+
481
+ $$
482
+ P r \left[ \left| \hat {N} _ {v} - N _ {v} \right| \geq x / \epsilon \right] = e x p \left(- x / 2 ^ {\left(L - h _ {v}\right)}\right).
483
+ $$
484
+
485
+ As $L = \log \triangle$ , we have
486
+
487
+ $$
488
+ P r \left[ | \hat {N} _ {v} - N _ {v} | \geq x \triangle / \left(2 ^ {h _ {v}} \epsilon\right) \right] \leq e x p (- x).
489
+ $$
490
+
491
+ Hence, for some constant $c > 0$
492
+
493
+ $$
494
+ P r \left[ \left| \hat {N} _ {v} \cdot 2 ^ {h _ {v}} - N _ {v} \cdot 2 ^ {h _ {v}} \right| \leq c \epsilon^ {- 1} \triangle \log n \right] \geq 1 - e x p (- c \log n) = 1 - 1 / n ^ {c}.
495
+ $$
496
+
497
+ ![](images/bde034e65431913bedb11158286353444188a0a0f63a89727b84fd23b0af478f.jpg)
498
+
499
+ Lemma B.4 (DP Subtree Search). With probability $1 - 2k / n^{c}$ , $\text{cost}_{k}^{T'}(D) \leq 5OPT_{k}^{T}(D) + 4ck\epsilon^{-1}\triangle \log n$ .
500
+
501
+ Proof. The proof is similar to that of Lemma 3.5. Consider the intermediate output of Algorithm 4, $C_1 = \{v_1, v_2, \dots, v_k\}$ , which is the set of roots of the minimal disjoint subtrees each containing exactly one output center in $C_0$ . Suppose one of the optimal "root sets" that minimizes (4) is $C_1^* = \{v_1', v_2', \dots, v_k'\}$ . Assume $C_1 \neq C_1^*$ . By the same argument as the proof of Lemma 3.5, we consider some $i, j$ such that $v_i \neq v_j'$ , where $v_i$ is not a descendant of $v_j'$ and $v_j'$ is not a descendant of $v_i$ . By the construction of $C_1$ , we know that $score(v_j') \leq \min \{score(v_i), i = 1, \dots, k\}$ when $v_j' \in C_1^* \setminus C_1$ . Consider the swap between $C_1$ and $C_1^*$ . By the definition of tree distance, we have $OPT_k^T(D) \geq \sum_{v_i \in C_1 \setminus C_1^*} N_{v_i} 2^{h_{v_i}}$ , since $\{T(v_i), v_i \in C_1 \setminus C_1^*\}$ does not contain any center of the optimal solution determined by $C_1^*$ (which is also the optimal "root set" for $OPT_k^T$ ).
502
+
503
+ Let us consider the optimal clustering with center set be $C^* = \{c_1^*, c_2^*, \dots, c_k^*\}$ (each center $c_j^*$ is a leaf of subtree whose root be $c_j'$ ), and $S_j'$ be the leaves assigned to $c_j^*$ . Let $S_j$ denote the set of leaves in $S_j'$ whose distance to $c_j^*$ is strictly smaller than its distance to any centers in $C_1$ . Let $P_j$ denote the union of paths between leaves of $S_j$ to its closest center in $C_1$ . Let $v_j''$ be the nodes in $P_j$ with highest level satisfying $T(v_j'') \cap C_1 = \emptyset$ . The score of $v_j''$ is $2^{h_{v_j''}} N(v_j'')$ . That means the swap with a center $v_j'$ into $C_1$ can only reduce $4 \cdot 2^{h_{v_j''}} N(v_j'')$ to $cost_k^{T'}(D)$ (the tree distance between any leaf in $S_j$ and its closest center in $C_1$ is at most $4 \cdot 2^{h_{v_j''}}$ ). We just use $v_j'$ to represent $v_j''$ for later part of this proof for simplicity. Summing all the swaps over $C_1^* \setminus C_1$ , we obtain
504
+
505
+ $$
506
+ \operatorname{cost}_{k}^{T^{\prime}}(D) - OPT_{k}^{T}(D)\leq 4\sum_{v^{\prime}_{j}\in C_{1}^{*}\backslash C_{1}}N_{v^{\prime}_{j}}2^{h_{v^{\prime}_{j}}},
507
+ $$
508
+
509
+ $$
510
+ OPT_{k}^{T}(D) \geq \sum_{v_{i} \in C_{1} \backslash C_{1}^{*}} N_{v_{i}} 2^{h_{v_{i}}}.
511
+ $$
512
+
513
+ Applying union bound with Lemma B.3, with probability $1 - 2 / n^{c}$ , we have
514
+
515
+ $$
516
+ N _ {v _ {j} ^ {\prime}} 2 ^ {h _ {v _ {j} ^ {\prime}}} - N _ {v _ {i}} 2 ^ {h _ {v _ {i}}} \leq 2 c \epsilon^ {- 1} \triangle \log n.
517
+ $$
518
+
519
+ Consequently, we have with probability, $1 - 2k / n^{c}$
520
+
521
+ $$
522
+ \begin{array}{l} \operatorname {c o s t} _ {k} ^ {T \prime} (D) \leq 5 O P T _ {k} ^ {T} (D) + 4 c | C _ {1} \backslash C _ {1} ^ {*} | \epsilon^ {- 1} \triangle \log n \\ \leq 5 O P T _ {k} ^ {T} (D) + 4 c k \epsilon^ {- 1} \triangle \log n, \\ \end{array}
523
+ $$
524
+
525
+ which proves the claim.
526
+
527
+ ![](images/4e3c7a8dd34392a0d54ba8d44a5bcdfd6df4eb82539d06ca4ea70ec3d56cdb79.jpg)
528
+
529
+ Lemma B.5 (DP Leaf Search). With probability $1 - 2k / n^{c}$ , Algorithm 6 produces initial centers with $\mathrm{cost}_k^T (D)\leq 2\mathrm{cost}_k^{T'}(D) + 2ck\epsilon^{-1}\triangle \log n$
530
+
531
+ Proof. The proof strategy follows Lemma 3.6. We first consider one subtree with root $v$ . Let $cost_1^{T'}(v, D)$ denote the optimal $k$ -median cost within the point set $T(v)$ with one center in 2-HST:
532
+
533
+ $$
534
+ \operatorname {c o s t} _ {1} ^ {T ^ {\prime}} (v, D) = \min _ {x \in T (v)} \sum_ {y \in T (v) \cap D} \rho^ {T} (x, y). \tag {11}
535
+ $$
536
+
537
+ Suppose $v$ has more than one child $u, w, \ldots$ , and the optimal solution of $cost_1^{T'}(v, D)$ chooses a leaf node in $T(u)$ , and our HST initialization algorithm picks a leaf of $T(w)$ . If $u = w$ , then HST chooses the optimal one where the argument holds trivially. Thus, we consider $u \neq w$ . We have the following two observations:
538
+
539
+ - Since one needs to pick a leaf of $T(u)$ to minimize $cost_1^{T'}(v, D)$ , we have $cost_1^{T'}(v, D) \geq \sum_{x \in ch(v), x \neq u} N_x \cdot 2^{h_x}$ where $ch(v)$ denotes the children nodes of $v$ .
540
+ - By our greedy strategy, $\text{cost}_1^T(v, D) \leq \sum_{x \in \text{ch}(u)} N_x \cdot 2^{h_x} \leq \text{cost}_1^{T'}(v, D) + N_u \cdot 2^{h_u}$ .
541
+
542
+ As $h_u = h_w$ , leveraging Lemma B.3, with probability $1 - 2 / n^c$
543
+
544
+ $$
545
+ \begin{array}{l} 2 ^ {h _ {u}} \cdot \left(N _ {u} - N _ {w}\right) \leq 2 ^ {h _ {u}} \left(\hat {N} _ {u} - \hat {N} _ {w}\right) + 2 c \epsilon^ {- 1} \triangle \log n \\ \leq 2 c \epsilon^ {- 1} \triangle \log n. \\ \end{array}
546
+ $$
547
+
548
+ since our algorithm picks subtree roots with highest scores. Then we have $cost_1^T(v, D) \leq cost_1^{T'}(v, D) + N_w \cdot 2^{h_u} + 2c\epsilon^{-1}\triangle \log n \leq 2cost_1^{T'}(v, D) + 2c\epsilon^{-1}\triangle \log n$ with high probability. Lastly, applying union bound over the disjoint $k$ subtrees gives the desired result.
549
+
550
+ # B.5 Proof of Theorem 4.3
551
+
552
+ Proof. The privacy analysis is straightforward, by using the composition theorem (Theorem B.1). Since the sensitivity of $cost(\cdot)$ is $\triangle$ , in each swap iteration the privacy budget is $\epsilon / (2(T + 1))$ . Also, we spend another $\epsilon / (2(T + 1))$ of the privacy budget for picking an output. Hence, the total privacy is $\epsilon / 2$ for local search. Algorithm 6 takes $\epsilon / 2$ DP budget for initialization, so the total privacy is $\epsilon$ .
553
+
554
+ The analysis of the approximation error follows from Gupta et al. (2010), where the initial cost is reduced by our private HST method. We need the following two lemmas.
555
+
556
+ Lemma B.6 (Gupta et al. (2010)). Assume the solution to the optimal utility is unique. For any output $o \in O$ of the $2\triangle \epsilon$ -DP exponential mechanism on dataset $D$ , it holds for all $t > 0$ that
557
+
558
+ $$
559
+ P r [ q (D, o) \leq \max _ {o \in O} q (D, o) - (\ln | O | + t) / \epsilon ] \leq e ^ {- t},
560
+ $$
561
+
562
+ where $|O|$ is the size of the output set.
563
+
564
+ Lemma B.7 (Arya et al. (2004)). For any set $F \subseteq D$ with $|F| = k$ , there exists some swap $(x, y)$ such that the local search method admits
565
+
566
+ $$
567
+ c o s t _ {k} (F, D) - c o s t _ {k} (F - \{x \} + \{y \}, D) \geq \frac {c o s t _ {k} (F , D) - 5 O P T (D)}{k}.
568
+ $$
569
+
570
+ From Lemma B.7, we know that when $\text{cost}_k(F_i, D) > 6OPT(D)$ , there exists a swap $(x, y)$ s.t.
571
+
572
+ $$
573
+ \operatorname {c o s t} _ {k} \left(F _ {i} - \{x \} + \{y \}, D\right) \leq \left(1 - \frac {1}{6 k}\right) \operatorname {c o s t} _ {k} \left(F _ {i}, D\right).
574
+ $$
575
+
576
+ At each iteration, there are at most $n^2$ possible outputs (i.e., possible swaps), i.e., $|O| = n^2$ . Using Lemma B.6 with $t = 2\log n$ , for $\forall i$ ,
577
+
578
+ $$
579
+ Pr\left[ cost_{k}(F_{i + 1}, D) \leq cost_{k}(F_{i + 1}^{*}, D) + 4 \frac{\log n}{\epsilon^{\prime}} \right] \geq 1 - 1 / n^{2},
580
+ $$
581
+
582
+ where $\text{cost}_k(F_{i+1}^*, D)$ is the minimum cost among iteration 1, 2, ..., $t + 1$ . Hence, we have that as long as $\text{cost}(F_i, D) > 6OPT(D) + \frac{24k\log n}{\epsilon'}$ , the improvement in cost is at least by a factor of $(1 - \frac{1}{6k})$ . By Theorem 4.2, we have $\text{cost}_k(F_1, D) \leq C(\log n)(6OPT(D) + 6k\triangle\log n/\epsilon)$ for some constant $C > 0$ . Let $T = 6Ck\log\log n$ . We have that
583
+
584
+ $$
585
+ \begin{array}{l} E [ \operatorname {c o s t} (F _ {i}, D) ] \leq (6 O P T (D) + 6 k \epsilon^ {- 1} \triangle \log n) C (\log n) (1 - 1 / 6 k) ^ {6 C k \log \log n} \\ \leq 6 O P T (D) + 6 k \epsilon^ {- 1} \triangle \log n \leq 6 O P T (D) + \frac {2 4 k \log n}{\epsilon^ {\prime}}. \\ \end{array}
586
+ $$
587
+
588
+ Therefore, with probability at least $\left(1 - T / n^{2}\right)$ , there exists an $i \leq T$ s.t. $cost(F_{i},D) \leq 6OPT(D) + \frac{24k\log n}{\epsilon'}$ . Then by using Lemma B.6, one will pick an $F_{j}$ with additional additive error $4\ln n / \epsilon'$ to the min $\{cost(F_j,D), j = 1,2,\dots,T\}$ with probability $1 - 1 / n^2$ . Consequently, we know that the expected additive error is
589
+
590
+ $$
591
+ 2 4 k \triangle \log n / \epsilon^ {\prime} + 4 \log n / \epsilon^ {\prime} = O \left(\epsilon^ {- 1} k ^ {2} \triangle (\log \log n) \log n\right),
592
+ $$
593
+
594
+ with probability $1 - 1/poly(n)$ .
595
+
596
+ # C Extend HST Initialization to $k$ -Means
597
+
598
+ Naturally, our HST method can also be applied to $k$ -means clustering problem. In this section, we extend the HST to $k$ -means and provide some brief analysis similar to $k$ -median. We present the analysis in the non-private case, which can then be easily adapted to the private case. Define the following costs for $k$ -means.
599
+
600
+ $$
601
+ \operatorname {c o s t} _ {k m} ^ {T} (U) = \sum_ {y \in U} \min _ {x \in C _ {0}} \rho^ {T} (x, y) ^ {2}, \tag {12}
602
+ $$
603
+
604
+ $$
605
+ \operatorname{cost}_{km}^{T^{\prime}}(U, C_{1}) = \min_{|F \cap T(v)| = 1, \forall v \in C_{1}} \sum_{y \in U} \min_{x \in F} \rho^{T}(x, y)^{2}, \tag{13}
606
+ $$
607
+
608
+ $$
609
+ OPT_{km}^{T}(U) = \min_{F \subset U, |F| = k} \sum_{y \in U} \min_{x \in F} \rho^{T}(x, y)^{2} \equiv \min_{C_{1}^{\prime}} cost_{km}^{T^{\prime}}(U, C_{1}^{\prime}). \tag{14}
610
+ $$
611
+
612
+ For simplicity, we will use $cost_{km}^{T^{\prime}}(U)$ to denote $cost_{km}^{T^{\prime}}(U, C_{1})$ when the choice of $C_1$ is clear from context. Here, $OPT_{km}^{T}$ (14) is the cost of the global optimal solution with 2-HST metric.
613
+
614
+ Lemma C.1 (Subtree search). $cost_{km}^{T^{\prime}}(U) \leq 17OPT_{km}^{T}(U)$ .
615
+
616
+ Proof. The analysis is similar with the proof of Lemma 3.5. Thus, we mainly highlight the difference. Let us just use some notations the same as in Lemma 3.5 here. Let us consider the clustering with center set be $C^* = \{c_1^*, c_2^*, \ldots, c_k^*\}$ (each center $c_j^*$ is a leaf of subtree whose root be $c_j'$ ), and $S_j'$ be the leaves assigned to $c_j^*$ in optimal k-means clustering in tree metric. Let $S_j$ denote the set of leaves in $S_j'$ whose distance to $c_j^*$ is strictly smaller than its distance to any centers in $C_1$ . Let $P_j$ denote the union of paths between leaves of $S_j$ to its closest center in $C_1$ . Let $v_j''$ be the nodes in $P_j$ with highest level satisfying $T(v_j'') \cap C_1 = \emptyset$ . The score of $v_j''$ is $2^{h_{v_j''}} N(v_j'')$ . That means the swap with a center $v_j'$ into $C_1$ can only reduce $(4 \cdot 2^{h_{v_j''}})^2 N(v_j'')$ to $cost_{km}'(U)$ . We just use $v_j'$ to represent $v_j''$ for later part of this proof for simplicity. By our reasoning, summing all the swaps over $C_1^* \setminus C_1$ gives
617
+
618
+ $$
619
+ \operatorname{cost}_{km}^{T^{\prime}}(U) - OPT_{km}^{T}(U) \leq \sum_{v_{j}^{\prime} \in C_{1}^{*} \backslash C_{1}} N_{v_{j}^{\prime}} \cdot (4 \cdot 2^{h_{v_{j}^{\prime}}})^{2},
620
+ $$
621
+
622
+ $$
623
+ O P T _ {k m} ^ {T} (U) \geq \sum_ {v _ {i} \in C _ {1} \backslash C _ {1} ^ {*}} N _ {v _ {i}} \left(2 ^ {h _ {v _ {i}}}\right) ^ {2}.
624
+ $$
625
+
626
+ Also, based on our discussion on Case 1, it holds that
627
+
628
+ $$
629
+ N _ {v _ {j} ^ {\prime}} 2 ^ {h _ {v _ {j} ^ {\prime}}} - N _ {v _ {i}} 2 ^ {h _ {v _ {i}}} \leq 0.
630
+ $$
631
+
632
+ Summing them together, we have $\text{cost}_{km}^T'(U) \leq 17\text{OPT}_{km}^T(U)$ .
633
+
634
+ ![](images/1b136e570305b3e80697a98704355610a063389aaf85aab8eb5bd3a84b84b7d7.jpg)
635
+
636
+ Next, we show that the greedy leaf search strategy (Algorithm 5) only leads to an extra multiplicative error of 2.
637
+
638
+ Lemma C.2 (Leaf search). $cost_{km}^{T}(U) \leq 2cost_{km}^{T}'(U)$ .
639
+
640
+ Proof. Since the subtrees in $C_1$ are disjoint, it suffices to consider one subtree with root $v$ . With a little abuse of notation, let $cost_1^{T'}(v, U)$ denote the optimal $k$ -means cost within the point set $T(v)$ with one center in 2-HST:
641
+
642
+ $$
643
+ \operatorname {c o s t} _ {1} ^ {T ^ {\prime}} (v, U) = \min _ {x \in T (v)} \sum_ {y \in T (v)} \rho^ {T} (x, y) ^ {2}, \tag {15}
644
+ $$
645
+
646
+ which is the optimal cost within the subtree. Suppose $v$ has more than one children $u, w, \ldots$ , otherwise the optimal center is clear. Suppose the optimal solution of $cost_1^{T'}(v, U)$ chooses a leaf node in $T(u)$ , and our HST initialization algorithm picks a leaf of $T(w)$ . If $u = w$ , then HST chooses the optimal one where the argument holds trivially. Thus, we consider $u \neq w$ . We have the following two observations:
647
+
648
+ - Since one needs to pick a leaf of $T(u)$ to minimize $cost_1^{T'}(v, U)$ , we have $cost_1^{T'}(v, U) \geq \sum_{x \in ch(v), x \neq u} N_x \cdot (2^{h_x})^2$ where $ch(v)$ denotes the children nodes of $v$ .
649
+ - By our greedy strategy, $\text{cost}_1^T(v, U) \leq \sum_{x \in \text{ch}(u)} N_x \cdot (2^{h_x})^2 \leq \text{cost}_1^{T'}(v, U) + N_u \cdot (2^{h_u})^2$ .
650
+
651
+ Since $h_u = h_w$ , we have
652
+
653
+ $$
654
+ 2 ^ {h _ {u}} \cdot (N _ {u} - N _ {w}) \leq 0,
655
+ $$
656
+
657
+ since our algorithm picks subtree roots with highest scores. Then we have $cost_1^T(v, U) \leq cost_1^{T'}(v, U) + N_w \cdot (2^{h_w})^2 \leq 2cost_1^{T'}(v, U)$ . Since the subtrees in $C_1$ are disjoint, the union of centers for $OPT_1^T(v, U)$ , $v \in C_1$ forms the optimal centers with size $k$ . Note that, for any data point $p \in U \setminus C_1$ , the tree distance $\rho^T(p, f)$ for $\forall f$ that is a leaf node of $T(v)$ , $v \in C_1$ is the same. That is, the choice of leaf in $T(v)$ as the center does not affect the $k$ -means cost under 2-HST metric. Therefore, union bound over $k$ subtree costs completes the proof.
658
+
659
+ We are ready to state the error bound for our proposed HST initialization (Algorithm 4), which is a natural combination of Lemma C.1 and Lemma C.2.
660
+
661
+ Theorem C.3 (HST initialization). $cost_{km}^{T}(U) \leq 34OPT_{km}^{T}(U)$ .
662
+
663
+ We have the following result based on Lemma 3.4.
664
+
665
+ Theorem C.4. In a general metric space,
666
+
667
+ $$
668
+ E\left[\operatorname{cost}_{km}(U)\right] = O\left(\left(\min\left\{\log n, \log \triangle\right\}\right)^{2}\right) OPT_{km}(U).
669
+ $$
kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd9ba5484eaa3daa0ecb52190644b756b9fe01dbcf33b0137b8d6cb69cf73a11
3
+ size 1069253
kmedianclusteringviametricembeddingtowardsbetterinitializationwithdifferentialprivacy/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5870135c672a1faf0d9223ef353fe123d63625c39a784058fef4229079668c2a
3
+ size 1227958
rppgtoolboxdeepremoteppgtoolbox/ccf1513f-6339-48d5-896d-284ac3142418_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad0b410e8bcb102d8e95a88f7d772a1b017837e6b761909b519573f2e24cc2e3
3
+ size 147565
rppgtoolboxdeepremoteppgtoolbox/ccf1513f-6339-48d5-896d-284ac3142418_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08b7b93356fcc5e07c4b91ab2083793b9d3dcf942a150eecb3d99a03a9943dda
3
+ size 177134
rppgtoolboxdeepremoteppgtoolbox/ccf1513f-6339-48d5-896d-284ac3142418_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b68511aa6b74286ffcaac10898d5f52254d664e85c769890f0c12c6ccb460e94
3
+ size 1811144
rppgtoolboxdeepremoteppgtoolbox/full.md ADDED
The diff for this file is too large to render. See raw diff
 
rppgtoolboxdeepremoteppgtoolbox/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:266e0eae2b21d8e2a43afdc322714f3b54e3ddd43abf78981cdc2c614c4e41ed
3
+ size 1613403
rppgtoolboxdeepremoteppgtoolbox/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca85d5e39a927d7b90ad745f9e94d8fd920fa7355aa36d8400c77436addf4f07
3
+ size 568948
trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/ab71f100-7bbb-444d-9ce5-a23368f05f0c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac7b73afe37893751f1805a3604d378f5541a648477f940acf2ff0a2918a3b1a
3
+ size 66503
trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/ab71f100-7bbb-444d-9ce5-a23368f05f0c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da7d405225fb6f739d78840042e45ca2113c099d9f853a2f46d03231b16dcaae
3
+ size 79967
trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/ab71f100-7bbb-444d-9ce5-a23368f05f0c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:750315c52ee5a9a54f371a07ab215c7869f2e52ebd3b5a07718513f1583739ba
3
+ size 2036467
trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/full.md ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Trajdata: A Unified Interface to Multiple Human Trajectory Datasets
2
+
3
+ Boris Ivanovic<sup>1</sup> Guanyu Song<sup>2</sup> Igor Gilitschenski<sup>2</sup> Marco Pavone<sup>1,3</sup>
4
+ <sup>1</sup>NVIDIA Research University of Toronto <sup>3</sup>Stanford University
5
+
6
+ # Abstract
7
+
8
+ The field of trajectory forecasting has grown significantly in recent years, partially owing to the release of numerous large-scale, real-world human trajectory datasets for autonomous vehicles (AVs) and pedestrian motion tracking. While such datasets have been a boon for the community, they each use custom and unique data formats and APIs, making it cumbersome for researchers to train and evaluate methods across multiple datasets. To remedy this, we present trajdata: a unified interface to multiple human trajectory datasets. At its core, trajdata provides a simple, uniform, and efficient representation and API for trajectory and map data. As a demonstration of its capabilities, in this work we conduct a comprehensive empirical evaluation of existing trajectory datasets, providing users with a rich understanding of the data underpinning much of current pedestrian and AV motion forecasting research, and proposing suggestions for future datasets from these insights. trajdata is permissively licensed (Apache 2.0) and can be accessed online at https://github.com/NVlabs/trajdata.
9
+
10
+ # 1 Introduction
11
+
12
+ Research in trajectory forecasting (i.e., predicting where an agent will be in the future) has grown significantly in recent years, partially owing to the success of deep learning methods on the task [1]; availability of new large-scale, real-world datasets (see Fig. 1); and investment in its deployment within domains such as autonomous vehicles (AVs) [2,3,4,5,6,7,8,9] and social robots [10,11,12].
13
+
14
+ In addition, recent dataset releases have held associated prediction challenges which have periodically benchmarked the field and spurned new developments [13, 14, 15, 16]. While this has been a boon for research progress, each dataset has a unique data format and development API, making it cumbersome for researchers to train and evaluate methods across multiple datasets. For instance, the recent Waymo Open Motion dataset employs binary TFRecords [17] which differ significantly from nuScenes' foreign-key format [18] and Woven Planet (Lyft) Level 5's compressed zarr files [19]. The variety of data formats has also hindered research on topics which either require or greatly benefit from multi-dataset comparisons, such as prediction model generalization (e.g., [20, 21]). To remedy this, we present trajdata: a unified interface to multiple human trajectory datasets.
15
+
16
+ Contributions. Our key contributions are threefold. First, we introduce a standard and simple data format for trajectory and map data, as well as an extensible API to access and transform such data for research use. Second, we conduct a comprehensive empirical evaluation of existing trajectory datasets, providing users with a richer understanding of the data underpinning much of pedestrian and AV motion forecasting research. Finally, we leverage insights from these analyses to provide suggestions for future dataset releases.
17
+
18
+ ![](images/b2a7a0304134386b1f6b1aecd5a7585ab494cd59f3717efdd29c0b6d899e8ea1.jpg)
19
+ Figure 1: Recent datasets provide access to thousands of hours of autonomous driving data, albeit with different data formats and APIs, complicating the use of multiple datasets in research projects.
20
+
21
+ # 2 Related Work
22
+
23
+ Human Trajectory Datasets. Initial trajectory forecasting research employed video motion tracking datasets for benchmarking, primarily due to the availability of annotated agent positions over time. Of these, the ETH [22] and UCY [23] pedestrian datasets were among the most widely-used [1], containing a total of 1536 pedestrians and challenging behaviors such as couples walking together, groups crossing each other, and groups forming and dispersing. Soon after the successful application of deep learning models to pedestrian trajectory forecasting [24], and as data needs grew in autonomous driving research and industry, numerous large-scale datasets have emerged containing significantly more heterogeneous-agent interactive scenarios (e.g., between vehicles and pedestrians) in urban environments. Fig. [1] visualizes the scale, collection, and annotation strategy of such datasets, with a comprehensive review of earlier human motion datasets available in [1, 25]. In particular, the gradual shift from human annotation to autolabeling can be seen, with the recent large-scale Yandex Shifts [26], Waymo Open Motion [17], and nuPlan [27] datasets employing powerful autolabeling pipelines to accurately label sensor data collected by vehicle fleets at scale.
24
+
25
+ Multi-Dataset Benchmarking. While the increase in datasets and associated challenges has bolstered research, their unique formats increase the complexity of evaluating methods across datasets, complicating efforts to analyze, e.g., prediction model generalization. To address this issue for pedestrian motion data, OpenTraj [25] created dataloaders for different pedestrian motion datasets as part of its effort to evaluate and compare motion complexity across pedestrian datasets. More recently, TrajNet++ [28] and Atlas [29] present multi-dataset benchmarks to systematically evaluate human motion trajectory prediction algorithms in a unified framework. While these efforts have provided the community with multi-dataset benchmarks, they are primarily focused on pedestrian data. In contrast, trajdata tackles the standardization of both pedestrian and autonomous vehicle datasets, including additional data modalities such as maps.
26
+
27
+ # 3 trajdata: A Unified Interface to Multiple Human Trajectory Datasets
28
+
29
+ trajdata is a software package that efficiently compiles multiple disparate dataset formats into one canonical format, with an API to access and transform that data for use in downstream frameworks (e.g., PyTorch [30], which is natively supported). Currently, trajdata supports 8 diverse datasets, comprising 3,216 hours of data, $200+$ million unique agents, and $10+$ locations across 7 countries (see Table 1). To date, trajdata has been extensively used in research on trajectory forecasting [21], pedestrian [31] and vehicle [32, 33] simulation, and AV motion planning [34, 35].
30
+
31
+ # 3.1 Standardized Trajectory and Map Formats
32
+
33
+ Trajectories. For each dataset, trajdata extracts position, velocity, acceleration, heading, and extent (length, width, height) information for all agents in standard SI units (see Fig. 2). In order to support a variety of dataset formats, trajdata has minimal base data requirements: As long as agent positions (i.e., $x$ , $y$ coordinates) are provided, all other dynamic information can be derived automatically. If entire dynamic quantities (e.g., velocity) are not captured in the original dataset, trajdata uses finite differences to compute derivatives by default. Further, missing data between
34
+
35
+ Table 1: Datasets currently supported by trajdata. More details can be found in the appendix.
36
+
37
+ <table><tr><td>Dataset</td><td>Size</td><td>Locations</td><td>Maps?</td><td>Dataset</td><td>Size</td><td>Locations</td><td>Maps?</td></tr><tr><td>ETH [22]</td><td>0.4h</td><td>2</td><td>No</td><td>INTERACTION [39]</td><td>16.5h</td><td>4</td><td>Yes</td></tr><tr><td>UCY [23]</td><td>0.3h</td><td>2</td><td>No</td><td>Lyft Level 5 [19]</td><td>1118h</td><td>1</td><td>Yes</td></tr><tr><td>SDD [40]</td><td>5h</td><td>1</td><td>No</td><td>Waymo Open [17]</td><td>570h</td><td>6</td><td>Yes</td></tr><tr><td>nuScenes [18]</td><td>5.5h</td><td>2</td><td>Yes</td><td>nuPlan [27]</td><td>1500h</td><td>4</td><td>Yes</td></tr></table>
38
+
39
+ ![](images/89d9d6279f1d6ca8d2f48fe33205f58b4b097090f54f34460186c9e15f84c79c.jpg)
40
+ Figure 2: Left: trajdata adopts a tabular representation for trajectory data, leveraging advanced indexing to satisfy user data queries. Right: Agent trajectories from the nuScenes [18] dataset visualized on the scene's VectorMap, containing all of trajdata's core map elements.
41
+
42
+ timesteps is imputed via linear interpolation. trajdata internally represents and stores trajectory data as tabular data frames, allowing for advanced indexing and data grouping depending on user queries and the use of efficient open-source tabular data storage frameworks such as Apache Arrow [36]. Note that each of these default choices (finite differences, linear interpolation, and tabular data frames) can be changed by the end user.
43
+
44
+ Maps. To retain the most information from high-definition (HD) dataset maps, trajdata adopts a polyline representation for map data. This choice matches the vast majority of modern trajectory datasets which provide vector map data and makes them immediately compatible with our format. Currently, there are four core map elements: RoadLane, RoadArea, PedCrosswalk, and PedWalkway. As illustrated in Fig. 2 a RoadLane represents a driveable road lane with a centerline and optional left and right boundaries. A RoadArea represents other drivable areas of roads which are not part of lanes, e.g., parking lots or shoulders. A PedCrosswalk denotes a marked area where pedestrians can cross the road. Finally, a PedWalkway marks sidewalks adjacent to roads. Of these, only RoadLane elements are required to be extracted, other elements are optional (they are absent in some datasets). Our map format additionally supports lane connectivity information in the form of left/right adjacent lanes (i.e., lanes accessible by left/right lane changes) and successor/predecessor lanes (i.e., lanes that continue from / lead into the current lane following the road direction).
45
+
46
+ Each map element is designed to be compatible with popular computational geometry packages, such as Shapely [37], enabling efficient set-theoretic queries to calculate, e.g., road boundary violations. By default, trajdata serializes map data using Google protocol buffers [38], and, in particular, only stores neighboring position differences for efficiency, similar to the implementation used in [19]. Dynamic traffic light information is also supported, and trajdata makes use of a separate data frame to link the traffic signal shown per timestep with the lane ID being controlled.
47
+
48
+ # 3.2 Core trajdata Functionalities
49
+
50
+ Multi-dataset training and evaluation. One of trajdata's core functionalities is aggregating data from multiple datasets in a UnifiedDataset object (a PyTorch Dataset subclass by default).
51
+
52
+ ```python
53
+ from trajdata import UnifiedDataset
54
+ dataset = UnifiedDataset(
55
+     desired_data=["nusc_mini-boston", "sdd-train"], desired_dt=0.1,
56
+     centric="agent", history_sec=(1.0, 3.0), future_sec=(4.0, 4.0),
+ ) # These settings were used to create Figure 2.
57
+ ```
58
+
59
+ ![](images/79b150ce76f7d5bef1125139088e1ad9adde4dfea7148eeb879e317d0f12d131.jpg)
60
+ Figure 3: trajdata can provide agent-centric (or scene-centric) batches of trajectory data for model training and evaluation in associated AgentBatch (or SceneBatch) objects. The indexing and padding strategy of a few core AgentBatch tensors are visualized here.
61
+
62
+ The example above creates a dataset that provides agent-centric data batches (i.e., each batch element contains data for one agent at one timestep, see Fig. 3) sourced from only Boston in the nuScenes mini dataset ("nusc_mini-boston") as well as the Stanford Drone Dataset's entire training split ("sdd-train"), with time upsampling ensuring all data is at $10\mathrm{Hz}$ (desired_dt=0.1). history_sec=(1.0, 3.0) specifies that the predicted agent's trajectory must have at least $1.0s$ of history available, with padding for any missing data up to $3.0s$ (see Fig. 3). Similarly, future_sec=(4.0, 4.0) requires that the predicted agent's trajectory have $4.0s$ of future available.
63
+
64
+ trajdata provides many other capabilities in addition to the above, including scene-centric batches (i.e., data for all agents in a scene at the same timestep), semantic search (e.g., nuScenes [18] provides text descriptions for each scene), agent filtering (e.g., only vehicles), coordinate frame standardization (i.e., making trajectories relative to the predicted agent's frame at the current timestep), map rasterization (e.g., if encoding scene context with a convolutional architecture), data augmentations (e.g., additive Gaussian noise to past trajectories), and general data transforms via custom functions.
65
+
66
+ Map API. trajdata's standardized vector map object is VectorMap. In addition to providing access to individual map elements (e.g., lanes, sidewalks), it also leverages precomputed spatial indices to make nearest neighbor queries very efficient.
67
+
68
+ ```python
69
+ from trajdata import MapAPI, VectorMap
70
+ vec_map: VectorMap = MapAPI(<=>).get_map("nusc_mini:boston-seaport")
71
+ lane = vec_map.get_closest_lane(np.array([50.0, 100.0, 0.0]))
72
+ ```
73
+
74
+ In the example above, the polyline map of Boston's seaport neighborhood (from nuScenes [18]) is loaded from the user's trajdata cache (its path would be specified instead of <=>) and queried for the closest RoadLane to a given $x, y, z$ position.
75
+
76
+ Simulation Interface. trajdata also provides a simulation interface that enables users to initialize a scene from real-world data and simulate agents from a specific timestep onwards. Simulated agent motion is recorded by trajdata and can be analyzed with a library of evaluation metrics (e.g., collision and offroad rates, statistical differences to real-world data distributions) or exported to disk. This functionality was extensively used to benchmark learning-based traffic models in [32, 33].
77
+
78
+ ```python
79
+ from trajdata.simulation import SimulationScene
80
+ simscene = SimulationScene(<=>) # Specify initial scene to use.
81
+ obs = simscene.reset() # Initialized from real agent states in data.
82
+ for t in range(10): # Simulating 10 timesteps in this example.
83
+     new_state_dict = ... # Compute the new state of sim agents.
84
+     obs = simscene.step(new_state_dict)
85
+ ```
86
+
87
+ In this example, a SimulationScene is initialized from a scene in an existing dataset (specified with the <=> arguments), after which it can be accessed similarly to an OpenAI Gym [41] reinforcement learning environment, using methods like reset and step.
88
+
89
+ # 4 Dataset Comparisons and Analyses
90
+
91
+ In this section, we leverage trajdata's standardized trajectory and map representations to directly compare many popular AV and pedestrian trajectory datasets along a variety of metrics. Our goal is to provide a deeper understanding of the datasets underpinning much of human motion research by analyzing their data distributions, motion complexity, and annotation quality.
92
+
93
+ ![](images/c931ed8e26cced4da7adc929b0fdb1c9f6ababf1377a94237f94af830cf1b5b3.jpg)
94
+
95
+ ![](images/516cf03e97691e681990e72210e25e736fdecd3ad90d00b12824213be1f5ff16.jpg)
96
+ Figure 4: Left: Number of unique agents per dataset. Right: Distribution of agent types per dataset.
97
+
98
+ Note that we only analyze dataset training and validation splits, since these are the splits predominantly used by methods for development. We explicitly do not analyze test splits since they are either not available publicly or because doing so may harm existing benchmark validity. Further, while trajdata supports data frequency up- and down-scaling via interpolation and down-sampling, all of the following analyses were conducted in each dataset's native data resolution. All analyses were performed using the latest version of trajdata at the time of writing (v1.3.2) on a desktop computer with 64 GB of RAM and an AMD Ryzen Threadripper PRO 3975WX 32-core CPU. For larger datasets, an NVIDIA DGX-1 server with 400 GB of RAM and 64 CPU cores was used.
99
+
100
+ # 4.1 Agent Distributions
101
+
102
+ Population. To build a fundamental understanding of the considered datasets, we first analyze and compare agent populations. Fig. 4 visualizes overall agent counts and proportions per dataset. As can be expected, modern large-scale AV datasets such as Waymo [17] and Lyft Level 5 [19] contain multiple orders of magnitude more agents than earlier pedestrian datasets SDD [40], ETH [22], or UCY [23]. However, as we will show later, pedestrian datasets still provide value in terms of agent diversity, density, and motion complexity in popular social robotics settings such as college campuses.
103
+
104
+ As can be seen in Fig. 4 (right), the vast majority of agents in AV datasets are vehicles or pedestrians, with the exception of Lyft Level 5 [19] where $71.8\%$ of agents have unknown types. In contrast, bicycles (a relatively niche category in many datasets) account for $41\%$ of all agents in SDD [40] (indeed, biking is a popular method of transportation around Stanford's large campus). Such imbalances in agent populations are indicative of real-world distributions, e.g., motorcycles make up only $3.5\%$ of vehicles in the USA [42], similar to their proportion in nuScenes [18] $(1.6\%)$ .
105
+
106
+ Density and Observation Duration. In addition to which agent types are captured in scenes, the amount and density of agents can be an important desideratum (e.g., for research on crowd behavior) or computational consideration (e.g., for methods whose runtime scales with the number of agents). Fig. 5 visualizes the distribution of the number of agents observed per scene per timestep (left), as well as the maximum number of simultaneous agents per scene (right). As can be seen, urban scenarios captured in modern AV datasets frequently contain $100+$ detected agents (with a long tail extending to $250+$ agents). In this respect, ETH [22], UCY [23], and INTERACTION [39] are limited by their fixed-camera and drone-based data-collection strategies compared to the comprehensive on-vehicle sensors used in nuScenes [18], Waymo [17], Lyft [19], and nuPlan [27]. However, while ETH [22], UCY [23], and INTERACTION [39] do not contain as many agents, they consistently provide the highest-density scenes (see Fig. 6), especially for pedestrians and bicycles. We compute agent density by dividing the number of agents in a scene by their overall bounding rectangle area, as in [25].
107
+
108
+ Each dataset supported by trajdata adopts different scenario lengths and corresponding agent observation durations. As can be seen in Fig. 7 AV datasets are comprised of scenarios with lengths ranging from $4s$ in INTERACTION [39] to $25s$ in Lyft Level 5 [19]. The peaks at the right of each AV dataset duration distribution are caused by the always-present ego-vehicle (for Vehicles) as well as other agents detected throughout the scene (common in steady traffic, parking lots, or at an intersection with stopped traffic and pedestrians waiting to cross). One can also see that Lyft Level 5 [19] agent detections are much shorter-lived compared to other AV datasets' relatively uniform distributions (Waymo [17], nuScenes [18], and nuPlan [27]). This could be caused by Lyft's
109
+
110
+ ![](images/d881a1cd17df4ba397330f33e34b75ac0faa6c74019c78df482693a64679269e.jpg)
111
+ Figure 5: Left: Number of agents present per timestamp and scene. Right: Maximum number of agents present at the same time per scene.
112
+
113
+ ![](images/092a088fbe98c04da1a4afbcb214c43e086f86f27e84845c297614a35188179a.jpg)
114
+ Figure 6: Agent density per timestep and scene.
115
+
116
+ annotations being collected from an onboard perception system [19] (which are affected by noise and occlusions) vs. human annotators [18] or autolabeling [27, 17] which can leverage data from past and future timesteps to be more robust to such errors. We conduct additional comparisons between data collection methodologies in Section 4.3.
117
+
118
+ Ego-Agent Distances. When developing AV perception systems, an important consideration is the sensor range(s) necessary to facilitate the desired prediction and planning horizons as well as provide advanced warning of critical situations (e.g., stopped traffic on a highway). In Fig. 8, we compare the distribution of ego-agent distances and find that, while nuScenes [18] and Lyft Level 5 [19] have long-tailed distributions extending past $200m$ , Waymo [17] and nuPlan [27] appear to have artificial cut-offs at $75 - 80m$ , potentially to maintain data quality by avoiding poor data from distant agents. However, it would be more useful to maintain distant detections and add uncertainty outputs from the autolabeler to support uncertain long-range detection research in addition to improving autolabeling.
119
+
120
+ Mapped Areas. HD maps are a core component of many AV datasets, frequently leveraged in trajectory forecasting and motion planning research to provide scene context and geometric lane information (e.g., for global search-based planning and trajectory optimization). Current AV dataset maps are very large (see Table 2 in the appendix) and comprehensive, spanning multiple neighborhoods in different cities. However, not all HD maps are created equal, commonly differing along three axes: Area completeness, lane definitions, and traffic lights. While most AV datasets provide complete HD maps of neighborhoods, Waymo [17] differs by only providing local map crops per scenario without a common reference frame across scenarios [2]. This also significantly increases the storage requirements of Waymo [17] maps compared to other datasets.
121
+
122
+ Lane definitions can also differ significantly between datasets, with intersections being a notable differentiator. For instance, the nuScenes dataset [18] does not annotate intersections fully, opting for only lane centerlines without associated edges (Fig. 2 shows an example). Lyft Level 5 [19] and nuPlan [27] both include full lane center and edge information for all possible motion paths through an intersection. Waymo [17] maps are unique in that they provide full lane center and boundary information, but there are many gaps in the associations between lane centerlines and boundaries, making it difficult to construct lane edge polylines or lane area polygons. As a result, we exclude Waymo maps from map-based analyses in this work.
123
+
124
+ # 4.2 Motion Complexity
125
+
126
+ Measuring the complexity of driving scenarios is an important open problem in the AV domain, with a variety of proposed approaches ranging from heuristic methods [25] to powerful conditional behavior
127
+
128
+ ![](images/321dba5f1b9b22ba2f28eca7be984058d0b952f4fed4e01be5724c948a9f5af5.jpg)
129
+ Figure 7: Distributions of the length of time agents are observed in each scene.
130
+
131
+ ![](images/6ca9af5615af36ef07a1adfe72456f5188bc42126655dd5d39b8eae838e3b516.jpg)
132
+
133
+ ![](images/51997e39bdd2c52f8b91228cfaf4357f3727a30e1edf6ab381604d248fd97968.jpg)
134
+
135
+ ![](images/bf237d8960bae0313d334fae0d4372bb6bdf4c6c1eb649edaced94c697db7cbd.jpg)
136
+
137
+ ![](images/3e63734efaaf404a0c0acdf978ae15c6bc6e7be97534c9ba4af0c3642a9b19a7.jpg)
138
+
139
+ ![](images/0e8c899009729487f7aeff98084bfba9a728f12330a3e7e4a037ea9d86a15378.jpg)
140
+ Figure 8: Distribution of distances between agents and data-collecting ego-vehicle in AV datasets.
141
+
142
+ ![](images/e1ac744692f645988aa7c556c1ffaec1f24fe8083a2430df9d7f0bb8b979c86d.jpg)
143
+
144
+ ![](images/b2707884397575d93f821868baf82a026e47f33e0a33b89b00dbd4c6c34699c5.jpg)
145
+
146
+ ![](images/d158b649575570025ea9e62c4a0bb370e4ade971c0806eaf9c9878b5f9cab927.jpg)
147
+
148
+ ![](images/c258f9d1e13864ee78abcd540335b48bfbae5d0d8f5567ff536e0c5a22058322.jpg)
149
+
150
+ prediction models [43]. To avoid potential biases in analyzing datasets with an externally-trained model, we employ simple and interpretable heuristics similar to [25].
151
+
152
+ Motion Diversity. We first analyze distributions of dynamic agent quantities (e.g., speed, acceleration, jerk). As can be seen in Fig. 9, the majority of speed distributions have high peaks at zero (no motion). This is corroborated by Table 3 in the appendix, which shows that a significant portion of agents are stationary in many datasets, especially for nuScenes [18] (17.5%) and Waymo [17] (53.6%). After the initial peak, agent speed distributions drop sharply to a roughly uniform plateau (up to $20m/s$ for vehicles) before dropping completely around $30m/s$ (a common highway speed around the world).
153
+
154
+ While SDD [40] and INTERACTION [39] have sensible vehicle speeds, their pedestrian speeds can be too high. Such high speeds may be caused by annotations near the edge of drone camera view or by rectification artifacts near the image border. Additionally, the very long-tailed distribution of Lyft [19] and Waymo [17] vehicle, pedestrian, and bicycle speeds (exceeding $60m / s$ ) shows a remaining area of improvement for state-of-the-art AV perception systems and autolabeling pipelines. Comparisons of acceleration and jerk can be found in the appendix. Overall, from dynamic quantities alone, Waymo [17] and Lyft [19] provide the most diversity in agent motion. If such long-tailed data is undesirable, the INTERACTION [39] dataset provides the most realistic set of vehicle speeds.
155
+
156
+ Trajectory Nonlinearity. To analyze the spatial diversity of agent trajectories, we first compare each agent's heading to their initial timestep. As can be seen in Fig. 10 and reiterating earlier analyses, the vast majority of human movement is straight and linear ( $\Delta h = 0$ ). Moving away from the center, we also see repeated symmetric peaks at $\pm \frac{\pi}{2}$ (capturing left and right turns) and $\pm k\pi$ in some datasets. One possible reason for these periodic peaks in the distribution is an artifact of the autolabeling methods used in the datasets (since only datasets that autolabel sensor data are affected), another is that their respective scene geometries contain more roundabouts, cul-de-sacs, and repeated turns than other datasets (more detailed heading distributions can be found in the appendix). We can also see that pedestrians' distributions are more uniform as they do not have to adhere to rigid road geometry.
157
+
158
+ Path Efficiency. Lastly, we also measure agent path efficiencies, defined as the ratio of the distance between trajectory endpoints to the trajectory length [25]. Intuitively, the closer to $100\%$ , the closer the trajectory is to a straight line. As can be seen in Fig. 15 in the appendix, most path efficiency distributions are uniformly distributed, with peaks near $100\%$ , echoing earlier straight-line findings. However, the INTERACTION [39] dataset is an outlier in that its agent trajectories are predominantly straight lines with much less curved motion than other AV and pedestrian datasets.
159
+
160
+ # 4.3 Annotation Quality
161
+
162
+ While analyzing datasets' true annotation accuracy would be best, neither we nor the original data annotators have access to the underlying real-world ground truth. As a proxy, we instead analyze the self-consistency of annotations in the form of incidence rates of collisions between agents, off-road driving, and uncomfortable high-acceleration events (using $0.4g$ as a standard threshold [44, 45]).
163
+
164
+ ![](images/9a24f61180bc3a82b32790fa60897f3d11cf4c6e7573d848371a328b0c63e01b.jpg)
165
+ Figure 9: Agent speed distributions per dataset and agent type.
166
+
167
+ ![](images/a40bf8bc3376b96d4090c88a9bf095d560b51cc1597c98633ff0ef04cbe40b1a.jpg)
168
+
169
+ ![](images/da68c665debbad0c9b2a33bd15430013e1f199510c08412d541c6fa85d7f983f.jpg)
170
+
171
+ ![](images/770743230c906a9e2bf910afcf1f0c21b7bd0b22ea60ceaf84a472e55df6ddb4.jpg)
172
+
173
+ ![](images/628bcdd77b4d2c677a09883bd18d7afe28a2712e2de37b02363dedfadfe98b66.jpg)
174
+
175
+ ![](images/b469f721061c9fabaef71cea650552bd0e5bf7ca51c91cc7646cec9598056d42.jpg)
176
+ Figure 10: Changes in heading relative to an agent's first timestep.
177
+
178
+ ![](images/e66a0fa7e2c9da96c1747c9cd3569265b4a52fcdf9bea910a10fd2387b5579f6.jpg)
179
+
180
+ ![](images/8c17fe52798b6cf435404ae2b14088ee85b4296ebde08a8265d41c41d8490522.jpg)
181
+
182
+ ![](images/ee807c481a152d96479767302b6003bb33f4df97dd6860b267e481be2dcc455b.jpg)
183
+
184
+ ![](images/9b18be54760711bfa1e15cadb6e06e5ddff15ed04fe97ee12e1aa0e3e3946dc2.jpg)
185
+
186
+ Virtually all observed agent data is free of collisions and off-road driving, save for rare one-offs (e.g., the INTERACTION dataset contains a minor car accident [39]). We denote bounding box intersections between agents as collisions, and agent center-of-mass exiting the road boundary as off-road driving. Collisions typically indicate errors in bounding box annotations, whereas off-road driving can indicate erroneous bounding box dimensions, missing map coverage, or harsh driving that, e.g., cuts corners during a right turn.
187
+
188
+ As can be seen in Fig. 11 (left), most vehicles in datasets experience collision rates below $5\%$ . Of particular note is the fact that state-of-the-art autolabeling systems (e.g., used in Waymo [17]) are nearly matching the accuracy of human annotations (e.g., used in nuScenes [18]) in terms of resulting collision rates. However, detecting agents from a near-ground perspective (even with 3D LiDAR) is a very challenging task, and current performance still lags behind high altitude viewpoints. In particular, the INTERACTION [39] dataset achieves orders of magnitude lower vehicle collision, off-road, and harsh acceleration rates owing to its drone-based data collection strategy. In theory, SDD [40] should enjoy a similar advantage, but it only provides axis-aligned bounding box annotations (which overestimate agent extents) and Stanford's college campus contains many more interactive agents than other urban environments. More generally, the notion of bounding box intersections as collisions does not transfer exactly to pedestrians as they can enter/exit cars and walk in close groups, and further study is needed to robustly distinguish between errant motion and normal interactive motion.
189
+
190
+ In Fig. 11 (middle), we find that vehicles in general experience very few ( $< 1\%$ ) harsh acceleration events, with Waymo [17], Lyft [19], and nuScenes [18] all having the highest incidence, commensurate with their earlier-discussed long-tail acceleration distributions. Lastly, we find in Fig. 11 (right) that the INTERACTION [39] and nuPlan [27] agent annotations are well-aligned onto their maps, whereas nuScenes [18] suffers from poor map coverage away from main roads (there are many annotated parked cars next to the main road) and Lyft [19] suffers from high false positive detections next to the main road (the majority of which take the Unknown class).
191
+
192
+ # 5 Conclusions and Recommendations
193
+
194
+ The recent releases of large-scale human trajectory datasets have significantly accelerated the field of AV research. However, their unique data formats and custom developer APIs have complicated multi-dataset research efforts (e.g., [20, 21]). In this work, we present trajdata, a unified trajectory data loader that aims to harmonize data formats, standardize data access APIs, and simplify the process of using multiple AV datasets within the AV research community with a simple, uniform, and efficient data representation and development API. We used trajdata to comprehensively compare existing trajectory datasets, finding that, in terms of annotation self-consistency, drone-based data collection methods yield significantly more accurate birds-eye view bounding box annotations than even state-of-the-art AV perception stacks with LiDAR (albeit with much less spatial coverage), modern
195
+
196
+ ![](images/5d1a4b1c264ca8172c04f66b42ddb90f984695098f6ab92c2b95600a355cd32e.jpg)
197
+ Figure 11: Self-consistency failure rates per dataset and agent type, in the form of collision (left), high vehicle acceleration (middle), and off-road (right) rates.
198
+
199
+ ![](images/c6cb7421603f5194fc9bb3f94c8618f71edb645cce4742a1605acda4513fc04e.jpg)
200
+
201
+ ![](images/0c4cd106c9ed76fbabf9c755e6a468be873f80980ed586b8891d99d5b29ec463.jpg)
202
+
203
+ autolabeling pipelines are nearing human annotation performance, and smaller-scale pedestrian datasets can still be useful for investigations requiring high-agent-density scenarios.
204
+
205
+ As concrete recommendations, we saw that some datasets artificially limit the distance agents are autolabeled. Instead, it would be more useful to the long-range detection community to remove such restrictions, but add autolabeler-output uncertainties to long-range detections, supporting uncertain perception research along the way. Further, incorporating explicit self-consistency checks within autolabeling pipelines and catching, e.g., collisions, prior to release can both improve the autolabeling method as well as the resulting data labels.
206
+
207
+ More broadly, providing researchers with access to more data comprised of various agent types from diverse geographies should help in modeling rare agent types and behaviors, in addition to aiding in the generalization of methods to multiple geographies. However, as we have seen in prior sections, there is an overwhelming bias towards straight line driving, and one capability missing from trajdata is the ability to (re)balance data on a semantic (behavioral) level. Finally, even if lower-level trajectory classes (e.g., driving straight, turning left/right, slowing down, speeding up, etc) are balanced, an important higher-level consideration during original dataset curation time is to ensure that AV datasets explore all geographic regions within an environment, and not only those of certain socioeconomic statuses or transportation access.
208
+
209
+ Future work will address the current limitations of trajdata (e.g., expanding the number of supported datasets and new capabilities such as geometric map element associations to support Waymo-like map formats [17]). Further, incorporating sensor data would also enable perception research as well as joint perception-prediction-planning research, an exciting emerging AV research field.
210
+
211
+ # Acknowledgments and Disclosure of Funding
212
+
213
+ We thank all past and present members of the NVIDIA Autonomous Vehicle Research Group for their code contributions to trajdata and feedback after using it in projects. We additionally thank Leon De Andrade, Alex Naumann, and Stepan Konev for their contributions to trajdata on GitHub.
214
+
215
+ # References
216
+
217
+ [1] A. Rudenko, L. Palmieri, M. Herman, K. M. Kitani, D. M. Gavrila, and K. O. Arras, "Human motion trajectory prediction: A survey," Int. Journal of Robotics Research, vol. 39, no. 8, pp. 895-935, 2020.
218
+ [2] General Motors, "Self-driving safety report," 2018, Available at https://www.gm.com/content/dam/company/docs/us/en/gmcom/gmsafetyreport.pdf.
219
+ [3] Uber Advanced Technologies Group, "A principled approach to safety," 2020, Available at https://uber.app.box.com/v/UberATGSafetyReport.
220
+ [4] Lyft, "Self-driving safety report," 2020, Available at https://2eg1kz1onwfq1djllo2xh4bb-wpengine.netdna-ssl.com/wp-content/uploads/2020/06/Safety_Report_2020.pdf
221
+ [5] Waymo, "Safety report," Waymo LLC, 2021, Available at https://waymo.com/safety/safety-report.
222
+
223
+ [6] Argo AI, "Developing a self-driving system you can trust," Apr. 2021, Available at https://www.argo.ai/wp-content/uploads/2021/04/ArgoSafetyReport.pdf
224
+ [7] Motional, "Voluntary safety self-assessment," 2021, Available at https://drive.google.com/file/d/1JjfQByU_hWvSfkWzQ8PK2ZOZfVCqQGDB/view.
225
+ [8] Zoox, "Safety report volume 2.0," 2021, Available at https://zoox.com/safety/.
226
+ [9] NVIDIA, "Self-driving safety report," 2021, Available at https://images.nvidia.com/content/ self-driving-cars/safety-report/auto-print-self-driving-safety-report-2021-update.pdf.
227
+ [10] T. Kruse, A. K. Pandey, R. Alami, and A. Kirsch, "Human-aware robot navigation: A survey," Robotics and Autonomous Systems, vol. 61, no. 12, pp. 1726-1743, 2013.
228
+ [11] S. F. Chik, C. F. Yeong, E. L. M. Su, T. Y. Lim, Y. Subramaniam, and P. J. H. Chin, “A review of social-aware navigation frameworks for service robot in dynamic human environments,” Journal of Telecommunication, Electronic and Computer Engineering, vol. 8, no. 11, pp. 41–50, 2016.
229
+ [12] P. A. Lasota, T. Fong, and J. A. Shah, "A survey of methods for safe human-robot interaction," Foundations and Trends in Robotics, vol. 5, no. 4, pp. 261–349, 2017.
230
+ [13] nuTonomy, “nuscenes prediction challenge,” https://www.nuscenes.org/prediction?externalData=all&mapData=all&modalities=Any, 2020.
231
+ [14] Lyft Level 5, "Lyft motion prediction for autonomous vehicles," https://www.kaggle.com/competitions/lyft-motion-prediction-autonomous-vehicles, 2020.
232
+ [15] Waymo, "Waymo open dataset motion prediction challenge," https://waymo.com/open/challenges/, 2021.
233
+ [16] Yandex Research, "Shifts challenge: Robustness and uncertainty under real-world distributional shift," https://research.yandex.com/shifts, 2021.
234
+ [17] S. Ettinger, S. Cheng, B. Caine, C. Liu, H. Zhao, S. Pradhan, Y. Chai, B. Sapp, C. Qi, Y. Zhou, Z. Yang, A. Chouard, P. Sun, J. Ngiam, V. Vasudevan, A. McCauley, J. Shlens, and D. Anguelov, "Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset," in IEEE Int. Conf. on Computer Vision, 2021.
235
+ [18] H. Caesar, V. Bankiti, A. H. Lang, S. Vora, V. E. Liong, Q. Xu, A. Krishnan, Y. Pan, G. Baldan, and O. Beijbom, "nuScenes: A multimodal dataset for autonomous driving," in IEEE Conf. on Computer Vision and Pattern Recognition, 2020.
236
+ [19] J. Houston, G. Zuidhof, L. Bergamini, Y. Ye, A. Jain, S. Omari, V. Iglovikov, and P. Ondruska, "One thousand and one hours: Self-driving motion prediction dataset," in Conf. on Robot Learning, 2020.
237
+ [20] T. Gilles, S. Sabatini, D. Tsishkou, B. Stanciulescu, and F. Moutarde, "Uncertainty estimation for cross-dataset performance in trajectory prediction," in IEEE Int. Conf. on Robotics and Automation Workshop on Fresh Perspectives on the Future of Autonomous Driving, 2022.
238
+ [21] B. Ivanovic, J. Harrison, and M. Pavone, "Expanding the deployment envelope of behavior prediction via adaptive meta-learning," in IEEE Int. Conf. on Robotics and Automation, 2023.
239
+ [22] S. Pellegrini, A. Ess, K. Schindler, and L. v. Gool, "You'll never walk alone: Modeling social behavior for multi-target tracking," in IEEE Int. Conf. on Computer Vision, 2009.
240
+ [23] A. Lerner, Y. Chrysanthou, and D. Lischinski, “Crowds by example,” Computer Graphics Forum, vol. 26, no. 3, pp. 655–664, 2007.
241
+ [24] A. Alahi, K. Goel, V. Ramanathan, A. Robicquet, L. Fei-Fei, and S. Savarese, "Social LSTM: Human trajectory prediction in crowded spaces," in IEEE Conf. on Computer Vision and Pattern Recognition, 2016.
242
+
243
+ [25] J. Amirian, B. Zhang, F. V. Castro, J. J. Baldelomar, J.-B. Hayet, and J. Pettré, “OpenTraj: Assessing prediction complexity in human trajectories datasets,” in Asian Conference on Computer Vision, 2020.
244
+ [26] A. Malinin, N. Band, Y. Gal, M. Gales, A. Ganshin, G. Chesnokov, A. Noskov, A. Ploskonosov, L. Prokhorenkova, I. Provilkov, V. Raina, V. Raina, D. Roginskiy, M. Shmatova, P. Tigas, and B. Yangel, “Shifts: A dataset of real distributional shift across multiple large-scale tasks,” in Conf. on Neural Information Processing Systems Datasets and Benchmarks Track, 2021. [Online]. Available: https://openreview.net/forum?id=qM45LHaWM6E
245
+ [27] H. Caesar, J. Kabzan, K. S. Tan, W. K. Fong, E. Wolff, A. Lang, L. Fletcher, O. Beijbom, and S. Omari, "nuPlan: A closed-loop ML-based planning benchmark for autonomous vehicles," 2021, Available at https://arxiv.org/abs/2106.11810.
246
+ [28] P. Kothari, S. Kreiss, and A. Alahi, "Human trajectory forecasting in crowds: A deep learning perspective," IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 7, pp. 7386-7400, 2022.
247
+ [29] A. Rudenko, L. Palmieri, W. Huang, A. J. Lilienthal, and K. O. Arras, "The atlas benchmark: An automated evaluation framework for human motion prediction," in IEEE Int. Conf. on Robot and Human Interactive Communication, 2022.
248
+ [30] A. Paszke, S. Gross, S. Chintala, G. Chanan, E. Yang, Z. DeVito, Z. Lin, A. Desmaison, L. Antiga, and A. Lerer, "Automatic differentiation in PyTorch," in Conf. on Neural Information Processing Systems - Autodiff Workshop, 2017.
249
+ [31] D. Rempe, Z. Luo, X. B. Peng, Y. Yuan, K. Kitani, K. Kreis, S. Fidler, and O. Litany, "Trace and Pace: Controllable pedestrian animation via guided trajectory diffusion," in IEEE Conf. on Computer Vision and Pattern Recognition, 2023.
250
+ [32] D. Xu, Y. Chen, B. Ivanovic, and M. Pavone, "BITS: Bi-level imitation for traffic simulation," in IEEE Int. Conf. on Robotics and Automation, 2023.
251
+ [33] Z. Zhong, D. Rempe, D. Xu, Y. Chen, S. Veer, T. Che, B. Ray, and M. Pavone, "Guided conditional diffusion for controllable traffic simulation," in IEEE Int. Conf. on Robotics and Automation, 2023.
252
+ [34] F. Christianos, P. Karkus, B. Ivanovic, S. V. Albrecht, and M. Pavone, "Planning with occluded traffic agents using bi-level variational occlusion models," in IEEE Int. Conf. on Robotics and Automation, 2023.
253
+ [35] Y. Chen, P. Karkus, B. Ivanovic, X. Weng, and M. Pavone, "Tree-structured policy planning with learned behavior models," in IEEE Int. Conf. on Robotics and Automation, 2023.
254
+ [36] The Apache Software Foundation, "Apache arrow," 2023, Available at https://github.com/apache/arrow
255
+ [37] S. Gillies, C. van der Wel, J. Van den Bossche, M. W. Taves, J. Arnott, B. C. Ward, and others, "Shapely," 2023, Available at https://github.com/shapely/shapely.
256
+ [38] Google Inc., "Protocol buffers - Google's data interchange format," 2023, Available at https://github.com/protocolbuffers/protobuf.
257
+ [39] W. Zhan, L. Sun, D. Wang, H. Shi, A. Clausse, M. Naumann, J. Kümmerle, H. Königshof, C. Stiller, A. de La Fortelle, and M. Tomizuka, “INTERACTION Dataset: An international, adversarial and cooperative motion dataset in interactive driving scenarios with semantic maps,” 2019, Available at https://arxiv.org/abs/1910.03088.
258
+ [40] A. Robicquet, A. Sadeghian, A. Alahi, and S. Savarese, "Learning social etiquette: Human trajectory prediction in crowded scenes," in European Conf. on Computer Vision, 2016.
259
+ [41] G. Brockman, V. Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba. (2016) OpenAI Gym. Available at https://arxiv.org/abs/1606.01540.
260
+
261
+ [42] Bureau of Transportation Statistics, "National Transportation Statistics. Number of U.S. Aircraft, Vehicles, Vessels, and Other Conveyances," U.S. Dept. of Transportation, Tech. Rep., 2023.
262
+ [43] E. Tolstaya, R. Mahjourian, C. Downey, B. Varadarajan, B. Sapp, and D. Anguelov, "Identifying driver interactions via conditional behavior prediction," in IEEE Int. Conf. on Robotics and Automation, 2021.
263
+ [44] B. G. Simons-Morton, J. D. Ouimet, J. Wang, S. G. Klauer, S. E. Lee, and T. A. Dingus, “Hard braking events among novice teenage drivers by passenger characteristics,” Driving Assessment Conference, vol. 5, pp. 236-242, 6 2009. [Online]. Available: https://pubs.lib.uiowa.edu/driving/article/id/28044/
264
+ [45] S. G. Klauer, T. A. Dingus, V. L. Neale, J. D. Sudweeks, and D. J. Ramsey, “Comparing real-world behaviors of drivers with high versus low rates of crashes and near-crashes,” National Highway Traffic Safety Administration, Tech. Rep. DOT HS 811 091, 2009.
trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6df49247ebbc33c7359ae595b489e33ac7d8790688672c95d9dae9f3b903593d
3
+ size 429372
trajdataaunifiedinterfacetomultiplehumantrajectorydatasets/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74a3476cf73a0cf9c86835a6d3d9332de7ce092a31e9408671b656562351ceb1
3
+ size 297652
whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/87c002e3-bc86-4340-8296-e744e39e0d3b_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03d4a8246da1e58975a1700fc7a2193756543a7833a819655a2418764dbd29ff
3
+ size 82804
whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/87c002e3-bc86-4340-8296-e744e39e0d3b_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2336bc8226eba7108103d459ae5c00ffbb8f6b60fdfb2f608bf4e992937d1c0a
3
+ size 102818
whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/87c002e3-bc86-4340-8296-e744e39e0d3b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6084830379102115d0beda4586ee07ee64645ec84175aac01811e3014a102d6
3
+ size 3860189
whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/full.md ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # "Why Not Looking backward?" A Robust Two-Step Method to Automatically Terminate Bayesian Optimization
2
+
3
+ Shuang Li
4
+
5
+ Control and Simulation Center, Harbin Institute of Technology, China. National Key Laboratory of Modeling and Simulation for Complex Systems, China. ShuangLi.hit@outlook.com
6
+
7
+ Ke Li
8
+
9
+ Department of Computer Science University of Exeter, EX4 4RN, Exeter, UK. k.li@exeter.ac.uk
10
+
11
+ Wei Li*
12
+
13
+ Control and Simulation Center, Harbin Institute of Technology, China. National Key Laboratory of Modeling and Simulation for Complex Systems, China. frank@hit.edu.cn
14
+
15
+ # Abstract
16
+
17
+ Bayesian Optimization (BO) is a powerful method for tackling expensive black-box optimization problems. As a sequential model-based optimization strategy, BO iteratively explores promising solutions until a predetermined budget, either iterations or time, is exhausted. The decision on when to terminate BO significantly influences both the quality of solutions and its computational efficiency. In this paper, we propose a simple, yet theoretically grounded, two-step method for automatically terminating BO. Our core concept is to proactively identify if the search is within a convex region by examining previously observed samples. BO is halted once the local regret within this convex region falls below a predetermined threshold. To enhance numerical stability, we propose an approximation method for calculating the termination indicator by solving a bilevel optimization problem. We conduct extensive empirical studies on diverse benchmark problems, including synthetic functions, reinforcement learning, and hyperparameter optimization. Experimental results demonstrate that our proposed method saves up to $\approx 80\%$ computational budget yet is with an order of magnitude smaller performance degradation, comparing against the other peer methods. In addition, our proposed termination method is robust in terms of the setting of its termination criterion.
18
+
19
+ # 1 Introduction
20
+
21
+ "Nature does not hurry, yet everything is accomplished." — Lao Tzu
22
+
23
+ In this paper, we consider the black-box optimization problem (BBOP) defined as follows:
24
+
25
+ $$
26
+ \underset{\mathbf{x} \in \Omega}{\text{maximize}} \quad f(\mathbf{x}), \tag{1}
27
+ $$
28
+
29
+ ![](images/cca390747e4818979f818e127f5cb294581998ca0a160c5b4c8f3e1633e9cd5a.jpg)
30
+ Figure 1: Trajectories of termination criteria used in [22], [28] and [24] on Ackley and Levy function where $n = 1$ . Results are collected from 21 independent runs of vanilla BO while the mean value of termination indicator of each termination criterion is plotted as the solid line associated with the confidence interval. Please refer to Section 3.2 for a description of these termination criteria, as well as the meaning of $\kappa_{\mathrm{PI}}$ , $\kappa_{\mathrm{EI}}$ and $\kappa_{\mathrm{diff}}$ .
31
+
32
+ ![](images/055402a32f50bc49d312e1f13f4dc45a03de4e99fbb318d34b5bf6f541ca3034.jpg)
33
+
34
+ ![](images/9b706e645f9444d421cca143f180ca3068ac6fa481586cdee59722a6e48405a4.jpg)
35
+
36
+ where $\mathbf{x} = (x_{1},\dots ,x_{n})^{\top}$ is a decision vector (variable), $\Omega = [x_i^{\mathrm{L}},x_i^{\mathrm{U}}]_{i = 1}^n\subset \mathbb{R}^n$ represents the search space, and $f:\Omega \to \mathbb{R}$ corresponds to the attainable set in the objective space. In real-world scenarios, function evaluations (FEs) of $f(\mathbf{x})$ can be costly, giving rise to expensive BBOPs. Bayesian optimization (BO) has emerged as one of the most effective methods for addressing expensive BBOPs. BO is a sequential model-based optimization technique consisting of two iterative steps: i) employing limited expensive FEs to construct a surrogate model of the physical objective function, such as a Gaussian process (GP) model [35]; and ii) selecting the next point of interest for costly FE by optimizing an acquisition function, e.g., probability of improvement (PI) [18], expected improvement (EI) [16], and upper confidence bound (UCB) [31]. Numerous theoretical and methodological advancements have been made in BO. Interested readers can refer to comprehensive survey papers [29, 11] and a recent textbook [13] for further information.
37
+
38
+ Nevertheless, the question of when to terminate the search process of BO remains a largely underexplored area in the literature. At present, the most prevalent termination criterion is a pre-specified budget, such as the number of FEs or wall-clock time. Though intuitive, this approach neglects the search dynamics inherent to different BBOPs. As a result, this strategy is rigid while it does not offer a general rule for determining an appropriate budget across various problem settings. If the budget is too small, BO may terminate prematurely, yielding a suboptimal solution. On the contrary, an excessive budget may lead to wasted computational resources. Another simple termination method involves stopping BO if the current best solution remains unchanged for a predetermined number of consecutive FEs. However, as highlighted by [24], this strategy also fails to consider the observed data during the sequential model-based optimization process and relies on a pre-defined threshold.
39
+
40
+ Beyond the aforementioned 'naive' approaches, a limited number of dedicated efforts have been made to address the termination of BO. One notable method involves monitoring the progress of BO by termination indicators, such as the maximum of EI [28, 16] or PI [22]. In this approach, BO is terminated when the corresponding termination indicator falls below a pre-specified threshold. Very recently, Makarova et al. proposed using the difference between the minimal of the lower confidence bound (LCB) and UCB as the termination indicator. As illustrated in Figure 1, we observe that all criteria used in these termination approaches exhibit significant oscillation during the optimization process. This can be attributed to: $i$ ) the stochastic nature of BO itself, and $ii$ ) numerical errors arising from the non-convex optimization of acquisition functions. Furthermore, as shown in Figures 1(a) and (b), the variation range of the same criterion can differ substantially when addressing problems with distinct fitness landscapes. These factors make determining a universally applicable threshold in practice challenging, resulting in fragile and less intuitive termination criteria compared to simply establishing a budget. Additionally, we find that these termination criteria are 'myopic', as decision-making is based solely on the observations at the current step, leading to a lagged termination. For instance, consider the selected samples shown in Figure 2; it is difficult, if not impossible, to determine when to terminate BO until $t = 20$ . However, if we look backward to $t = 5$ , it becomes evident that BO is likely to converge by $t = 10$ .
41
+
42
+ Our contributions. In light of the aforementioned challenges, this paper proposes a novel termination method for BO that proactively detects whether the search is located in a convex region of $-f(\mathbf{x})$ by examining previously observed samples. BO is terminated if the local regret within this convex region falls below a predetermined threshold. To improve numerical stability, we introduce an approx-
43
+
44
+ ![](images/54e951de7acdf4f3efffffa3de7ad7c579ef44e0118d05e598409da2e671d584.jpg)
45
+ Figure 2: Search dynamics of vanilla BO on the Ackley function $(n = 1)$ at different time steps after the initialization. In particular, $t = 5$ indicates five new samples are collected after the initialization.
46
+
47
+ ![](images/5450dca40eeac4045ff6595d4b782ad938216e07e8003b2df24c8c6f182eb107.jpg)
48
+
49
+ ![](images/dbf4f47bb03a3036b9bf87085c37a594b0a7b8e673d03f059d3b4441c2b7d680.jpg)
50
+
51
+ imation method for calculating the termination indicator by solving a bilevel optimization problem. Our proposed termination method is simple, yet it offers theoretical guarantees. To demonstrate its effectiveness, we compare the performance of our proposed method against four peer methods on a variety of benchmark problems, encompassing synthetic functions, reinforcement learning, and hyperparameter optimization.
52
+
53
+ # 2 Proposed Method
54
+
55
+ This section starts with a gentle tutorial of vanilla BO. Then, we delineate the implementation of our proposed termination method, followed by a theoretical analysis at the end.
56
+
57
+ # 2.1 Vanilla Bayesian Optimization
58
+
59
+ As a gradient-free optimization method, BO comprises two major steps. The first step involves constructing a surrogate model based on GP to approximate the expensive objective function. Given a set of training data $\mathcal{D} = \{\langle \mathbf{x}^i,f(\mathbf{x}^i)\rangle \}_{i = 1}^N$ , GP learns a latent function $g(\mathbf{x})$ , such that $\forall \mathbf{x}\in \mathcal{D}$ , we have $f(\mathbf{x}) = g(\mathbf{x}) + \epsilon$ , where $\epsilon \sim \mathcal{N}(0,\sigma_{\epsilon}^{2})$ is an i.i.d. Gaussian noise. For each testing input vector $\mathbf{z}^{*}\in \Omega$ , the mean and variance of the target $f(\mathbf{z}^{*})$ are predicted as follows:
60
+
61
+ $$
62
+ \mu (\mathbf {z} ^ {*}) = \mathbf {k} ^ {* ^ {\top}} (K + \sigma_ {\epsilon} ^ {2} I) ^ {- 1} \mathbf {f},
63
+ $$
64
+
65
+ $$
66
+ \sigma^ {2} \left(\mathbf {z} ^ {*}\right) = \mathbf {k} \left(\mathbf {z} ^ {*}, \mathbf {z} ^ {*}\right) - \mathbf {k} ^ {* ^ {\top}} \left(K + \sigma_ {\epsilon} ^ {2} I\right) ^ {- 1} \mathbf {k} ^ {*}, \tag {2}
67
+ $$
68
+
69
+ where $X = (\mathbf{x}^1, \dots, \mathbf{x}^N)^\top$ and $\mathbf{f} = (f(\mathbf{x}^1), \dots, f(\mathbf{x}^N))^\top$ . $\mathbf{k}^*$ is the covariance vector between $X$ and $\mathbf{z}^*$ , and $K$ is the covariance matrix of $X$ . In this paper, we use the Matérn 5/2 kernel as the covariance function to measure the similarity between a pair of data points. The second step consists of an infill criterion based on the optimization of an acquisition function, which determines the next point of merit $\tilde{\mathbf{x}}^*$ to be evaluated by the actual expensive objective function:
70
+
71
+ $$
72
+ \tilde{\mathbf{x}}^{*} = \underset{\mathbf{x} \in \Omega}{\operatorname{argmax}} f^{\mathrm{acq}}(\mathbf{x}), \tag{3}
73
+ $$
74
+
75
+ where $f^{\mathrm{acq}}(\mathbf{x}) = \mu (\mathbf{x}) + \omega \sigma (\mathbf{x})$ is the widely used UCB [31] to facilitate our theoretical analysis. Specifically, the parameter $\omega >0$ , determined according to the confidence level set as 0.95 in this paper, controls the trade-off between exploration and exploitation. Subsequently, the next point of merit $\tilde{\mathbf{x}}^{*}$ is used to update the training dataset as $\mathcal{D} = \mathcal{D}\bigcup \{\tilde{\mathbf{x}}^{*}\}$ , and BO iterates between the two aforementioned steps sequentially until a termination criterion is met. The convergence of BO can be evaluated by regret:
76
+
77
+ $$
78
+ r = f \left(\mathbf {x} ^ {\star}\right) - f \left(\tilde {\mathbf {x}} ^ {\star}\right), \tag {4}
79
+ $$
80
+
81
+ where $\mathbf{x}^{\star}$ represents the ground truth global optimum and $\tilde{\mathbf{x}}^{\star} = \operatorname*{argmax}_{\mathbf{x}\in \mathcal{D}}f(\mathbf{x})$ denotes the current best-found solution.
82
+
83
+ # 2.2 Proposed Termination Criterion
84
+
85
+ Inspired by the observations illustrated in Figure 2, we propose a termination method that involves 'looking back' at the last $\tau > 1$ observed points in the dataset $\mathcal{D}$ , and storing these in a temporary archive, denoted as $\tilde{\mathcal{D}}$ . The termination criterion we propose is predicated on two primary conditions.
86
+
87
+ Condition 1. The BO search process is deemed to have converged within a convex hull $\tilde{\Omega}$ if the following condition is satisfied:
88
+
89
+ $$
90
+ \sum_ {j = 1} ^ {\binom {\tau + 1} {2}} \mathbb {1} \left(\mu \left(\frac {\mathbf {x} + \mathbf {x} ^ {\prime}}{2}\right) \geq \frac {f (\mathbf {x}) + f \left(\mathbf {x} ^ {\prime}\right)}{2}\right) = \binom {\tau + 1} {2}, \tag {5}
91
+ $$
92
+
93
+ where $\mathbb{1}(\cdot)$ denotes the indicator function, returning 1 if the argument holds true and 0 otherwise. $\mathbf{x}$ and $\mathbf{x}'$ are points selected randomly and distinctively from $\tilde{\mathcal{D}}$ . The convex hull, $\tilde{\Omega} = [\tilde{x}_i^{\mathrm{L}},\tilde{x}_i^{\mathrm{U}}]_{i = 1}^n$ , is a subset of $\Omega$ , where $\tilde{x}_i^{\mathrm{L}} = \operatorname*{argmin}_{\mathbf{x}\in \tilde{\mathcal{D}}}x_i$ and $\tilde{x}_i^{\mathrm{U}} = \operatorname*{argmax}_{\mathbf{x}\in \tilde{\mathcal{D}}}x_i$ .
94
+
95
+ Condition 2. Assuming Condition 1 is satisfied, and $\tilde{\mathbf{x}}$ denotes the most recently observed point in $\mathcal{D}$ , we calculate the local regret $\tilde{r}$ as follows:
96
+
97
+ $$
98
+ \tilde{r} = \mu(\dot{\mathbf{x}}) - \mu(\ddot{\mathbf{x}}) + \omega\left(\sigma(\dot{\mathbf{x}}) + \sigma(\ddot{\mathbf{x}})\right), \tag{6}
99
+ $$
100
+
101
+ where $\dot{\mathbf{x}} = \operatorname*{argmax}_{\mathbf{x}\in \tilde{\Omega}}\mu (\mathbf{x})$ and $\ddot{\mathbf{x}} = \operatorname*{argmax}_{\mathbf{x}\in \tilde{\Omega}}\sigma^2 (\mathbf{x})$ . The BO process terminates if the following inequality is satisfied:
102
+
103
+ $$
104
+ \frac {\tilde {r}}{\omega \sigma_ {\epsilon}} \leq \eta_ {\mathrm {l b}}, \tag {7}
105
+ $$
106
+
107
+ where $\frac{\tilde{r}}{\omega\sigma_{\epsilon}}$ is used as the termination indicator, denoted as $\kappa_{\mathrm{lb}}$, and $\eta_{\mathrm{lb}}$ is a predetermined threshold.
108
+
109
+ Remark 1. The inequality within the indicator function $\mathbb{1}(\cdot)$ in equation (5) is derived from Jensen's inequality [4], which yields a convex function:
110
+
111
+ $$
112
+ - f \left(\alpha \mathbf {x} + (1 - \alpha) \mathbf {x} ^ {\prime}\right) \leq - \alpha f (\mathbf {x}) - (1 - \alpha) f \left(\mathbf {x} ^ {\prime}\right), \tag {8}
113
+ $$
114
+
115
+ where $\alpha \in [0,1]$ and $\mathbf{x},\mathbf{x}' \in \tilde{\Omega}$ . In order to avoid the necessity of additional function evaluations when computing $f\left(\frac{\mathbf{x} + \mathbf{x}'}{2}\right)$ , we substitute $\mu\left(\frac{\mathbf{x} + \mathbf{x}'}{2}\right)$ into equation (5).
116
+
117
+ Remark 2. In equation (6), we employ the widely-used L-BFGS algorithm [6] to compute $\dot{\mathbf{x}}$ and $\ddot{\mathbf{x}}$ . To ensure numerical stability, we suggest the following strategies for initializing the algorithm and defining its termination criterion:
118
+
119
+ 1. For $\dot{\mathbf{x}}$ , $L$ -BFGS is initialized at a point randomly selected from $\tilde{\Omega}$ . The algorithm terminates when $\| \bigtriangledown \mu (\mathbf{x})\| _2\leq \lambda$ . In our work, we set $\lambda = 10^{-6}$ , following Proposition 1.
120
+ 2. For $\ddot{\mathbf{x}}$ , $L$ -BFGS is initialized at the point $\underset {\mathbf{x}\in \tilde{\Omega}}{\operatorname{argmax}}\underline{\sigma}^2 (\mathbf{x})$ , where $\underline{\sigma}^2 (\mathbf{x})$ denotes the lower bound of $\sigma^2 (\mathbf{x})$ over $\tilde{\Omega}$ . The termination criterion is $\| \bigtriangledown \sigma^2 (\mathbf{x})\| _2\leq \lambda$ , as per Proposition 2.
121
+
122
+ Remark 3. Considering equation (7), given that $\frac{\mu(\dot{\mathbf{x}}) - \mu(\ddot{\mathbf{x}})}{\omega\sigma_{\epsilon}}\geq 0$ and $\frac{\sigma(\dot{\mathbf{x}}) + \sigma(\ddot{\mathbf{x}})}{\sigma_{\epsilon}}\geq 2$ , we deduce that $\eta_{\mathrm{lb}}\geq 2$ . The upper bound of $\eta_{\mathrm{lb}}$ is empirically determined, as detailed in Section 4.1.
123
+
124
+ Remark 4. When the GP model is overfitting, BO tends to converge within the local region of the current best solution. In this case, both Condition 1 and Condition 2 are easily met while BO will be terminated prematurely. On the other hand, when the model is underfitting, BO will explore $\Omega$ in a random manner. In this case, satisfying Condition 1 becomes challenging, and BO will face the risk of failing to be terminated. Therefore, we designed three mitigation strategies: 1) restrict the lengthscale to [0.05, 200] during GP training to prevent lengthscales from becoming excessively large or small; 2) normalize the input of training data to [0, 1]; and 3) standardize the output of the training data by centering it on the mean and scaling it by the variance.
125
+
126
+ Proposition 1. Consider $\forall \mathbf{x} \in \tilde{\Omega}$ , where $-\mu(\mathbf{x})$ represents a convex function. If $\|\nabla \mu(\mathbf{x})\|_2 \leq \lambda$ , we can establish:
127
+
128
+ $$
129
+ \mu (\dot {\mathbf {x}}) - \mu (\mathbf {x}) \leq \xi , \tag {9}
130
+ $$
131
+
132
+ where $\lambda = (2m_1\xi)^{1 / 2}$ , $\xi$ is a positive constant, and $m_{1}$ denotes the strong convexity parameter of $-\mu (\mathbf{x})$ [4].
133
+
134
+ Lemma 1. Assume the $GP$ employs a stationary kernel $k(\cdot, \cdot)$ . For $\forall \mathbf{x} \in \tilde{\Omega}$ , the lower bound of $\sigma^2(\mathbf{x})$ is given by:
135
+
136
+ $$
137
+ \underline {{\sigma}} ^ {2} (\mathbf {x}) = k (\mathbf {x}, \mathbf {x}) + c \sum_ {i = 1} ^ {| \mathcal {D} |} k ^ {2} \left(\mathbf {x}, \mathbf {x} ^ {i}\right), \tag {10}
138
+ $$
139
+
140
+ where $c < 0$ is a constant and $\mathbf{x}^i\in \mathcal{D}$ for $i\in \{1,\dots ,|\mathcal{D}|\}$ .
141
+
142
+ Lemma 2. Given Lemma 1, determining $\underset{\mathbf{x} \in \tilde{\Omega}}{\operatorname{argmax}} \underline{\sigma}^2(\mathbf{x})$ is equivalent to solving the following bilevel optimization problem:
143
+
144
+ $$
145
+ \begin{array}{rl} \underset{\mathbf{x} \in \tilde{\Omega}}{\text{minimize}} & d(\mathbf{x}, \mathbf{x}^{1}, \mathbf{x}^{2}) = \|\mathbf{x} - \mathbf{x}^{1}\|_{2}^{2} + \|\mathbf{x} - \mathbf{x}^{2}\|_{2}^{2} \\ \text{subject to} & \{\mathbf{x}^{1}, \mathbf{x}^{2}\} = \underset{\substack{\mathbf{x}^{1}, \mathbf{x}^{2} \in \mathcal{D} \cap \tilde{\Omega} \\ \mathbf{x}^{1} \neq \mathbf{x}^{2},\; \hat{\Omega} \cap \mathcal{D} = \emptyset}}{\operatorname{argmax}} \|\mathbf{x}^{1} - \mathbf{x}^{2}\|_{2}^{2}, \end{array} \tag{11}
146
+ $$
147
+
148
+ where $\hat{\Omega} = [\hat{x}_i^{\mathrm{L}},\hat{x}_i^{\mathrm{U}}]_{i = 1}^n\subset \tilde{\Omega}$ , $\hat{x}_i^{\mathrm{L}} = \min (x_i^1,x_i^2)$ and $\hat{x}_i^{\mathrm{U}} = \max (x_i^1,x_i^2)$ . Given that the lower-level optimization can be addressed via exhaustive search, the analytical solution of (11) is given by $\hat{\mathbf{x}} = (\hat{x}_1^{\mathrm{L}} + \frac{\hat{x}_1^{\mathrm{U}} - \hat{x}_1^{\mathrm{L}}}{2},\dots ,\hat{x}_n^{\mathrm{L}} + \frac{\hat{x}_n^{\mathrm{U}} - \hat{x}_n^{\mathrm{L}}}{2})^\top$
149
+
150
+ Proposition 2. Leveraging Lemma 2, suppose $-\sigma^2(\mathbf{x})$ exhibits convexity in its local optimal regions over $\tilde{\Omega}$; then the following inequality is satisfied when $\| \nabla \sigma^2(\mathbf{x}) \|_2 \leq \lambda$:
151
+
152
+ $$
153
+ \sigma^ {2} (\ddot {\mathbf {x}}) - \sigma^ {2} (\mathbf {x}) \leq \beta + \xi , \tag {12}
154
+ $$
155
+
156
+ where $\lambda = (2m_2\xi)^{1 / 2}$ , $\xi >0$ , $m_{2} > 0$ represents the strong convexity parameter of $-\sigma^2 (\mathbf{x})$ in its local optimal regions [4], and $\beta$ is constrained by $0\leq \beta \leq \sigma^{2}(\ddot{\mathbf{x}}) - \sigma^{2}(\hat{\mathbf{x}})$ .
157
+
158
+ # 2.3 Theoretical Analysis of the Proposed Termination Criterion
159
+
160
+ In this subsection, we delve into the theoretical underpinnings of the proposed termination method, focusing on the convergence of BO when the UCB is utilized as the acquisition function.
161
+
162
+ Lemma 3. As per Srinivas et al., the optimization process in BO can be conceptualized as a sampling process from a GP. Hence, for any $\mathbf{x} \in \Omega$ , we have:
163
+
164
+ $$
165
+ \Pr \left(| f (\mathbf {x}) - \mu (\mathbf {x}) | \leq \omega \sigma (\mathbf {x})\right) > \delta , \tag {13}
166
+ $$
167
+
168
+ where $\delta > 0$ signifies the confidence level adhered to by the UCB.
169
+
170
+ Corollary 1. Based on Lemma 3 and Condition 2, we deduce that:
171
+
172
+ $$
173
+ \Pr \left(f ^ {\mathrm {a c q}} \left(\tilde {\mathbf {x}} ^ {\star}\right) + \varepsilon \geq f \left(\mathbf {x} ^ {\star}\right)\right) > \delta , \tag {14}
174
+ $$
175
+
176
+ where $\varepsilon$ is a numerical error when optimizing the acquisition function, $\tilde{\mathbf{x}}^{\star} = \underset {\mathbf{x}\in \Omega}{\mathrm{argmax}}f^{\mathrm{acq}}(\mathbf{x})$ , and $\mathbf{x}^{\star}$ represents the true global optimum. Furthermore,
177
+
178
+ $$
179
+ 0 \leq \varepsilon \leq \mu (\dot {\mathbf {x}}) + \omega \sigma (\ddot {\mathbf {x}}) - f ^ {\mathrm {a c q}} (\tilde {\mathbf {x}} ^ {\star}), \tag {15}
180
+ $$
181
+
182
+ where $\dot{\mathbf{x}},\ddot{\mathbf{x}}$ and $\tilde{\mathbf{x}}^{\star}$ are elements of $\tilde{\Omega}$ , while $\delta >0$ denotes the confidence level of the UCB.
183
+
184
+ Theorem 1. Leveraging Corollary 1, when employing the termination method proposed in this paper, we deduce that the global regret bound of BO as:
185
+
186
+ $$
187
+ \Pr (r \leq 2 \omega \sigma (\tilde {\mathbf {x}} ^ {\star}) + \varepsilon) > \delta , \tag {16}
188
+ $$
189
+
190
+ where $\delta > 0$ signifies the confidence level associated with the UCB.
191
+
192
+ Theorem 2. Building upon Condition 1 and Condition 2, and employing the termination method proposed in this paper, we establish the local regret bound of BO as:
193
+
194
+ $$
195
+ \Pr (f (\mathbf {x} ^ {\star}) - f (\mathbf {x}) \leq \tilde {r}) > \delta , \tag {17}
196
+ $$
197
+
198
+ where $\mathbf{x} \in \tilde{\Omega}$ , $\mathbf{x}^{\star}$ denotes the true global optimum in $\tilde{\Omega}$ , and $\delta > 0$ is the confidence level of the UCB.
199
+
200
+ Remark 5. Drawing from Theorem 1 and Theorem 2, we observe that if $\varepsilon$ can be considered negligible when $\tilde{\mathbf{x}}^{\star}$ is accurately determined by optimizing the UCB, $\tilde{r}$ subsequently represents the upper bound of BO regret within the domain $\Omega$ . Conversely, if $\varepsilon$ cannot be disregarded, $\tilde{r}$ is posited as the upper bound of BO regret within the restricted domain $\tilde{\Omega}$ .
201
+
202
+ # 3 Experimental Settings
203
+
204
+ In this section, we present the experimental setup for our empirical study, which encompasses the benchmark test problems, the peer algorithms, and the performance metrics used for evaluation.
205
+
206
+ # 3.1 Benchmark Problems
207
+
208
+ We evaluate the performance of our proposed method on three types of benchmark problems.
209
+
210
+ - Synthetic functions: We consider Ackley, Levy, and Schwefel functions [33] with $n \in \{2, 5, 10\}$ . The objective function $f(\mathbf{x})$ is contaminated by Gaussian noise $\zeta \sim \mathcal{N}(0.0, 0.2)$ . The maximal number of FEs is set to $N_{\mathrm{FE}} = 50n$ , with $5n$ allocated to initialization.
211
+ - Reinforcement learning (RL): We examine two RL tasks chosen from OpenAI Gym [5]: Lunar Lander with $n = 12$ and Swimmer with $n = 16$ . We set $N_{\mathrm{FE}} = 50n$ , with $5n$ FEs allocated to initialization.
212
+ - Hyperparameter optimization (HPO): We consider 5 HPO tasks picked up from the HPOBench [9] for tuning support vector machine (SVM) with $n = 2$ , multi-layer perceptron (MLP) with $n = 5$ , random forest with $n = 4$ and XGBoost with $n = 8$ . The computational budget is set the same as in the RL tasks.
213
+
214
+ Note that, due to the use of termination criteria, it may not be necessary to exhaust the entire allocated computational budget to terminate BO. To ensure statistical significance, each experiment is independently conducted 21 times with different random seeds.
215
+
216
+ # 3.2 Peer Algorithms
217
+
218
+ As discussed in Section 1, the termination criterion for BO is an understudied topic in the literature. In our experiments, we compare our proposed method with the following four termination methods.
219
+
220
+ - Naive method: This method ceases BO when $\tilde{\mathbf{x}}^{\star}$ stays unchanged for $\kappa_{\mathrm{n}}$ consecutive iterations. Here, $\kappa_{\mathrm{n}}$ is also the termination indicator. In our experiments, we test three settings of the thresholds $\eta_{\mathrm{n}}$ as 150, 337 and 524, respectively.
221
+ - Nguyen's method [28]: In each iteration of BO, the optimization of acquisition function produces the current optimal EI. By using this as the termination indicator, denoted as $\kappa_{\mathrm{EI}}$ , the Nguyen's method terminates BO when it falls below a predetermined threshold $\eta_{\mathrm{EI}}$ . In our experiments, we consider three settings of $\eta_{\mathrm{EI}}$ as 0.01, 0.04 and 0.06, respectively.
222
+ - Lorenz's method [22]: Analogous to the Nguyen's method, the Lorenz's method replaces the EI with PI as the termination indicator, denoted as $\kappa_{\mathrm{PI}}$ . In our experiments, the termination threshold $\eta_{\mathrm{PI}}$ is set as 0.07, 0.2 and 0.33, respectively.
223
+ - Makarova's method [24]: Similar to the previous two methods, the Makarova's method uses the difference between the lower and upper confidence bounds as the termination indicator, denoted as $\kappa_{\mathrm{diff}}$ . It terminates BO when $\kappa_{\mathrm{diff}} \leq \eta_{\mathrm{diff}}$ , a predetermined threshold and is set as 0.26, 0.62 and 0.97, respectively, in our experiments.
224
+ - Our proposed method: According to Condition 1 and Condition 2, our proposed method terminates BO when $\kappa_{\mathrm{lb}}$ falls below a predetermined threshold $\eta_{\mathrm{lb}}$ , which is set as 2.02, 2.05 and 2.08, respectively. Furthermore, we introduce a hyperparameter $\tau$ to control the number of observed samples being looked backward, which is set to $\tau = 10$ in our experiments. The code is available at https://github.com/COLA-Laboratory/OptimalStoping_NeurIPS2023.
225
+
226
+ According to the aforementioned settings, it is evident that the naive method tends to delay termination when a large $\eta_{\mathrm{n}}$ is used. On the other hand, other methods may incur a delayed termination if a small threshold is used. Note that the choices of the corresponding termination thresholds and the sensitivity of $\tau$ are empirically examined in Sections 4.1 and 4.2.
227
+
228
+ # 3.3 Performance Metrics
229
+
230
+ In our experiments, we consider the following three performance metrics to measure the effectiveness of a termination method.
231
+
232
+ - Empirical cumulative probability of a termination indicator:
233
+
234
+ $$
235
+ I_{\mathrm{cdf}} = \frac{1}{N_{\mathrm{FE}} \times 21} \sum_{i=0}^{N_{\mathrm{FE}} \times 21} \mathbb{1}(\kappa \leq \tilde{\kappa}_i), \tag{18}
236
+ $$
237
+
238
+ where $\tilde{\kappa}_i = \underline{\kappa} +\frac{(\bar{\kappa} - \underline{\kappa})\times i}{N_{\mathrm{FE}}\times 21}$ , and $i\in \{0,\dots ,N_{\mathrm{FE}}\times 21\}$ . For a given termination method, $\kappa$ represents its termination indicator as outlined in Section 3.2. The minimum and maximum
239
+
240
+ ![](images/3521c2a70ace1324464c86e5f412f005e573607b42392c5e92836bfaaa617b74.jpg)
241
+ Figure 3: Trajectories of $\mathrm{I}_{\mathrm{cdf}}$ collected on different benchmark problems. Here we only show some results without loss of generality, while full results can be found in the supplementary document. Different subplots are (a) our proposed method, (b) Naive method, (c) Nguyen's method, (d) Lorenz's method, and (e) Makarova's method, respectively.
242
+
243
+ ![](images/c27221ff8575cd340714eb9b36f57101e980a3985bff754d6fda52f8dab7da58.jpg)
244
+ Figure 4: Bar charts with error bars of normalized $\tilde{\kappa}_i$ for different termination methods when $\mathrm{I}_{\mathrm{cdf}}$ is set as 0.05, 0.1, 0.2, 0.3, 0.4, and 0.5 respectively.
245
+
246
+ values of $\kappa$ , represented by $\underline{\kappa}$ and $\bar{\kappa}$ respectively, are determined across all 21 repeated experiments on each benchmark problem. If $\mathrm{I}_{\mathrm{cdf}}$ exhibits consistency across a range of benchmark problems, it implies that the threshold choice for the corresponding termination method is consistent and not dependent on the specific problem.
247
+
248
+ - The relative computational cost:
249
+
250
+ $$
251
+ I_{\mathrm{cost}} = \frac{\tilde{N}_{\mathrm{FE}}}{N_{\mathrm{FE}}}, \tag{19}
252
+ $$
253
+
254
+ where $\tilde{N}_{\mathrm{FE}}$ is the number of FEs used by a termination criterion when early stopping occurs. A lower value of $\mathrm{I_{cost}}$ indicates a higher degree of computational budget saving.
255
+
256
+ - The relative performance degradation incurred by early stopping:
257
+
258
+ $$
259
+ I_{\mathrm{perf}} = \frac{f(\bar{\mathbf{x}}) - f(\tilde{\mathbf{x}}^{\star})}{f(\bar{\mathbf{x}}) - f(\underline{\mathbf{x}})}, \tag{20}
260
+ $$
261
+
262
+ where $\bar{\mathbf{x}}$ and $\underline{\mathbf{x}}$ are the best and the worst solutions found by BO when consuming all $N_{\mathrm{FE}}$ FEs. $\tilde{\mathbf{x}}^{\star}$ signifies the best solution found when early stopping is prompted by a termination criterion. A smaller $\mathrm{I}_{\mathrm{perf}}$ value indicates less performance degradation resulting from the application of the corresponding termination criterion.
263
+
264
+ # 4 Empirical Studies
265
+
266
+ In this section, our experiments<sup>2</sup> aim to investigate three aspects: $i$ ) the robustness of the termination threshold for different termination methods; $ii$ ) the trade-off between the computational budget saving versus the performance degradation; and $iii$ ) the sensitivity of $\tau$ in our proposed termination method.
267
+
268
+ # 4.1 Robustness of the Selection of Termination Threshold
269
+
270
+ In this subsection, we use the $\mathrm{I}_{\mathrm{cdf}}$ metric to scrutinize the threshold choice of various termination methods across different problems. As per equation (18), it is evident that $\mathrm{I}_{\mathrm{cdf}} \propto \tilde{\kappa}_i$ . As discussed earlier in Section 3.2, a large $\tilde{\kappa}_i$ can lead to premature early stopping. Consequently, we confine our analysis to instances where $\mathrm{I}_{\mathrm{cdf}} \leq 0.5$ . As shown in Figure 3, the trajectories of $\mathrm{I}_{\mathrm{cdf}}$ for our proposed method appear to converge, whereas those for the other methods diverge with different magnitudes. More specifically, as shown in Figure 3(a), $\tilde{\kappa}_i = 2$ can be regarded as a transition point where $\mathrm{I}_{\mathrm{cdf}} \geq 0.95$ if $\tilde{\kappa}_i \geq 2$ . This empirical observation corroborates the theoretical result derived
271
+
272
+ ![](images/e42c8729e2e08689058f3aa9380a6be7f780f263178b22e8628061e30cfb0b99.jpg)
273
+ (a) Proposed method
274
+
275
+ ![](images/552c467d2ba9f6b01d1dc9af23ec945fa9e56e4eac421b5d02ad13a4cd832454.jpg)
276
+ Number of FEs
277
+ (b) Naive method
278
+
279
+ ![](images/0b25f34a8d0036eb0e42f3e8174283637e6bcd40bf3afd63317c8194728db16c.jpg)
280
+ (c) Nguyen's method
281
+
282
+ ![](images/4202472798bcd6030bbb8999fb4216a3393b31560129a51e474970f9e7445924.jpg)
283
+ (d) Lorenz's method
284
+
285
+ ![](images/b2a95682428bb6955e894591637823f5f3cec72ce6c3886cdef8a3e5f07403cc.jpg)
286
+ (e) Makarova's method
287
+
288
+ ![](images/728c059787c7150325a0ffca31cccdda18e93726f178e1c8ac3e64b3df877c06.jpg)
289
+ Figure 5: Trajectories of different termination indicators versus the number of FEs during the BO process on Ackley $(n = 2)$ and HPO for SVM.
290
+ (a)
291
+
292
+ ![](images/6339c62c9c37cc70294b6f393c1671249c31c7d26b4730576da86ee1d390afad.jpg)
293
+ (a)
294
+ (b)
295
+
296
+ (d) (e)
297
+ Figure 6: Bar charts with error bars of $\mathrm{I}_{\mathrm{cost}}$ and $\mathrm{I}_{\mathrm{perf}}$ obtained by using different settings of termination threshold suggested in Section 3.2, denoted as $\eta_1$ , $\eta_2$ and $\eta_3$ respectively. Subplots (a) to (e) correspond to our proposed, Naïve, Nguyen's, Lorenz's, and Makarova's methods respectively.
298
+
299
+ ![](images/c7a726c35c3641f926cd8b9794256e850fc8f7efbeef9a66b911a4ee4670eb12.jpg)
300
+ Number of FEs
301
+
302
+ ![](images/8e2f27c851fcf41bb7c06244154e35340defec37dfc6d51ec7246f9709391ca5.jpg)
303
+ Number of FEs
304
+ Figure 7: Trajectories of the regret of BO versus the number of FEs during the BO process on five selected problems. Full results can be found in the supplementary document.
305
+
306
+ ![](images/190fa18be5e7a7280ba279bcd6b4fdc66e22a87697695132b7adde748bc79cce.jpg)
307
+ Number of FEs
308
+
309
+ ![](images/7e1cfd392ab0b7c5714cb15864f18e47dc9be4d44f1e2bf9b7f7efbd7bce850d.jpg)
310
+ Number of FEs
311
+
312
+ ![](images/2009ef236cc0e662dcc71add7d3f6faa3840d12f4d1e5a3969e3b15c89e829e3.jpg)
313
+ Number of FEs
314
+
315
+ in Condition 2. In contrast, there does not exist a consistent lower bound for the other termination methods. To further elucidate these observations, we plot the distributions of $\tilde{\kappa}_i$ when $\mathrm{I}_{\mathrm{cdf}}$ ranges from 0.05 to 0.5 in Figure 4. It is clear that the bar charts exhibit the least variation for our proposed method. For the naive method, $\tilde{\kappa}_i$ increases as $\mathrm{I}_{\mathrm{cdf}}$ grows. However, the bars for the other three methods show significant fluctuations, particularly for the Nguyen's and the Lorenz's methods. These observations are further substantiated by the trajectories of the termination indicators throughout the BO process, as shown in Figure 5. We present results for the Ackley and HPO for SVM problems here, while complete results are available in the supplementary document. These plots reveal that the trajectories for our proposed method converge to a certain threshold, while those for the other methods not only diverge but also differ significantly on different problems. Based on this discussion, we use $\mathrm{I}_{\mathrm{cdf}} = 0.05$ as the capping point to guide the selection of the termination threshold for different termination methods: $\eta_{\mathrm{lb}} \in [2, 2.1]$ , $\eta_{\mathrm{n}} \in [57, 617]$ , $\eta_{\mathrm{EI}} \in [3.8 \times 10^{-24}, 0.08]$ , $\eta_{\mathrm{PI}} \in [2 \times 10^{-21}, 0.39]$ , $\eta_{\mathrm{diff}} \in [0.09, 1.15]$ . In our experiments, we apply the Latin hypercube design method [26] to choose three settings as listed in Section 3.2.
316
+
317
+ # 4.2 Computational budget saving versus performance degradation
318
+
319
+ There is a trade-off when early terminating BO, i.e., the performance of BO can be compromised when using fewer FEs. In this subsection, we employ $\mathrm{I}_{\mathrm{cost}}$ and $\mathrm{I}_{\mathrm{perf}}$ to characterize such a trade-off. From the comparison results shown in Figure 6 and Table 1, we can see that although the naïve method achieves the best $\mathrm{I}_{\mathrm{perf}}$ , it consumes almost all FEs. In contrast, our proposed method saves up to $\approx 80\%$ of the computation budget while the performance degradation is up to an order of magnitude smaller than the other three termination methods. From the trajectories of the regret of BO versus the number of FEs shown in Figure 7, we can see that the other three termination methods suffer from a premature early stopping.
320
+
321
+ Table 1: The statistical comparison results of different termination methods on ${\mathrm{I}}_{\text{cost }}$ and ${\mathrm{I}}_{\text{perf }}$ .
322
+
323
+ <table><tr><td>Metrics</td><td>Thresholds</td><td>Naïve method</td><td>Nguyen&#x27;s method</td><td>Lorenz&#x27;s method</td><td>Makarova&#x27;s method</td><td>Proposed method</td></tr><tr><td rowspan="3">Icost</td><td>η1</td><td>1(0)†</td><td>0.1313(4.48E-1)‡</td><td>0.1244(4.48E-1)‡</td><td>0.7856(4.17E-1)†</td><td>0.6206(3.17E-1)</td></tr><tr><td>η2</td><td>1(0)†</td><td>0.1082(3.5E-2)‡</td><td>0.1053(1.47E-2)‡</td><td>0.1414(3.89E-2)‡</td><td>0.3012(1.94E-1)</td></tr><tr><td>η3</td><td>0.8343(2.51E-1)†</td><td>0.1048(9.01E-3)‡</td><td>0.1044(4.20E-3)‡</td><td>0.1313(3.33E-2)‡</td><td>0.2209(1.12E-1)</td></tr><tr><td rowspan="3">Ip erf</td><td>η1</td><td>0(0)‡</td><td>0.0077(7.28E-2)†</td><td>0.0067(7.71E-2)†</td><td>0(1.56E-2)†</td><td>0(6.81E-3)</td></tr><tr><td>η2</td><td>0(0)‡</td><td>0.0614(1.08E-1)†</td><td>0.0721(1.13E-1)†</td><td>0.0167 (6.51E-2)†</td><td>0(3.35E-2)</td></tr><tr><td>η3</td><td>0(0)‡</td><td>0.0704(1.14E-1)†</td><td>0.0978(1.18E-1)†</td><td>0.0355 (8.08E-2)†</td><td>0.0028(4.17E-2)</td></tr></table>
324
+
325
+ $^\dagger$ denotes the performance of our proposed method is significantly better than the other peers according to the Wilcoxon's rank sum test at a 0.05 significance level; $^\ddagger$ denotes the opposite case.
326
+
327
+ ![](images/5fd460a5dfe569b15939c56a5391231ab9168bcbcd743c67c159a520e7e77dd8.jpg)
328
+ Figure 8: Bar charts with error bars of $\mathrm{I}_{\mathrm{cost}}$ and $\mathrm{I}_{\mathrm{perf}}$ when using $\tau \in \{2i\}_{i=1}^{9}$ in our proposed termination method.
329
+
330
+ ![](images/70968b03f1bd3dd2339c1f42d8f8de6eea74a3fc1cbed27b5c457f87d9b57d49.jpg)
331
+
332
+ # 4.3 Parameter Sensitivity Study
333
+
334
+ In this subsection, we investigate the sensitivity of our proposed termination method with respect to the parameter $\tau$ . We consider various settings of $\tau \in \{2i\}_{i=1}^{9}$ and repeat the experiments on all benchmark problems introduced in Section 3.1. The aggregated comparison results for $\mathrm{I_{cost}}$ and $\mathrm{I_{perf}}$ are illustrated as bar charts with error bars in Figure 8. Specifically, we present the results for $\eta_{\mathrm{lb}} = 2.05$ , while the complete results can be found in the supplementary document. The plots show that the choice of $\tau$ has minimal impact on the results, except for cases with $\tau = 2$ and $\tau = 4$ . This is reasonable, as the termination method may not utilize sufficient previous information when only considering a few observed samples. Additionally, we examine the scenario where the equality constraint in Condition 1, i.e., equation (5), is relaxed. The comparison results in Figure 8 reveal similar observations regarding the settings of $\tau$ . However, we also notice a slight performance degradation and more aggressive early stopping in this case. These findings demonstrate that Condition 1 helps mitigate the risk of premature early stopping.
335
+
336
+ # 5 Other Partially Related Works
337
+
338
+ Despite the limited number of dedicated studies on termination criteria for BO, various efforts have been made to explore early stopping strategies in different contexts.
339
+
340
+ The first category primarily focuses on detecting change points in sequential processes [34], with applications spanning various fields such as financial analysis [20], bioinformatics [7], and network traffic data analysis [23], among others. However, modeling the automatic termination of BO as a change point detection (CPD) problem may present several challenges. These include: 1) the absence of suitable stopping metrics that can provide signals for CPD in the optimization process of BO; 2) the unknown and uncertain nature of signal distribution, the number of change points, and change point consistency; 3) limited data available for CPD; and 4) the necessity to further evaluate change points in order to determine an appropriate moment for terminating BO.
341
+
342
+ The second category primarily focuses on determining the statistically optimal stopping moment for generalized sequential decision-making processes [14, 13]. For instance, in the classical secretary problem, termination criteria are developed to identify the maximum of an unknown distribution with minimal cost through sequential search [12]. They typically establish relationships between the costs and rewards of decision-making using cost coefficients [8, 2, 3], unknown observation costs [15, 37, 30, 25] or discount factors [36], subsequently deriving statistically optimal stopping conditions. However, quantifying the relationship between the improvement of the fitness and the
343
+
344
+ cost of BO remains challenging. Furthermore, these criteria do not leverage the information provided by the surrogate model, which is crucial in BO.
345
+
346
+ The third category primarily aims to balance exploration and exploitation in the optimization process. Among them, heuristic methods, exemplified by simulated annealing, are widely employed to halt the local search step of optimization algorithms [17, 1, 21]. However, such methods' hyperparameters lack interpretability and must be fine-tuned according to different problem characteristics. Additionally, McLeod et al. propose a regret-based strategy for switching between local and global optimization. Although promising for complex functions, this approach has certain limitations, including reliance on the authors' proposed regret reduction acquisition function and the potential need for additional computational resources to approximate intractable integrals. Furthermore, Eriksson et al. developed a trust-region-based BO that balances exploitation and exploration. This algorithm terminates local search when the trust region size is reduced to zero. However, the termination criteria lack theoretical guarantees and are bound to the proposed trust region maintenance mechanism.
347
+
348
+ # 6 Conclusion
349
+
350
+ In this paper, we developed a simple yet theoretically grounded two-step method for automatically terminating BO. The key insight is to proactively detect the local convex region and to terminate BO whenever the termination indicator built upon the local regret therein falls below a predetermined threshold. Our proposed termination method naturally strikes a balance between the quality of the solution found by BO and its computational efficiency. The proposed termination method is supported by robust theoretical underpinnings, and we have additionally introduced an approximation method to enhance the numerical stability by solving a bilevel optimization problem. Our extensive empirical studies, conducted across a variety of benchmark problems, including synthetic functions, reinforcement learning, and hyperparameter optimization, consistently demonstrated the better performance of our proposed method compared to other state-of-the-art techniques.
351
+
352
+ Besides, experimental results also show that the termination criterion of our proposed method is robust across different problems. This property opens an additional opportunity for our proposed termination method to go beyond automatically terminating BO to a broader range of applications, such as early stopping to avoid overfitting in neural network training, change point or anomaly detection in data streams, and even a new perspective to strike the balance between exploitation and exploration under a bandit setting. The primary limitation of the proposed termination criterion is that it requires a predefined termination threshold, which needs to be determined based on prior knowledge or empirical observations. Although a recommended threshold selection range is given here, finding an optimal threshold that suits a wide range of optimization problems remains a challenge.
353
+
354
+ # Author Contributions
355
+
356
+ SL implemented the theoretical derivations and experiments, as well as drafted the manuscript; KL piloted the idea and re-wrote the manuscript; WL proofread the manuscript.
357
+
358
+ # Acknowledgement
359
+
360
+ This work was supported in part by the UKRI Future Leaders Fellowship under Grant MR/S017062/1 and MR/X011135/1; NSFC under Grant 62376056 and 62076056; the Royal Society under Grant IES/R2/212077; the Kan Tong Po Fellowship (KTP/R1/231017); the EPSRC under Grant 2404317; the Amazon Research Award and Alan Turing Fellowship; and the National Natural Science Foundation of China under Grant 62273119.
361
+
362
+ # References
363
+
364
+ [1] Ricardo Baptista and Matthias Poloczek. Bayesian optimization of combinatorial structures. In ICML'18: Proc. of the International Conference on Machine Learning, pages 462-471. PMLR, 2018.
365
+ [2] Bruno Betro and Fabio Schoen. Sequential stopping rules for the multistart algorithm in global optimisation. Mathematical Programming, 38(3):271-286, 1987.
366
+
367
+ [3] Bruno Betro and Fabio Schoen. Optimal and sub-optimal stopping rules for the multistart algorithm in global optimization. Mathematical Programming, 57(1):445-458, 1992.
368
+ [4] Stephen Boyd and Lieven Vandenberghe. Convex optimization. Cambridge university press, 2004.
369
+ [5] Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. Retrieved January 20, 2023, from https://github.com/openai/gym.
370
+ [6] Richard H Byrd, Peihuang Lu, Jorge Nocedal, and Ciyou Zhu. A limited memory algorithm for bound constrained optimization. SIAM Journal on scientific computing, 16(5):1190-1208, 1995.
371
+ [7] Souhil Chakar, E Lebarbier, Céline Lévy-Leduc, and Stéphane Robin. A robust approach for estimating change-points in the mean of an $ar(1)$ process. Bernoulli, 23(2):1408-1447, 2017.
372
+ [8] Herman Chernoff. Sequential design of experiments. The Annals of Mathematical Statistics, 30(3):755-770, 1959.
373
+ [9] Katharina Eggensperger, Philipp Müller, Neeratyoy Mallik, Matthias Feurer, Rene Sass, Aaron Klein, Noor Awad, Marius Lindauer, and Frank Hutter. HPOBench: A collection of reproducible multi-fidelity benchmark problems for HPO. In NeurIPS'21: Proc. of the Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021.
374
+ [10] David Eriksson, Michael Pearce, Jacob Gardner, Ryan D Turner, and Matthias Poloczek. Scalable global optimization via local bayesian optimization. Advances in neural information processing systems, 32, 2019.
375
+ [11] Peter I. Frazier. A tutorial on bayesian optimization. CoRR, abs/1807.02811, 2018.
376
+ [12] PR Freeman. The secretary problem and its extensions: A review. International Statistical Review/Revue Internationale de Statistique, pages 189-206, 1983.
377
+ [13] Roman Garnett. Bayesian Optimization. Cambridge University Press, 2023.
378
+ [14] Daniel G Goldstein, R Preston McAfee, Siddharth Suri, and James R Wright. Learning when to stop searching. Management Science, 66(3):1375-1394, 2020.
379
+ [15] Frank Hutter, Holger H Hoos, and Kevin Leyton-Brown. Sequential model-based optimization for general algorithm configuration. In LION'11: Proc. of the Fifth International Conference on Learning and Intelligent Optimization, pages 507-523. Springer, 2011.
380
+ [16] Donald R. Jones, Matthias Schonlau, and William J. Welch. Efficient global optimization of expensive black-box functions. J. Glob. Optim., 13(4):455-492, 1998.
381
+ [17] Scott Kirkpatrick, C Daniel Gelatt Jr, and Mario P Vecchi. Optimization by simulated annealing. science, 220(4598):671-680, 1983.
382
+ [18] H. J. Kushner. A new method of locating the maximum point of an arbitrary multipeak curve in the presence of noise. J. Basic Eng., 86(1):97-106, 1964.
383
+ [19] P. Langley. Crafting papers on machine learning. In Pat Langley, editor, ICML'00: Proc. of the 17th International Conference on Machine Learning, pages 1207-1216, Stanford, CA, 2000. Morgan Kaufmann.
384
+ [20] Marc Lavielle and Gilles Teyssiere. Adaptive detection of multiple change-points in asset price volatility. In Long memory in economics, pages 129-156. Springer, 2007.
385
+ [21] Daniel James Lizotte. Practical bayesian optimization. 2008.
386
+ [22] Romy Lorenz, Ricardo P Monti, Ines R Violante, Aldo A Faisal, Christoforos Anagnostopoulos, Robert Leech, and Giovanni Montana. Stopping criteria for boosting automatic experimental design using real-time fmri with Bayesian optimization, 2016.
387
+
388
+ [23] Alexandre Lung-Yut-Fong, Céline Lévy-Leduc, and Olivier Cappé. Distributed detection/localization of change-points in high-dimensional network traffic data. Statistics and Computing, 22(2):485-496, 2012.
389
+ [24] Anastasia Makarova, Huibin Shen, Valerio Perrone, Aaron Klein, Jean Baptiste Faddoul, Andreas Krause, Matthias W. Seeger, and Cedric Archambeau. Automatic termination for hyperparameter optimization. In AutoML'22: Proc. of 2022 International Conference on Automated Machine Learning, volume 188 of Proceedings of Machine Learning Research, pages 7/1-21. PMLR, 2022.
390
+ [25] Gustavo Malkomes, Charles Schaff, and Roman Garnett. Bayesian optimization for automated model selection. pages 2892-2900, 2016.
391
+ [26] Michael D. McKay, Richard J. Beckman, and William J. Conover. A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. Technometrics, 42(1):55-61, 2000.
392
+ [27] Mark McLeod, Stephen Roberts, and Michael A Osborne. Optimization, fast and slow: optimally switching between local and bayesian optimization. In ICML'18: Proc. of the International Conference on Machine Learning, pages 3443-3452. PMLR, 2018.
393
+ [28] Vu Nguyen, Sunil Gupta, Santu Rana, Cheng Li, and Svetha Venkatesh. Regret for expected improvement over the best-observed value and stopping condition. In ACML '17: Proc. of The 9th Asian Conference on Machine Learning, volume 77 of Proceedings of Machine Learning Research, pages 279–294. PMLR, 2017.
394
+ [29] Bobak Shahriari, Kevin Swersky, Ziyu Wang, Ryan P. Adams, and Nando de Freitas. Taking the human out of the loop: A review of bayesian optimization. Proc. IEEE, 104(1):148-175, 2016.
395
+ [30] Jasper Snoek, Hugo Larochelle, and Ryan P Adams. Practical bayesian optimization of machine learning algorithms. In NeurIPS'12: Proc. of the Twenty-sixth Conference on Neural Information Processing Systems, pages 2951-2959, 2012.
396
+ [31] Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. Gaussian process optimization in the bandit setting: No regret and experimental design. In ICML'10: Proc. of the 27th International Conference on Machine Learning, pages 1015-1022. Omnipress, 2010.
397
+ [32] Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. Information-theoretic regret bounds for gaussian process optimization in the bandit setting. IEEE Trans. Inf. Theory, 58(5):3250-3265, 2012.
398
+ [33] S. Surjanovic and D. Bingham. Virtual library of simulation experiments: Test functions and datasets. Retrieved January 20, 2023, from http://www.sfu.ca/~ssurjano.
399
+ [34] Charles Truong, Laurent Oudre, and Nicolas Vayatis. Selective review of offline change point detection methods. Signal Processing, 167:107299, 2020.
400
+ [35] Christopher Williams and Carl Edward Rasmussen. Gaussian processes for machine learning. MIT press Cambridge, MA, 2006.
401
+ [36] Tianyi Zhang, Daniel Russo, and Assaf Zeevi. Learning to stop with surprisingly few samples. In COLT'21: Proc. of the Conference on Learning Theory, pages 3887-3888. PMLR, 2021.
402
+ [37] Shlomo Zilberstein. Using anytime algorithms in intelligent systems. AI magazine, 17(3):73-83, 1996.
whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5dd50b541f71fc46857d7f5f4745f24a82e5101e508cb0fc8204ed6b6d26314
3
+ size 485112
whynotlookingbackwardarobusttwostepmethodtoautomaticallyterminatebayesianoptimization/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5c5d27ba5fa6ca5bac73d48367d9d688bba349a6b60b7686f92c81ead5fe847
3
+ size 555518
xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/6f8bef3c-7b38-4b28-b2b8-1565385fb36c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b51c55abd90b00455b09d55163a1a6b7375dd353f4f7d2c29dca011e6ff05db
3
+ size 72581
xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/6f8bef3c-7b38-4b28-b2b8-1565385fb36c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43d8ed1552ef7e65fbd8ba03c6fb899996929b9041c71416d7df2935bc94bf04
3
+ size 87291
xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/6f8bef3c-7b38-4b28-b2b8-1565385fb36c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73a2c313cae19f60b117b5da0f0f789d46b164802930eb0556835ab9133f6186
3
+ size 1190616
xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/full.md ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # xTrimoGene: An Efficient and Scalable Representation Learner for Single-Cell RNA-Seq Data
2
+
3
+ Jing Gong $^{1*}$ Minsheng Hao $^{12*}$ Xingyi Cheng $^{1\dagger}$ Xin Zeng $^{1}$
4
+ Chiming Liu $^{1}$ Jianzhu Ma $^{2}$ Xuegong Zhang $^{2}$ Taifeng Wang $^{1}$ Le Song $^{13\dagger}$ $^{1}$ BioMap Research $^{2}$ Tsinghua University
5
+ $^{3}$ Mohamed bin Zayed University of Artificial Intelligence
6
+
7
+ {gongjing, minsheng_2022, xingyi, zengxin, chiming, taifeng, song1e}@biomap.com, {zhangxg, majianzhu}@tsinghua.edu.cn
8
+
9
+ # Abstract
10
+
11
+ Advances in high-throughput sequencing technology have led to significant progress in measuring gene expressions at the single-cell level. The amount of publicly available single-cell RNA-seq (scRNA-seq) data is already surpassing 50M records for humans with each record measuring 20,000 genes. This highlights the need for unsupervised representation learning to fully ingest these data, yet classical transformer architectures are prohibitive to train on such data in terms of both computation and memory. To address this challenge, we propose a novel asymmetric encoder-decoder transformer for scRNA-seq data, called xTrimoGene $^{\alpha}$ (or xTrimoGene for short) $^{4}$ , which leverages the sparse characteristic of the data to scale up the pre-training. This scalable design of xTrimoGene reduces FLOPs by one to two orders of magnitude compared to classical transformers while maintaining high accuracy, enabling us to train the largest transformer models over the largest scRNA-seq dataset today. Our experiments also show that the performance of xTrimoGene improves as we scale up the model sizes, and it also leads to SOTA performance over various downstream tasks, such as cell type annotation, perturb-seq effect prediction, and drug combination prediction. xTrimoGene model is now available for use as a service via the following link: https://api.biomap.com/xTrimoGene/apply.
12
+
13
+ # 1 Introduction
14
+
15
+ Recently, Artificial Intelligence (AI) technology has demonstrated promising results for addressing scientific problems. This AI4Science paradigm witnessed diverse successful biological and pharmaceutical applications, including protein analysis [17, 20, 38, 35, 1], RNA modeling [4], and genomics modulation [27]. However, most existing AI models have predominantly focused on protein sequences, neglecting the growing volume of high-throughput experimental sequencing data in the form of gene expression values. Single-cell RNA sequencing (scRNA-seq) technology has transformed the field of cell biology and enabled us to understand cell-cell, cell-gene and gene-gene relations at the cellular level [16, 3]. This technique captures the expression levels of thousands of genes in parallel, facilitating the study of cellular heterogeneity [5, 19]. This unveiled information is crucial for understanding complex biological systems and disease progression [16, 3]. Integrating and
16
+
17
+ modeling such large-scale scRNA-seq data can reveal rich cellular information and benefit various biological task learning.
18
+
19
+ Representation learning from scRNA-seq data [9] has been an active area of research in past decades. For example, scVAE [11] and scVI [21] apply a variational autoencoder framework to derive low-dimensional cell embeddings, cscGAN [24] uses a Generative Adversarial Network (GAN) architecture to generate cell-type specific expression profiles, and Saver-X [32] is capable of removing batch effects across datasets. Despite the success of these customized algorithms, they tend to be computationally inefficient and labor-intensive. This prompts us to explore a general-purpose model that first learns underlying knowledge from scRNA-seq data and generalizes it to different tasks in a unified manner. We draw inspiration from the pre-training and fine-tuning paradigm in Natural Language Processing (NLP), which has shown great success in improving various downstream NLP task performance [29, 12, 14]. In light of these findings, we aim to investigate the potential of applying similar approaches to representation learning in scRNA-seq data.
20
+
21
+ The first published pre-trained model for single-cell data is scBERT, which uses a low-rank transformer [36] to analyze the scRNA data. It learns the cellular representation by randomly masking a percent of non-zero gene expression values and tries to recover them. scBERT has achieved state-of-the-art results for cell-type annotation tasks. The study shows the potential of a pre-training strategy for single-cell biology research. However, scBERT has certain limitations in fully utilizing scRNA-seq data properties. These limitations include:
22
+
23
+ (1) Scalability. The large number of genes (almost 20,000) and the sparsity of scRNA-seq data, with nearly $90\%$ of values being zero, lead to many redundant computations (e.g., self-attention between zero tokens). It required approximately $2.65 \times 10^{19}$ FLOPs to train 5 million samples over 5 epochs, which equals almost 20 days of training on an A100 GPU for only an 8.9 million parameter scBERT model. (2) Limited resolution for expression values. scBERT rounds the gene expression values into integer values, which limits the model's ability to distinguish closeness and similarity between gene expression values. For instance, two close values could be mapped to separate embeddings (e.g., 1.99 and 2.01 are mapped to 1 and 2), and two distant values could be mapped to identical embeddings (e.g., 1.99 and 1.01 are mapped to 1). The strategy leads to a loss of resolution and introduces bias during model training, resulting in sub-optimal performance.
24
+
25
+ To address the challenges associated with scRNA-seq data modeling and consider the unique nature of this data (as discussed in Section 2), we present a novel and efficient framework, xTrimoGene, for pre-training large-scale scRNA-seq data. Our framework makes the following key contributions:
26
+
27
+ (1) We design an asymmetrical encoder-decoder architecture to guide the pre-training process, which enables us to learn a high-capacity model for single-cell RNA-seq data. Our model achieves an improvement in the speed of pre-training of over 3 times compared to previous encoder-only models.
28
+ (2) We illustrate that the efficiency and scalability of our model allow us to train the largest single-cell pre-trained model to date, with approximately 100 million parameters for the xTrimoGene-100M model, using a curated scRNA-seq dataset of approximately 50 billion effective gene tokens.
29
+ (3) The pre-trained model xTrimoGene achieved remarkable results in multiple downstream tasks, including cell type annotation, perturbation prediction and synergistic drug combination prediction.
30
+
31
+ # 2 Characteristics of Single-Cell RNA-seq Data
32
+
33
+ scRNA-seq generates a large, sparse expression matrix, where each row represents a cell (sample) and each column a gene (feature). This dataset presents several challenges and requires a specialized architecture to effectively model the data.
34
+
35
+ First, approximately 20,000 genes (columns) are shared across cells. Unlike the corpus in NLP, the genes can be arbitrarily reordered. The relation between genes depends on biological pathways rather than local contexts, where the latter shapes spatial information in Computer Vision (CV) images. Though one can roughly regard each cell (row) as a sentence or an image patch, the 20,000 genes constitute a vast number compared to the typical sequence length, which is mostly a few hundred and no more than a few thousand [29, 12]. Thus, directly applying existing transformer architectures will not work.
36
+
37
+ Second, scRNA-seq matrices are highly sparse (90% zero in a typical dataset [15, 7]). The abundance level of RNA for each gene is measured by counting the unique molecular identifier (UMI) reads in
38
+
39
+ scRNA-seq experiments [16, 3]. However, many genes exhibit low UMI counts due to limited probing efficiency. Therefore, treating scRNA-seq data as an image and utilizing a convolutional neural network to extract features is not feasible, as it introduces a huge number of redundant computations for sparse positions.
40
+
41
+ Third, the normalized gene expression values in scRNA-seq data are continuous scalars, which typically indicate similar gene activity when they have similar values. To transform these scalars into high-dimensional tokens in the data matrix, a representation that preserves the continuous semantics is needed. Manually discretizing the gene expression values is challenging as non-optimal discretization thresholds will bias category assignment. A learned discretization approach or learnable representation, such as the one proposed in [10], is ideal for preserving the continuous semantics of the gene expression values.
42
+
43
+ Taking into account the above three major features, we design a new architecture as described in the next section.
44
+
45
+ ![](images/a39c191664734cc2f166c8703e457114275236b796f59f097701d7b4f1ed0d20.jpg)
46
+ Figure 1: The xTrimoGene Framework: (1) Random positions (including both zero and nonzero values) are masked for prediction. (2) Masked and zero-valued positions are filtered out. (3) Remaining unmasked positions are aligned with padding tokens (grey) to ensure maximum length consistency within a batch. (4) Gene expression values and gene embeddings are separately projected into embeddings. (5) These two embeddings are element-wise added. (6) The resulting input is fed into the encoder. (7) The intermediate encoder embedding is combined with embeddings for masked positions and zero embeddings. (8) This combined representation is then fed into the decoder. (9) Decoder embedding is projected to model output with an MLP layer. The MSE loss is calculated between the model output and ground truth values for the masked positions.
47
+
48
+ # 3 xTrimoGene Architecture
49
+
50
+ xTrimoGene is a highly efficient framework for pre-training large-scale single-cell RNA-seq data (illustrated in Figure 1). The training process is based on a regression-masked task, aimed at accurately recovering masked values in the expression matrix. Notably, a specific optimized asymmetrical encoder-decoder framework is employed to accelerate the learning of sparse matrices. This is achieved by feeding only the unmasked non-zero positions (less than $10\%$ of the full length) into the encoder, while the largely masked and zero positions are input into a lightweight decoder with a reduced number of layers and attention heads. In addition, a novel auto-discretization strategy is introduced to project continuous expression values into a latent embedding space. Instead of rounding to the nearest integer, values are directly mapped to the latent space allowing for the representation of closely related values. The xTrimoGene framework consists of the following components:
51
+
52
+ Masking: A portion of the normalized gene expression matrix $V$ is masked for prediction, including both zero and non-zero positions. $c$ denotes cell sample size, and $n$ denotes gene number (19,264 in our setting, see App. 1 for data collection and processing).
53
+
54
+ Filtering: The masked and zero-valued embeddings are filtered out, yielding a variable-length sequence of valuable information that is prepared for encoding.
55
+
56
+ Padding: The remaining unmasked positions are aligned with padding tokens, resulting in a much smaller unmasked-only matrix $V_{\text{unmasked}}$ . $m$ denotes the maximum length of the unmasked sample. We include a scheme to illustrate the processing flow (see App. 2).
57
+
58
+ Embedding: Expression value and gene embeddings are separately projected. $d$ denotes the dimension of the embedding. The expression embedding is calculated through an auto-discretization mapping. The gene embedding is retrieved from a randomly initialized lookup table.
59
+
60
+ Combining Expression and Gene Embeddings: The expression and gene embeddings $(E$ and $G)$ are element-wise added to form the input embedding, which is then fed into the encoder of the model.
61
+
62
+ Encoding: The sum of the embeddings is input into the encoder, which implements self-attention mechanisms using a Transformer-like architecture.
63
+
64
+ Extending masked and zero embeddings: The intermediate encoder embedding $I_{encoder}$ is combined with embeddings for masked and zero-value positions.
65
+
66
+ Decoding: The combined embeddings are processed by the decoder, utilizing self-attention mechanisms instead of the typical causal attention used in NLP decoders.
67
+
68
+ Loss Computation: Decoder embedding is projected to model output with an MLP layer. The mean squared error (MSE) loss is computed between the predicted masked values from the model and their corresponding ground truth values.
69
+
70
+ # 3.1 Encoder
71
+
72
+ The scRNA-seq data is characterized by its high sparsity, with cell information largely concentrated in the non-zero expression values. Thus, the encoder is designed to focus only on the non-zero part of the unmasked matrix, $V_{\text{unmasked}}$ . The encoder is based on a traditional multi-head attention transformer and takes the combination of value embedding, $E$ , and gene embedding, $G$ , as its input, $I \in \mathbb{R}^{c \times m \times d}$ . The value and gene embeddings are similar to the word and positional embeddings in natural language modeling, respectively. The value embedding, $E$ , is generated using the auto-discretization strategy discussed previously, while the gene embedding, $G$ , is retrieved from a function $f_{L}$ that maps the gene symbols into the embedded vocabulary.
73
+
74
+ $$
75
+ E = \operatorname{Autobin}\left(V_{\text{unmasked}} \odot M_{\text{nonzero}}\right), \quad G = f_L(\text{genes}), \quad I = E + G \tag{1}
76
+ $$
77
+
78
+ Then the encoder processes the input embeddings $I$ and generates the high-level gene representations $I_{encoder} \in \mathbb{R}^{c \times m \times d}$ via the multi-head attention mechanism.
79
+
80
+ $$
81
+ I_{\text{encoder}} = \operatorname{Trm}\left(f_Q(I), f_K(I), f_V(I)\right) \tag{2}
82
+ $$
83
+
84
+ where $f_{Q}, f_{K}, f_{V}$ are the project functions. Trm denotes the Transformer block.
85
+
86
+ It is worth emphasizing that our encoder only operates on a subset of genes, reducing the length of the processed sequence to 1/10 of the original. This allows the full-length transformer to be used without any computational approximations.
87
+
88
+ # 3.2 Decoder
89
+
90
+ Unlike the encoder, which focuses on the main information (non-zero expression values) in the cells, the decoder in the system performs full-length feature abstraction and extraction. The input to the decoder, $I_{full}$ , comprises three token types: the output from the encoder, $I_{encoder}$ , the zero-expression gene embeddings $I_{zero}$ , and the mask token embeddings $I_{masked}$ . Out of these tokens, genes with zero expression make up 90% of all tokens. The gene embeddings are concatenated with all of these tokens to provide the decoder with gene-specific information for the corresponding mask tokens, followed by a fully connected layer.
91
+
92
+ $$
93
+ I_{full} = W_p\left(I_{\text{encoder}} \oplus I_{\text{zero}} \oplus I_{\text{masked}}\right) + b_p \tag{3}
94
+ $$
95
+
96
+ where $\oplus$ represents the concatenation operation, and $W_{p}$ and $b_{p}$ are learnable parameters that project the decoder's embedding size.
97
+
98
+ The decoder in the framework is optimized for long-sequence attention calculations and employs the Performer architecture as its backbone. The decoder transforms the input $I_{full}$ into final gene-level embeddings, $I_{decoder} \in \mathbb{R}^{c \times n \times d}$ , and predicts the masked values through a shared linear layer, $W \in \mathbb{R}^{d \times 1}$ , applied across all genes. The operations are expressed as follows:
99
+
100
+ $$
101
+ I_{\text{decoder}} = \operatorname{Trm}\left(f_Q(I_{\text{full}}), f_K(I_{\text{full}}), f_V(I_{\text{full}})\right), \quad \tilde{V} = I_{\text{decoder}} \cdot W \tag{4}
102
+ $$
103
+
104
+ The decoder has a smaller model size compared to the encoder, with a smaller embedding size, fewer attention layers, and fewer attention heads. For instance, in the largest model configuration, the layer depth ratio between the encoder and decoder is 2:1 and the head number ratio is 1.5:1 (see App. Table 2). Similarly, the principle of asymmetric encoder-decoder design has been proven powerful in masked autoencoders (MAE) [13], which is tailored for CV data pre-training. Unlike MAE, xTrimoGene utilizes the biased masking strategy to avoid the learning process being dominated by zero tokens. Though the scRNA-seq data is distinct from images, our results show that the performance gains of xTrimoGene are comparable to those of MAE, with more efficient training and better downstream task performance.
105
+
106
+ # 3.3 Auto-discretization strategy
107
+
108
+ Our aim is to transform an expression value $v$ into a hidden embedding, denoted as $e$ . The transformation is achieved using an auto-discretization block. This auto-discretization process involves a random look-up table $T$ defined in $\mathbb{R}^{d \times b}$ . In this representation, $d$ refers to the embedding dimension, while $b$ is the number of bins with a default value of 100. The transformation starts by applying a linear layer to the expression value, given by $v_{1} = v \cdot w_{1}$ , where $w_{1}$ represents the weight vector. The resulting $v_{1}$ is then subjected to a leaky ReLU activation, resulting in $v_{2} = \text{Leaky\_ReLU}(v_{1})$ . Subsequently, a cross-layer projection is applied, represented by $v_{3} = w_{2} \cdot v_{2} + \alpha \cdot v_{2}$ . Here, $w_{2}$ denotes the weight vector, and $\alpha$ is a scaling mixture factor. Next, the bin weights of $v_{3}$ are normalized using the softmax function, resulting in $v_{4} = \text{softmax}(v_{3})$ . Finally, the transformed value is represented as a weighted combination of individual embeddings from the look-up table, given by $e = T \cdot v_{4}$ . It's important to note that the weights in this combination serve as learnable parameters.
109
+
110
+ To validate the effectiveness of the expression value projection, we conducted an analysis of viewing the weight distribution pattern for continuous values. Our results showed that the normalized weight distribution of close values exhibited smooth transitions, while that of distant values was clearly distinguishable (App. section 3 Figure 1). This supports the conclusion that the auto-discretization strategy effectively represents continuous values with high resolution while preserving relatively rich meaning.
111
+
112
+ We also compared the performance of the proposed auto-discretization strategy with three other discretization methods: (1) Round bin with zero, in which values are rounded to the nearest integer, and zeros are kept as they are, (2) Up bin without zero. Values greater than zero are converted to the nearest ceiling integer, while zero is represented as an individual 0. (3) Equal bin. All the values fall into a fixed percentage interval, which is calculated by value distribution and frequency. We evaluated the different strategies on a standard cell clustering task (see App. 4) and found that the proposed auto-discretization strategy outperformed the others (as shown in Figure 2A), demonstrating the importance of high-resolution projections in handling expression values.
113
+
114
+ # 4 Training Strategy
115
+
116
+ We now explain the strategy used to train the asymmetric encoder-decoder transformer. Pre-trained task and masking strategy are outlined, see App. 5 for acceleration strategy.
117
+
118
+ # 4.1 Regression masked task
119
+
120
+ The traditional masked language task is a multi-class classification problem, where the prediction target is a single token with limited, naturally distinct categories. In contrast, the normalized gene expression value is a continuous scalar. To fit the data property, we modify the pre-training
121
+
122
+ ![](images/cc6027effb573b139d532592a35b930d17e46f8db4bc87d9c49a6c0e295363ae.jpg)
123
+ Figure 2: Pre-training strategy ablation study. (A) Performance comparison between auto discretization strategy and other binning methods for expression value projection. The cell clustering task is evaluated and five metrics are displayed. ARI for Adjusted Rand index, NMI for Normalized Mutual Information, HOMO for Homogeneity, CP for Completeness and SIL for Silhouette Coefficient. (B) Performance of pre-trained models with different task modes, including regression and classification settings. The cell clustering task is evaluated. See the main text for details.
124
+
125
+ ![](images/1d7c1f713a71d1c5a0e3b8c073643e2337591ba40ac94adaebdf17e5ff4aa650.jpg)
126
+
127
+ objective to a regression task, aimed at recovering the absolute value of the masked positions. The loss function employed is the MSE between the ground truth and the predicted values:
128
+
129
+ $$
130
+ \operatorname{Loss} = \frac{1}{(n - m) \cdot c} \sum \left(V_{i,j} - \tilde{V}_{i,j}\right)^2 \tag{5}
131
+ $$
132
+
133
+ where $n$ represents the number of all genes, $m$ represents the maximum length of the unmasked positions in a sample, and $c$ represents the number of cells. To evaluate the efficacy of this modification, we compared the regression setting with the classification setting on the cell clustering task. The results indicate that the regression model outperforms the classification model (Figure 2B), providing evidence of the benefits of learning a more fitted representation.
134
+
135
+ # 4.2 Masking strategy
136
+
137
+ We mask both non-zero and zero positions even though the scRNA-seq expression matrix is highly sparse (where the zero percentage is usually over $90\%$ ). As the percentage of zero positions is much higher than that of non-zero positions, the masking ratio can't be the same for the two types. Otherwise, the model tends to predict all zeros and still obtains a low error level. We propose to mask an almost equal number of positions for zero and non-zero positions (see App. section 6 Table 1). This setting forces the model to learn embeddings for all values and not to be dominated by the zero representation. We found zero-value supervision is necessary to boost the performance (App. Figure 2), which demonstrates that some zeros represent truly extremely low expression levels. This type of zero is informative for illustrating how gene abundance behaves inside the cell.
138
+
139
+ The recovery of masked tokens in NLP is challenging due to the fact that word comprehension relies heavily on long-range interactions rather than local context. Accurate inference of the missing tokens can be achieved at low masking ratios (15%) where the information in the entire sentence is still relatively redundant and encoded by the unmasked tokens. We investigated the density of information needed for the scRNA-seq regression task by training models with different masking ratios (for non-zero values, the ratio was set 10 times higher than for zero values) ranging from 15% to 90% with a 15% interval. The models were then evaluated on the cell clustering task, with the results showing that performance improved first and then degraded as the masking ratio increased. When the masking ratio was close to 30%, the majority of metrics reached a peak (App. Figure 3). We also found current biased masking is optimal (App. Figure 4) and the percentage of [MASK] tokens agrees well with NLP tasks (App. Figure 5). These results suggest that the scRNA-seq expression vector contains more redundant information than a sentence and highlight the role of hidden regulations between genes in constraining the inference of expression values.
140
+
141
+ # 5 Experiments
142
+
143
+ Next we will explain our experimental settings and results. The dataset description can be referred to App. 1.
144
+
145
+ # 5.1 Computational efficiency
146
+
147
+ We quantitatively compared the training cost of xTrimoGene with two other encoder-only models, namely the full-length attention Transformer and the kernel-based approximation Performer (scBERT). For an apples-to-apples comparison, all three models are set to approximately 10 million trainable parameters and trained on 5 million samples over 5 epochs. We calculated the corresponding FLOPs, where only matrix multiplication operations are considered. We observed that the total FLOPs for Performer (scBERT) decreased to $10\%$ of the native Transformer's (see Table 1). Notably, xTrimoGene runs 3 times faster than Performer. The results validate the efficiency of xTrimoGene, which is readily adapted for large-scale data pre-training.
148
+
149
+ Table 1: Computational efficiency comparison between different algorithms. The resource column is normalized by the Transformer row.
150
+
151
+ <table><tr><td>Model name</td><td>Parameter (M)</td><td>Forward + backward (FLOPs/sample)</td><td>Total train (FLOPs)</td><td>Resource</td></tr><tr><td>Transformer</td><td>11.3</td><td>9.86E+12</td><td>2.46E+20</td><td>100%</td></tr><tr><td>Performer</td><td>8.9</td><td>1.06E+12</td><td>2.65E+19</td><td>10.8%</td></tr><tr><td>xTrimoGene</td><td>9.8</td><td>3.35E+11</td><td>8.38E+18</td><td>3.4%</td></tr></table>
152
+
153
+ # 5.2 Scalability
154
+
155
+ The Deep Learning community has shown significant interest in the scalability of proposed models [18, 2]. Vanilla Transformer models are challenging to scale due to their computational time and resource requirements, which increase quadratically with model size. Varieties of attention mechanisms have been proposed to accelerate training speed, a critical factor for model scaling.
156
+
157
+ To test the scale-up ability of xTrimoGene, we pre-trained three models across multiple compute regions and scales (e.g., from 3M to 100M parameters). The detailed hyperparameter setting is displayed in the App. Table 2. The training curves clearly show that all models steadily converge to a lower loss as training steps increase (App. Figure 6). More importantly, the xTrimoGene-100M model obtains a significant improvement over the xTrimoGene-10M model, which is in turn superior to the xTrimoGene-3M model. The tendency is consistent across different data sizes. The results suggest the xTrimoGene framework scales up robustly, making it possible and convenient to pre-train larger models with more data.
158
+
159
+ # 5.3 Robustness on high sparse data
160
+
161
+ scRNA-seq data often exhibit varying levels of sparsity, thus it's necessary to assess whether xTrimoGene is robust in handling different sparse data. To verify the robustness, we divided the test samples into subgroups based on cell type and calculated the sparsity level (i.e., percentage of zero values in the expression matrix) and Pearson correlation coefficient between the predicted and actual values. Our results reveal that the correlation gradually decreases as the sparsity level increases, as expected (Figure 3A). However, the correlation remains above 0.8 even when the sparsity level reaches $96\%$ (Figure 3A), indicating the robustness of xTrimoGene. We also compared xTrimoGene's performance with Performer and found that xTrimoGene consistently achieves a higher correlation across most subgroups (Figure 3B). These findings demonstrate that xTrimoGene is robust in handling highly sparse data and outperforms encoder-only architectures.
162
+
163
+ The performance of the encoder-decoder and encoder-only architectures has been comparatively analyzed in the NLP domain, with the former demonstrating effectiveness in language comprehension and the latter in context generation. Apart from the comparison on masked value recovery, we further evaluated xTrimoGene against the encoder-only Performer on the cell clustering task. The results
164
+
165
+ ![](images/88ba281cdce17df086ef475c2cd17dd55199b68fe71b6aef372e7f1638e2077b.jpg)
166
+ Figure 3: Comparison of performance for different sparse level data. (A) xTrimoGene performance for recovering masked values at different sparse levels. Each dot represents a subset defined by cell type. Sparse level is calculated as the ratio between zero value percentages. Pearson correlation coefficient metric is calculated on masked positions. (B) Performance comparison of xTrimoGene and Performer while recovering masked values at different sparse levels. Dot has the same meaning as (A) but the dot size is proportional to the sparse level. Both the x and y axis denotes the Pearson correlation coefficient metric for a particular algorithm. (C) Comparison of performance for xTrimoGene framework and encoder-only framework. Cell clustering task is evaluated.
167
+
168
+ ![](images/6ce8a2cb3eb50ac8128219982c2f258133c189f8590c8bc937ad6e5608a49d29.jpg)
169
+
170
+ ![](images/aac21c2f54ac92fa870fb558086ed38ca5af4bff9971101e6ebd8debd8648ef1.jpg)
171
+
172
+ demonstrate that xTrimoGene achieves superior performance, reaffirming its proficiency in latent embedding extraction (Figure 3C).
173
+
174
+ # 5.4 Evaluation on downstream tasks
175
+
176
+ Currently, multiple tasks have been established to evaluate different models, including well-defined cell type annotation and recently developed perturbation response prediction tasks. We first assessed the performance of xTrimoGene on these single-cell tasks. Additionally, we explored the potential application on bulk RNA-sequencing data, with a focus on synergistic drug combination prediction.
177
+
178
+ # 5.4.1 Cell type annotation
179
+
180
+ First, we evaluated xTrimoGene's performance on the cell type annotation task with the Zheng68K [39] and Segerstolpe [31] datasets, which have been widely benchmarked. We compared xTrimoGene against several other methods, including scBERT [36], ACTINN [23], Scanpy [34], CellTypist [6], scVI [21] and singleCellNet [37]. For the xTrimoGene model, we added a max-pooling layer and a linear layer to predict cell type labels in fine-tuning mode (see App. 8.1). For the other methods, we followed their instructions with the default parameter settings. We observed that xTrimoGene achieves a high Precision and F1 score, surpassing all the other methods (Table 2). The results indicated that xTrimoGene learns a well-represented cellular embedding (visualized in App. Figure 7) by simply aggregating contextual gene embeddings.
181
+
182
+ Table 2: The cell annotation performance on the Zheng68K and Segerstolpe dataset. xTrimoGene is evaluated with 100M parameter model.
183
+
184
+ <table><tr><td rowspan="2">Method Name</td><td colspan="2">Zheng68K</td><td colspan="2">Segerstolpe</td></tr><tr><td>Precision</td><td>F1 score</td><td>Precision</td><td>F1 score</td></tr><tr><td>xTrimoGene</td><td>0.7335 ± 0.0226</td><td>0.7354 ± 0.0189</td><td>0.8112 ± 0.0009</td><td>0.8140 ± 0.0008</td></tr><tr><td>scBERT</td><td>0.7029 ± 0.0115</td><td>0.6695 ± 0.0077</td><td>0.6818 ± 0.0736</td><td>0.6703 ± 0.0653</td></tr><tr><td>ACTINN</td><td>0.6720 ± 0.0021</td><td>0.6486 ± 0.0041</td><td>0.7545 ± 0.0018</td><td>0.7219 ± 0.0073</td></tr><tr><td>Scanpy</td><td>0.6111 ± 0.0017</td><td>0.5474 ± 0.0085</td><td>0.6274 ± 0.0000</td><td>0.5398 ± 0.0000</td></tr><tr><td>CellTypist</td><td>0.7454 ± 0.0009</td><td>0.7151 ± 0.0038</td><td>0.7923 ± 0.0003</td><td>0.8117 ± 0.0001</td></tr><tr><td>scVI</td><td>0.4883 ± 0.0005</td><td>0.4843 ± 0.0008</td><td>0.5101 ± 0.0022</td><td>0.5208 ± 0.0016</td></tr><tr><td>singleCellNet</td><td>0.6452 ± 0.0013</td><td>0.5982 ± 0.0027</td><td>0.7551 ± 0.0096</td><td>0.8055 ± 0.0076</td></tr></table>
185
+
186
+ # 5.4.2 Perturbation response prediction
187
+
188
+ Recently, perturb-seq technology was established to screen gene expression response given pooled perturbations at single-cell level [8]. Several algorithms have also been developed to predict perturbation effects [30, 22] at the single cell level, i.e., what is the expression value of genes after perturbation? We compared the native GEARS[30] model with and without incorporating embeddings from xTrimoGene.
189
+
190
+ The normal state (before perturbation) gene expression profile is fed into xTrimoGene and we obtain the context embedding, which replaces the raw expression value input in the GEARS model (App. 8.2). All the other settings remain unchanged. The evaluated dataset (Norman et al. [26]) contains both single and double gene perturbations, and we thus assess the performance across different perturbation levels. As shown in Figure 4A, GEARS with xTrimoGene embedding scores a lower MSE (a $14.8\%$ decrease) for the top 20 differentially expressed genes across all perturbation scenarios. Notably, the tendency is consistent across different perturbation levels, regardless of whether the perturbed target is seen or not. We also compared against the scBERT embedding and observed a similar trend, where xTrimoGene achieves better results (App. Table 3). The results demonstrated that the pre-training strategy empowers xTrimoGene to capture constraints under various circumstances, including post-perturbation states. The application further proved the efficacy and potential of xTrimoGene to boost scRNA-seq-based tasks.
191
+
192
+ ![](images/3c7e102cb6538c990c8354bcebbd32f7a41da98b8fd899c84e9771dead95c3f5.jpg)
193
+ Figure 4: (A) The MSE of the top 20 differentially expressed (DE) genes given by different models on perturbation response prediction. The top 20 DE genes are calculated between the before and post-perturbation expression profiles. "Total" denotes evaluating all test perturbation sets. "1-gene" denotes evaluation on the single gene perturbation subset, where the perturbed target is not seen in the training set. "2-gene" represents the sub-test set for perturbing two genes simultaneously. "seen0", "seen1" and "seen2" denote that zero, one or two perturbed targets are not seen in the training set, respectively. The black line denotes a $95\%$ confidence interval. (B) ROC curve of different models on the drug combination synergy prediction task. xTrimoGene denotes replacing the raw expression profile with context embeddings in the DeepDDS framework while the rest remains unchanged. Refer to App. 8.3 for more details.
194
+
195
+ ![](images/acaca924e006b86371eedf101958ae5b30795481779683b389b1006142623bfc.jpg)
196
+
197
+ # 5.4.3 Synergistic drug combinations prediction
198
+
199
+ The drug synergistic task evaluates how patients or cells respond to a drug combination intervention [25]. However, the generated wet-lab experimental data only covers a tiny search space of possible drug combinations. Multiple models have been proposed to accelerate predicting the synergistic landscape of drugs [28, 33]. For instance, DeepDDS integrates genomic expression profiles and drug chemical information, greatly improving the prediction performance. We further explored whether xTrimoGene is able to generate good latent embedding for this bulk expression data.
200
+
201
+ Similar to the perturbation prediction test, we adapted xTrimoGene to DeepDDS with the intermediate context embedding (see App. 8.3). We also included DeepSynergy and Random Forest for comparison. As illustrated in Figure 4B, utilizing embedding from the xTrimoGene model outperforms all the other models. The result proved that xTrimoGene can accurately capture cell-level representation, even for bulk sequencing data. This also opens the avenue for xTrimoGene to be applied across other biological modeling tasks, especially where bulk-level transcriptome data is available.
202
+
203
+ # 6 Conclusion
204
+
205
+ xTrimoGene is a new, efficient framework for learning scRNA-seq data. It proposes an asymmetric encoder-decoder framework that takes advantage of the sparse gene expression matrix and establishes the projection strategy of continuous values with a higher resolution. The results show that xTrimoGene is scalable and performs well on tasks like cell type annotation, perturbation response prediction, and synergistic drug combination prediction. The experiments demonstrate the efficacy of pre-training in single-cell biology. xTrimoGene is potentially adapted to other types of cell modeling analysis, including rare cell detection (App. 8.4), batch effect removal and regulatory network construction.
206
+
207
+ Certain limitations exist for xTrimoGene and further work is desired to advance the design. At present, xTrimoGene mainly utilizes gene expression values during the pre-training stage, overlooking a variety of other related meta-information like sample condition (health/disease), cell type, tissue type, sequencing platform, etc. These rich annotations are biologically meaningful and highly correlated with the expression pattern within a cell. The memory consumption for inference with the xTrimoGene-100M model is approximately 50GB, whose hardware requirement (Nvidia A100 80G GPU) is beyond some academic labs, thus computational or memory-efficient engineering techniques tend to advance the model pre-training and application.
208
+
209
+ xTrimoGene has been integrated into BioMap's single-cell analysis platform, functioning as a fundamental and essential model (as depicted in the App. Figure 9). The pre-trained model services have been publicly available. In the future, with the increase of data, larger pre-trained models are expected to drive more advancements in various downstream task learning.
210
+
211
+ # Author Contributions and Acknowledgments
212
+
213
+ Le Song led the project by designing its scope, conceptualizing ideas, integrating resources, and making decisions on techniques. Xingyi Cheng played a key role in the development of the xTrimoGene framework, auto-discretization and unsupervised objectives, contributing concrete ideas and pseudocode, along with code review. Jing Gong and Mingsheng Hao (Research Intern at BioMap) were primarily responsible for conducting pre-training and downstream experiments, serving as the first authors of the paper. Their work covered areas such as model scaling, cell type annotation, perturbation response prediction, and synergistic drug combinations prediction. Xin Zeng made significant contributions to the code of the xTrimoGene framework, worked with an early performer version, and conducted initial downstream experiments. Chiming Liu oversaw the engineering aspects of the project, including the implementation of the data pipeline and FLOPs computation. Jianzhu Ma, Xuegong Zhang, Taifeng Wang, and Le Song conceived the project and provided invaluable guidance for the project and contributed their expertise in computational biology knowledge. Taifeng Wang also played a pivotal role in pushing for the model's service implementation. Finally, Jing Gong, Mingsheng Hao, Xingyi Cheng, and Le Song collectively contributed to writing this paper.
214
+
215
+ In addition, we would like to express our gratitude to the individuals at BioMap who have made contributions to our project. Chenrui Xu and Yucheng Guo played roles in data preprocessing and integration. Zhaoren He's expertise in data analysis and application greatly enhanced our work, and we deeply appreciate his contributions.
216
+
217
+ This work was supported by the Ministry of Science and Technology of the People's Republic of China (2022YFF1203004), the Beijing Municipal Science & Technology Commission and the Administrative Commission of Zhongguancun Science Park (Z221100003522022). This work was also funded by BioMap.
218
+
219
+ # References
220
+
221
+ [1] Nadav Brandes, Dan Ofer, Yam Peleg, Nadav Rappoport, and Michal Linial. ProteinBERT: a universal deep-learning model of protein sequence and function. Bioinformatics, 38(8):2102-2110, 02 2022.
222
+ [2] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
223
+
224
+ [3] Geng Chen, Baitang Ning, and Tieliu Shi. Single-cell rna-seq technologies and related computational data analysis. Frontiers in genetics, page 317, 2019.
225
+ [4] Jiayang Chen, Zhihang Hu, Siqi Sun, Qingxiong Tan, Yixuan Wang, Qinze Yu, Licheng Zong, Liang Hong, Jin Xiao, Tao Shen, et al. Interpretable RNA foundation model from unannotated data for highly accurate RNA structure and function predictions. bioRxiv, pages 2022-08, 2022.
226
+ [5] Sijie Chen, Yanting Luo, Haoxiang Gao, Fanhong Li, Yixin Chen, Jiaqi Li, Renke You, Minsheng Hao, Haiyang Bian, Xi Xi, et al. heca: The cell-centric assembly of a cell atlas. Iscience, 25(5):104318, 2022.
227
+ [6] C. Dominguez Conde, C. Xu, L. B. Jarvis, D. B. Rainbow, S. B. Wells, T. Gomes, S. K. Howlett, O. Suchanek, K. Polanski, H. W. King, L. Mamanova, N. Huang, P. A. Szabo, L. Richardson, L. Bolt, E. S. Fasouli, K. T. Mahbubani, M. Prete, L. Tuck, N. Richoz, Z. K. Tuong, L. Campos, H. S. Mousa, E. J. Needham, S. Pritchard, T. Li, R. Elmentaite, J. Park, E. Rahmani, D. Chen, D. K. Menon, O. A. Bayraktar, L. K. James, K. B. Meyer, N. Yosef, M. R. Clatworthy, P. A. Sims, D. L. Farber, K. Saeb-Parsy, J. L. Jones, and S. A. Teichmann. Cross-tissue immune cell analysis reveals tissue-specific features in humans. Science, 376(6594):eabl5197, 2022.
228
+ [7] Jiarui Ding, Xian Adiconis, Sean K. Simmons, Monika S. Kowalczyk, Cynthia C. Hession, Nemanja D. Marjanovic, Travis K. Hughes, Marc H. Wadsworth, Tyler Burks, Lan T. Nguyen, John Y. H. Kwon, Boaz Barak, William Ge, Amanda J. Kedaigle, Shaina Carroll, Shuqiang Li, Nir Hacohen, Orit Rozenblatt-Rosen, Alex K. Shalek, Alexandra-Chloe Villani, Aviv Regev, and Joshua Z. Levin. Systematic comparison of single-cell and single-nucleus rna-sequencing methods. Nature Biotechnology, 38(6):737-746, Jun 2020.
229
+ [8] Atray Dixit, Oren Parnas, Biyu Li, Jenny Chen, Charles P Fulco, Livnat Jerby-Arnon, Nemanja D Marjanovic, Danielle Dionne, Tyler Burks, Raktima Raychowdhury, et al. Perturb-seq: dissecting molecular circuits with scalable single-cell RNA profiling of pooled genetic screens. cell, 167(7):1853-1866, 2016.
230
+ [9] Mario Flores, Zhentao Liu, Tinghe Zhang, Md Musaddaqui Hasib, Yu-Chiao Chiu, Zhenqing Ye, Karla Paniagua, Sumin Jo, Jianqiu Zhang, Shou-Jiang Gao, et al. Deep learning tackles single-cell analysis—a survey of deep learning for scrna-seq analysis. Briefings in bioinformatics, 23(1):bbab531, 2022.
231
+ [10] Yury Gorishniy, Ivan Rubachev, and Artem Babenko. On embeddings for numerical features in tabular deep learning. Advances in Neural Information Processing Systems, 35:24991-25004, 2022.
232
+ [11] Christopher Heje Gronbech, Maximillian Fornitz Vording, Pascal N Timshel, Casper Kaae Sønderby, Tune H Pers, and Ole Winther. scVAE: variational auto-encoders for single-cell gene expression data. Bioinformatics, 36(16):4415–4422, 05 2020.
233
+ [12] Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Yuan Yao, Ao Zhang, Liang Zhang, Wentao Han, Minlie Huang, Qin Jin, Yanyan Lan, Yang Liu, Zhiyuan Liu, Zhiwu Lu, Xipeng Qiu, Ruihua Song, Jie Tang, Ji-Rong Wen, Jinhui Yuan, Wayne Xin Zhao, and Jun Zhu. Pre-trained models: Past, present and future. AI Open, 2:225-250, 2021.
234
+ [13] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022.
235
+ [14] Dan Hendrycks, Kimin Lee, and Mantas Mazeika. Using pre-training can improve model robustness and uncertainty. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 2712-2721. PMLR, 09-15 Jun 2019.
236
+ [15] Ruochen Jiang, Tianyi Sun, Dongyuan Song, and Jingyi Jessica Li. Statistics or biology: the zero-inflation controversy about scrna-seq data. Genome Biology, 23(1):31, Jan 2022.
237
+ [16] Dragomirka Jovic, Xue Liang, Hua Zeng, Lin Lin, Fengping Xu, and Yonglun Luo. Single-cell RNA sequencing technologies and applications: A brief overview. Clinical and Translational Medicine, 12(3):e694, 2022.
238
+
239
+ [17] John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin Žídek, Anna Potapenko, et al. Highly accurate protein structure prediction with alphafold. Nature, 596(7873):583-589, 2021.
240
+ [18] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020.
241
+ [19] Mengwei Li, Xiaomeng Zhang, Kok Siong Ang, Jingjing Ling, Raman Sethi, Nicole Yee Shin Lee, Florent Ginhoux, and Jinmiao Chen. Disco: a database of deeply integrated human single-cell omics data. Nucleic acids research, 50(D1):D596-D602, 2022.
242
+ [20] Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Nikita Smetanin, Robert Verkuil, Ori Kabeli, Yaniv Shmueli, et al. Evolutionary-scale prediction of atomic-level protein structure with a language model. Science, 379(6637):1123-1130, 2023.
243
+ [21] Romain Lopez, Jeffrey Regier, Michael B Cole, Michael I Jordan, and Nir Yosef. Deep generative modeling for single-cell transcriptomics. Nature methods, 15(12):1053-1058, 2018.
244
+ [22] Mohammad Lotfollahi, Anna Klimovskaia Susmelj, Carlo De Donno, Yuge Ji, Ignacio L Ibarra, F Alexander Wolf, Nafissa Yakubova, Fabian J Theis, and David Lopez-Paz. Compositional perturbation autoencoder for single-cell response modeling. BioRxiv, 2021.
245
+ [23] Feiyang Ma and Matteo Pellegrini. Actinn: automated identification of cell types in single cell RNA sequencing. Bioinformatics, 36(2):533-538, 2020.
246
+ [24] Mohamed Marouf, Pierre Machart, Vikas Bansal, Christoph Kilian, Daniel S. Magruder, Christian F. Krebs, and Stefan Bonn. Realistic in silico generation and augmentation of single-cell RNA-seq data using generative adversarial networks. Nature Communications, 11(1):166, 2020.
247
+ [25] Reza Bayat Mokhtari, Tina S Homayouni, Narges Baluch, Evgeniya Morgatskaya, Sushil Kumar, Bikul Das, and Herman Yeger. Combination therapy in combating cancer. Oncotarget, 8(23):38022, 2017.
248
+ [26] Thomas M. Norman, Max A. Horlbeck, Joseph M. Replogle, Alex Y. Ge, Albert Xu, Marco Jost, Luke A. Gilbert, and Jonathan S. Weissman. Exploring genetic interaction manifolds constructed from rich single-cell phenotypes. Science, 365(6455):786-793, 2019.
249
+ [27] Ahmad Pesaranghader, Stan Matwin, Marina Sokolova, Jean-Christophe Grenier, Robert G Beiko, and Julie Hussin. deepSimDEF: deep neural embeddings of gene products and gene ontology terms for functional analysis of genes. Bioinformatics, 38(11):3051–3061, 05 2022.
250
+ [28] Kristina Preuer, Richard P I Lewis, Sepp Hochreiter, Andreas Bender, Krishna C Bulusu, and Günter Klambauer. DeepSynergy: predicting anti-cancer drug synergy with Deep Learning. Bioinformatics, 34(9):1538-1546, 12 2017.
251
+ [29] Xipeng Qiu, Tianxiang Sun, Yige Xu, Yunfan Shao, Ning Dai, and Xuanjing Huang. Pre-trained models for natural language processing: A survey. Science China Technological Sciences, 63(10):1872-1897, 2020.
252
+ [30] Yusuf Roohani, Kexin Huang, and Jure Leskovec. Gears: Predicting transcriptional outcomes of novel multi-gene perturbations. BioRxiv, pages 2022-07, 2022.
253
+ [31] Åsa Segerstolpe, Athanasia Palasantza, Pernilla Eliasson, Eva-Marie Andersson, Anne-Christine Andreasson, Xiaoyan Sun, Simone Picelli, Alan Sabirsh, Maryam Clausen, Magnus K Bjursell, David M Smith, Maria Kasper, Carina Åmmälä, and Rickard Sandberg. Single-cell transcriptome profiling of human pancreatic islets in health and type 2 diabetes. Cell Metab., 24(4):593-607, October 2016.
254
+ [32] Jingshu Wang, Divyansh Agarwal, Mo Huang, Gang Hu, Zilu Zhou, Chengzhong Ye, and Nancy R Zhang. Data denoising with transfer learning in single-cell transcriptomics. Nature methods, 16(9):875-878, 2019.
255
+
256
+ [33] Jinxian Wang, Xuejun Liu, Siyuan Shen, Lei Deng, and Hui Liu. Deepdds: deep graph neural network with attention mechanism to predict synergistic drug combinations. *Briefings in Bioinformatics*, 23(1):bbab390, 2022.
257
+ [34] F Alexander Wolf, Philipp Angerer, and Fabian J Theis. Scanpy: large-scale single-cell gene expression data analysis. Genome biology, 19:1-5, 2018.
258
+ [35] Yijia Xiao, Jiezhong Qiu, Ziang Li, Chang-Yu Hsieh, and Jie Tang. Modeling protein using large-scale pretrain language model. arXiv preprint arXiv:2108.07435, 2021.
259
+ [36] Fan Yang, Wenchuan Wang, Fang Wang, Yuan Fang, Duyu Tang, Junzhou Huang, Hui Lu, and Jianhua Yao. scbert as a large-scale pretrained deep language model for cell type annotation of single-cell rna-seq data. Nature Machine Intelligence, 4(10):852-866, 2022.
260
+ [37] Yuqi Tan and Patrick Cahan. SingleCellNet: A computational tool to classify single cell rna-seq data across platforms and across species. Cell Systems, 9(2):207-213, 2019.
261
+ [38] Ningyu Zhang, Zhen Bi, Xiaozhuan Liang, Siyuan Cheng, Haosen Hong, Shumin Deng, Jiazhang Lian, Qiang Zhang, and Huajun Chen. Ontoprotein: Protein pretraining with gene ontology embedding. arXiv preprint arXiv:2201.11147, 2022.
262
+ [39] Grace XY Zheng, Jessica M Terry, Phillip Belgrader, Paul Ryvkin, Zachary W Bent, Ryan Wilson, Solongo B Ziraldo, Tobias D Wheeler, Geoff P McDermott, Junjie Zhu, et al. Massively parallel digital transcriptional profiling of single cells. Nature communications, 8(1):14049, 2017.
xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5c8f73fd854438e1906f548e008cd73fb517ed7d7d0caebd735cc7339922d9b
3
+ size 305540
xtrimogeneanefficientandscalablerepresentationlearnerforsinglecellrnaseqdata/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:148094e26b0d7ca3ad7a5b854381e60c5d9564357da00a60ca7cdce8bfdc3a55
3
+ size 318748
youonlycondenseoncetworulesforpruningcondenseddatasets/dbfde00c-7c4b-48aa-b68d-8b0f07b73866_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a26d64dad5fdd51c595dc15c62c6df8905065a78c29a9d45702ff154c097d3c0
3
+ size 87407
youonlycondenseoncetworulesforpruningcondenseddatasets/dbfde00c-7c4b-48aa-b68d-8b0f07b73866_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9c7f77f440defa17742ea4c5dfe6ffcbc012fb1f5d0bcc855aaf606f75799e3
3
+ size 108620
youonlycondenseoncetworulesforpruningcondenseddatasets/dbfde00c-7c4b-48aa-b68d-8b0f07b73866_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0914320602b6aebe6d7cda87d51b70aebe8463426627bc5a18b42accfbb2d6f8
3
+ size 2498292
youonlycondenseoncetworulesforpruningcondenseddatasets/full.md ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # You Only Condense Once: Two Rules for Pruning Condensed Datasets
2
+
3
+ Yang He, Lingao Xiao, Joey Tianyi Zhou
4
+
5
+ CFAR, Agency for Science, Technology and Research, Singapore
6
+
7
+ IHPC, Agency for Science, Technology and Research, Singapore
8
+
9
+ {He_Yang, Joey_Zhou}@cfar.a-star.edu.sg
10
+
11
+ # Abstract
12
+
13
+ Dataset condensation is a crucial tool for enhancing training efficiency by reducing the size of the training dataset, particularly in on-device scenarios. However, these scenarios have two significant challenges: 1) the varying computational resources available on the devices require a dataset size different from the pre-defined condensed dataset, and 2) the limited computational resources often preclude the possibility of conducting additional condensation processes. We introduce You Only Condense Once (YOCO) to overcome these limitations. On top of one condensed dataset, YOCO produces smaller condensed datasets with two embarrassingly simple dataset pruning rules: Low LBPE Score and Balanced Construction. YOCO offers two key advantages: 1) it can flexibly resize the dataset to fit varying computational constraints, and 2) it eliminates the need for extra condensation processes, which can be computationally prohibitive. Experiments validate our findings on networks including ConvNet, ResNet and DenseNet, and datasets including CIFAR-10, CIFAR-100 and ImageNet. For example, our YOCO surpassed various dataset condensation and dataset pruning methods on CIFAR-10 with ten Images Per Class (IPC), achieving $6.98 - 8.89\%$ and $6.31 - 23.92\%$ accuracy gains, respectively. The code is available at: https://github.com/he-y/you-only-condense-once.
14
+
15
+ # 1 Introduction
16
+
17
+ Deep learning models often require vast amounts of data to achieve optimal performance. This data-hungry nature of deep learning algorithms, coupled with the growing size and complexity of datasets, has led to the need for more efficient dataset handling techniques. Dataset condensation is a promising approach that enables models to learn from a smaller and more representative subset of the entire dataset. Condensed datasets are especially utilized in on-device scenarios, where limited computational resources and storage constraints necessitate the use of a compact training set.
18
+
19
+ However, these on-device scenarios have two significant constraints. First, the diverse and fluctuating computational resources inherent in these scenarios necessitate a level of flexibility in the size of the dataset. But the requirement of flexibility is not accommodated by the fixed sizes of previous condensed datasets. Second, the limited computational capacity in these devices also makes extra condensation processes impractical, if not impossible. Therefore, the need for adaptability in the size of the condensed dataset becomes increasingly crucial. Furthermore, this adaptability needs to be realized without introducing another computationally intensive condensation process.
20
+
21
+ We introduce You Only Condense Once (YOCO) to enable the flexible resizing (pruning) of condensed datasets to fit varying on-device scenarios without extra condensation process (See Fig. 1). The first rule of our proposed method involves a metric to evaluate the importance of training samples in the context of dataset condensation.
22
+
23
+ From the gradient of the loss function, we develop the Logit-Based Prediction Error (LBPE) score to rank training samples. This metric quantifies the neural network's difficulty in recognizing each sample. Specifically, training samples with low LBPE scores are considered easy as they indicate that the model's prediction is close to the true label. These samples exhibit simpler patterns, easily captured by the model. Given the condensed datasets' small size, prioritizing easier samples with low LBPE scores is crucial to avoid overfitting.
24
+
25
+ A further concern is that relying solely on a metric-based ranking could result in an imbalanced distribution of classes within the dataset. Imbalanced
26
+
27
+ ![](images/0d4a3b72f83dcab9e78c42c2f595f5038f21ac0d2847027897a65f49bab5d40c.jpg)
28
+ Figure 1: Previous methods (left) require extra condensation processes, but ours (right) do not.
29
+
30
+ datasets can lead to biased predictions, as models tend to focus on the majority class, ignoring the underrepresented minority classes. This issue has not been given adequate attention in prior research about dataset pruning [3, 41, 34], but it is particularly important when dealing with condensed datasets. In order to delve deeper into the effects of class imbalance, we explore Rademacher Complexity [31], a widely recognized metric for model complexity that is intimately connected to generalization error and expected loss. Based on the analysis, we propose Balanced Construction to ensure that the condensed dataset is both informative and balanced.
31
+
32
+ The key contributions of our work are: 1) To the best of our knowledge, it's the first work to provide a solution to adaptively adjust the size of a condensed dataset to fit varying computational constraints. 2) After analyzing the gradient of the loss function, we propose the LBPE score to evaluate the sample importance and find out easy samples with low LBPE scores are suitable for condensed datasets. 3) Our analysis of the Rademacher Complexity highlights the challenges of class imbalance in condensed datasets, leading us to construct balanced datasets.
33
+
34
+ # 2 Related Works
35
+
36
+ # 2.1 Dataset Condensation/Distillation
37
+
38
+ Dataset condensation, or distillation, synthesizes a compact image set to maintain the original dataset's information. Wang et al. [45] pioneer an approach that leverages gradient-based hyperparameter optimization to model network parameters as a function of this synthetic data. Building on this, Zhao et al. [53] introduce gradient matching between real and synthetic image-trained models. Kim et al. [19] further extend this, splitting synthetic data into $n$ factor segments, each decoding into $n^2$ training images. Zhao & Bilen [50] apply consistent differentiable augmentation to real and synthetic data, thus enhancing information distillation. Cazenavette et al. [1] propose to emulate the long-range training dynamics of real data by aligning learning trajectories. Liu et al. [24] advocate matching only representative real dataset images, selected based on latent space cluster centroid distances. Additional research avenues include integrating a contrastive signal [21], matching distribution or features [52, 44], matching multi-level gradients [16], minimizing accumulated trajectory error [7], aligning loss curvature [39], parameterizing datasets [6, 23, 51, 43], and optimizing dataset condensation [27, 32, 33, 42, 55, 25, 49, 4, 26]. Nevertheless, the fixed size of condensed datasets remains an unaddressed constraint in prior work.
39
+
40
+ # 2.2 Dataset Pruning
41
+
42
+ Unlike dataset condensation that alters image pixels, dataset pruning preserves the original data by selecting a representative subset. Entropy [3] keeps hard samples with maximum entropy (uncertainty [22, 38]), using a smaller proxy network. Forgetting [41] defines forgetting events as an accuracy drop at consecutive epochs, and hard samples with the most forgetting events are important. AUM [35] identifies data by computing the Area Under the Margin, the difference between the true label logits and the largest other logits. Memorization [11] prioritizes a sample if it significantly improves the probability of correctly predicting the true label. GraNd [34] and EL2N [34] classify samples as hard based on the presence of large gradient norm and large norm of error vectors, respectively. CCS [54] extends previous methods by pruning hard samples and using stratified sampling to
43
+
44
+ achieve good coverage of data distributions at a large pruning ratio. Moderate [47] selects moderate samples (neither hard nor easy) that are close to the score median in the feature space. Optimization-based [48] chooses samples yielding a strictly constrained generalization gap. In addition, other dataset pruning (or coreset selection) methods [8, 28, 36, 17, 30, 18, 2, 29, 9, 10, 46, 15] are widely used for Active Learning [38, 37]. However, the previous methods consider full datasets and often neglect condensed datasets.
45
+
46
+ # 3 Method
47
+
48
+ # 3.1 Preliminaries
49
+
50
+ We denote a dataset by $S = (x_{i},y_{i})_{i = 1}^{N}$ , where $x_{i}$ represents the $i^{th}$ input and $y_{i}$ represents the corresponding true label. Let $\mathcal{L}(p(\mathbf{w},x),y)$ be a loss function that measures the discrepancy between the predicted output $p(\mathbf{w},x)$ and the true label $y$ . The loss function is parameterized by a weight vector $\mathbf{w}$ , which we optimize during the training process.
51
+
52
+ We consider a time-indexed sequence of weight vectors, $\mathbf{w}_t$ , where $t = 1,\dots ,T$ . This sequence represents the evolution of the weights during the training process. The gradient of the loss function with respect to the weights at time $t$ is given by $g_{t}(x,y) = \nabla_{\mathbf{w}_{t}}\mathcal{L}(p(\mathbf{w}_{t},x),y)$ .
53
+
54
+ # 3.2 Identifying Important Training Samples
55
+
56
+ Our goal is to propose a measure that quantifies the importance of a training sample. We begin by analyzing the gradient of the loss function with respect to the weights $\mathbf{w}_t$ :
57
+
58
+ $$
59
+ \nabla_ {\mathbf {w} _ {t}} \mathcal {L} (p (\mathbf {w} _ {t}, x), y) = \frac {\partial \mathcal {L} (p (\mathbf {w} _ {t} , x) , y)}{\partial p (\mathbf {w} _ {t} , x)} \cdot \frac {\partial p (\mathbf {w} _ {t} , x)}{\partial \mathbf {w} _ {t}}. \tag {1}
60
+ $$
61
+
62
+ We aim to determine the impact of training samples on the gradient of the loss function, as the gradient plays a critical role in the training process of gradient-based optimization methods. Note that our ranking method is inspired by EL2N [34], but we interpret it in a different way by explicitly considering the dataset size $|S|$ .
63
+
64
+ Definition 1 (Logit-Based Prediction Error - LBPE): The Logit-Based Prediction Error (LBPE) of a training sample $(x,y)$ at time $t$ is given by:
65
+
66
+ $$
67
+ \mathrm {L B P E} _ {t} (x, y) = \mathbb {E} | p (\mathbf {w} _ {t}, x) - y | _ {2}, \tag {2}
68
+ $$
69
+
70
+ where $\mathbf{w}_t$ is the weights at time $t$ , and $p(\mathbf{w}_t, x)$ represents the prediction logits.
71
+
72
+ Lemma 1 (Gradient and Importance of Training Samples): The gradient of the loss function $\nabla_{\mathbf{w}_t}\mathcal{L}(p(\mathbf{w}_t,x),y)$ for a dataset $S$ is influenced by the samples with prediction errors.
73
+
74
+ Proof of Lemma 1: Consider two datasets $S$ and $S_{\neg j}$ , where $S_{\neg j}$ is obtained by removing the sample $(x_j, y_j)$ from $S$ . Let the gradients of the loss function for these two datasets be $\nabla_{\mathbf{w}_t}^S \mathcal{L}$ and $\nabla_{\mathbf{w}_t}^{S \neg j} \mathcal{L}$ , respectively. The difference between the gradients is given by (see Appendix A.1 for proof):
75
+
76
+ $$
77
+ \Delta \nabla_ {\mathbf {w} _ {t}} \mathcal {L} = \frac {- 1}{| S | (| S | - 1)} \sum_ {(x, y) \in S _ {\neg j}} \frac {\partial \mathcal {L} (p \left(\mathbf {w} _ {t} , x\right) , y)}{\partial p \left(\mathbf {w} _ {t} , x\right)} \cdot \frac {\partial p \left(\mathbf {w} _ {t} , x\right)}{\partial \mathbf {w} _ {t}} + \frac {1}{| S |} \frac {\partial \mathcal {L} (p \left(\mathbf {w} _ {t} , x _ {j}\right) , y _ {j})}{\partial p \left(\mathbf {w} _ {t} , x _ {j}\right)} \cdot \frac {\partial p \left(\mathbf {w} _ {t} , x _ {j}\right)}{\partial \mathbf {w} _ {t}} \tag {3}
78
+ $$
79
+
80
+ Let us denote the error term as: $e_j = p(\mathbf{w}_t, x_j) - y_j$ , the LBPE score for sample $(x_j, y_j)$ is given by $\mathrm{LBPE}_t(x_j, y_j) = \mathbb{E}|e_j|_2$ , and the difference in gradients related to the sample $(x_j, y_j)$ can be rewritten as:
81
+
82
+ $$
83
+ \frac {1}{| S |} \frac {\partial \mathcal {L} \left(e _ {j}\right)}{\partial e _ {j}} \cdot \frac {\partial p \left(\mathbf {w} _ {t} , x _ {j}\right)}{\partial \mathbf {w} _ {t}}. \tag {4}
84
+ $$
85
+
86
+ If the sample $(x_{j},y_{j})$ has a lower LBPE score, it implies that the error term $e_j$ is smaller. Let's consider the mean squared error (MSE) loss function, which is convex. The MSE loss function is
87
+
88
+ Algorithm 1 Compute LBPE score for samples over epochs
89
+ Require: Training dataset $S$ and its size $|S|$ , weights $\mathbf{w}_t$ , true labels $y$ , model's predicted probabilities $p(\mathbf{w}_t, x)$ , number of epochs $E$ , Epochs with Top-K accuracy
90
+ 1: Initialize matrix: LBPE = torch.zeros((E, |S|)) ▷ LBPE scores over samples and epochs
91
+ 2: Initialize accuracy: ACC = torch.zeros(E) ▷ Track the accuracy over epochs
92
+ 3: for each epoch $t$ in range $E$ do ▷ Loop through each epoch
93
+ 4: for each sample index $i$ in $S$ do ▷ Loop through each sample in the dataset
94
+ 5: Compute error term for sample $i$ at epoch $t$ : $e_{i,t} = p(\mathbf{w}_t, x_i) - y_i$
95
+ 6: Compute LBPE score for sample $i$ at epoch $t$ with MSE loss: $\mathrm{LBPE}_{i,t} = \mathbb{E}_{MSE}|e_{i,t}|_2$
96
+ 7: end for
97
+ 8: Compute accuracy at epoch $t$ : $\mathrm{ACC}_t$
98
+ 9: end for
99
+ 10: Top_K ← argsort(ACC)[-k:] ▷ Find the epochs with the Top-K accuracy
100
+ 11: AVG_LBPE ← mean(LBPE[Top_k,:]) ▷ Average LBPE score over Top-K epochs
101
+ 12: return AVG_LBPE
102
+
103
+ defined as $\mathcal{L}(e_j) = \frac{1}{2} (e_j)^2$ . Consequently, the derivative of the loss function $\frac{\partial\mathcal{L}(e_j)}{\partial e_j} = e_j$ would be smaller for samples with smaller LBPE scores, leading to a smaller change in the gradient $\Delta \nabla_{\mathbf{w}_t}\mathcal{L}$ .
104
+
105
+ Rule 1: For a small dataset, a sample with a lower LBPE score will be more important. Let $S$ be a dataset of size $|S|$ , partitioned into subsets $S_{easy}$ (lower LBPE scores) and $S_{hard}$ (higher LBPE scores).
106
+
107
+ Case 1: Small Dataset - When the dataset size $|S|$ is small, the model's capacity to learn complex representations is limited. Samples in $S_{easy}$ represent prevalent patterns in the data, and focusing on learning from them leads to a lower average expected loss. This enables the model to effectively capture the dominant patterns within the limited dataset size. Moreover, the gradients of the loss function for samples in $S_{easy}$ are smaller, leading to faster convergence and improved model performance within a limited number of training iterations.
108
+
109
+ Case 2: Large Dataset - When the dataset size $|S|$ is large, the model has the capacity to learn complex representations, allowing it to generalize well to both easy and hard samples. As the model learns from samples in both $S_{easy}$ and $S_{hard}$ , its overall performance improves, and it achieves higher accuracy on hard samples. Training on samples in $S_{hard}$ helps the model learn more discriminative features, as they often lie close to the decision boundary.
110
+
111
+ Therefore, in the case of a small dataset, samples with lower LBPE scores are more important.
112
+
113
+ The use of the LBPE importance metric is outlined in Algorithm 1. LBPE scores over the epochs with the Top-K training accuracy are averaged. The output of this algorithm is the average LBPE score.
114
+
115
+ # 3.3 Balanced Construction
116
+
117
+ In this section, we prove that a more balanced class distribution yields a lower expected loss.
118
+
119
+ Definition 2.1 (Dataset Selection $S_A$ and $S_B$ ): $S_A$ is to select images from each class based on their LBPE scores such that the selection is balanced across classes, and $S_B$ is to select images purely based on their LBPE scores without considering the class balance. Formally, we have:
120
+
121
+ $$
122
+ S_{A} = \left\{ \left(x_{i}, y_{i}\right) : x_{i} \in \mathcal{X}_{k} \text{ and } \mathrm{LBPE}_{t}\left(x_{i}, y_{i}\right) \leq \tau_{k} \right\}, \quad S_{B} = \left\{ \left(x_{i}, y_{i}\right) : \mathrm{LBPE}_{t}\left(x_{i}, y_{i}\right) \leq \tau \right\} \tag{5}
123
+ $$
124
+
125
+ where $\mathcal{X}_k$ denotes the set of images from class $k$ , and $\tau_k$ is a threshold for class $k$ , $\tau$ is a global threshold. Then $S_A$ is a more balanced dataset compared to $S_B$ .
126
+
127
+ Definition 2.2 (Generalization Error): The generalization error of a model is the difference between the expected loss on the training dataset and the expected loss on an unseen test dataset:
128
+
129
+ $$
130
+ \operatorname{GenErr}(\mathbf{w}) = \mathbb{E}\left[ L_{\text{test}}(\mathbf{w}) \right] - \mathbb{E}\left[ L_{\text{train}}(\mathbf{w}) \right]. \tag{6}
131
+ $$
132
+
133
+ Definition 2.3 (Rademacher Complexity): The Rademacher complexity [31] of a hypothesis class $\mathcal{H}$ for a dataset $S$ of size $N$ is defined as:
134
+
135
+ $$
136
+ \mathcal {R} _ {N} (\mathcal {H}) = \underset {\sigma} {\mathbb {E}} \left[ \sup _ {h \in \mathcal {H}} \frac {1}{N} \sum_ {i = 1} ^ {N} \sigma_ {i} h \left(\mathbf {x} _ {i}\right) \right], \tag {7}
137
+ $$
138
+
139
+ # Algorithm 2 Balanced Dataset Construction
140
+
141
+ Require: Condensed dataset $S = \{(x_{i},y_{i})\}_{i = 1}^{m}$ with classes $\mathbf{K}$ , LBPE scores LBPE, class-specific thresholds $\pmb{\tau} = \{\tau_{k}\}_{k = 1}^{K}$ to ensure an equal number of samples for each class
142
+
143
+ 1: Initialize $S_A = \emptyset$ ▷ Initialize the balanced subset as an empty set
144
+ 2: for each class $k \in \mathbf{K}$ do
145
+ 3: $I_{\text{sel}} \gets \{i : y_i = k \text{ and } \mathrm{LBPE}_t(x_i, y_i) \leq \tau_k\}$ ▷ Find indices of samples for class $k$
146
+ 4: $S_A \gets S_A \cup \{(x_i, y_i) : i \in I_{\text{sel}}\}$ ▷ Add the selected samples to the balanced subset
147
+ 5: end for
148
+ 6: return $S_A$ $\triangleright$ Return the balanced subset
149
+
150
+ where $\sigma_{i}$ are independent Rademacher random variables taking values in $\{-1, +1\}$ with equal probability.
151
+
152
+ Lemma 2.1 (Generalization Error Bound): With a high probability, the generalization error is upper-bounded by the Rademacher complexity of the hypothesis class:
153
+
154
+ $$
155
+ \operatorname{GenErr}(\mathbf{w}) \leq 2 \mathcal{R}_{N}(\mathcal{H}) + \mathcal{O}\left(\frac{1}{\sqrt{N}}\right), \tag{8}
156
+ $$
157
+
158
+ where $\mathcal{O}$ represents the order of the term.
159
+
160
+ Lemma 2.2 (Rademacher Complexity Comparison): The Rademacher complexity of dataset $S_A$ is less than that of dataset $S_B$ :
161
+
162
+ $$
163
+ \mathcal {R} _ {N _ {A}} (\mathcal {H}) \leq \mathcal {R} _ {N _ {B}} (\mathcal {H}). \tag {9}
164
+ $$
165
+
166
+ Theorem 2.1: The expected loss for the dataset $S_A$ is less than or equal to $S_B$ when both models achieve similar performance on their respective training sets.
167
+
168
+ Proof of Theorem 2.1: Using Lemma 2.1 and Lemma 2.2, we have:
169
+
170
+ $$
171
+ \operatorname{GenErr}\left(\mathbf{w}_{A}\right) \leq \operatorname{GenErr}\left(\mathbf{w}_{B}\right). \tag{10}
172
+ $$
173
+
174
+ Assuming that both models achieve similar performance on their respective training sets, the training losses are approximately equal:
175
+
176
+ $$
177
+ \mathbb{E}\left[ L_{\text{train}}\left(\mathbf{w}_{A}\right) \right] \approx \mathbb{E}\left[ L_{\text{train}}\left(\mathbf{w}_{B}\right) \right]. \tag{11}
178
+ $$
179
+
180
+ Given this assumption, we can rewrite the generalization error inequality as:
181
+
182
+ $$
183
+ \mathbb{E}\left[ L_{\text{test}}(\mathbf{w}_{A}) \right] - \mathbb{E}\left[ L_{\text{train}}(\mathbf{w}_{A}) \right] \leq \mathbb{E}\left[ L_{\text{test}}(\mathbf{w}_{B}) \right] - \mathbb{E}\left[ L_{\text{train}}(\mathbf{w}_{B}) \right]. \tag{12}
184
+ $$
185
+
186
+ Adding $\mathbb{E}[L_{\mathrm{train}}(\mathbf{w}_A)]$ to the left side and the approximately equal $\mathbb{E}[L_{\mathrm{train}}(\mathbf{w}_B)]$ (Eq. 11) to the right side, we get:
187
+
188
+ $$
189
+ \mathbb{E}\left[ L_{\text{test}}\left(\mathbf{w}_{A}\right) \right] \leq \mathbb{E}\left[ L_{\text{test}}\left(\mathbf{w}_{B}\right) \right]. \tag{13}
190
+ $$
191
+
192
+ This result indicates that the balanced dataset $S_A$ is better than $S_B$ .
193
+
194
+ Theorem 2.2: Let $S_F$ and $S_C$ be the full and condensed datasets, respectively, and let both $S_F$ and $S_C$ have an imbalanced class distribution with the same degree of imbalance. Then, the influence of the imbalanced class distribution on the expected loss is larger for the condensed dataset $S_C$ than for the full dataset $S_F$ .
195
+
196
+ Proof of Theorem 2.2: We compare the expected loss for the full and condensed datasets, taking into account their class imbalances. Let $L(h)$ denote the loss function for the hypothesis $h$ . Let $\mathbb{E}[L(h)|S]$ denote the expected loss for the hypothesis $h$ on the dataset $S$ . Let $n_{kF}$ and $n_{kC}$ denote the number of samples in class $k$ for datasets $S_F$ and $S_C$ , respectively. Let $m_F$ and $m_C$ denote the total number of samples in datasets $S_F$ and $S_C$ , respectively. Let $r_k = \frac{n_{kF}}{m_F} = \frac{n_{kC}}{m_C}$ be the class ratio for each class $k$ in both datasets. The expected loss for $S_F$ and $S_C$ can be written as:
197
+
198
+ $$
199
+ \mathbb {E} [ L (h) | S _ {F} ] = \sum_ {k = 1} ^ {K} r _ {k} \mathbb {E} [ l (h (x), y) | \mathcal {X} _ {k} ], \quad \mathbb {E} [ L (h) | S _ {C} ] = \sum_ {k = 1} ^ {K} r _ {k} \mathbb {E} [ l (h (x), y) | \mathcal {X} _ {k} ], \tag {14}
200
+ $$
201
+
202
+ To show this, let's compare the expected loss per sample in each dataset:
203
+
204
+ $$
205
+ \frac {\mathbb {E} [ L (h) \mid S _ {C} ]}{m _ {C}} > \frac {\mathbb {E} [ L (h) \mid S _ {F} ]}{m _ {F}}. \tag {15}
206
+ $$
207
+
208
+ This implies that the influence of the imbalanced class distribution is larger for $S_{C}$ than for $S_{F}$ .
209
+
210
+ Rule 2: Balanced class distribution should be utilized for the condensed dataset. The construction of a balanced class distribution based on LBPE scores is outlined in Algorithm 2. Its objective is to create an equal number of samples for each class to ensure a balanced dataset.
211
+
212
+ # 4 Experiments
213
+
214
+ # 4.1 Experiment Settings
215
+
216
+ IPC stands for "Images Per Class". $\mathrm{IPC}_{\mathbf{F}\rightarrow \mathbf{T}}$ means flexibly resize the condensed dataset from size $\mathbf{F}$ to size T. More detailed settings can be found in Appendix B.1.
217
+
218
+ Dataset Condensation Settings. The CIFAR-10 and CIFAR-100 datasets [20] are condensed via ConvNet-D3 [12], and ImageNet-10 [5] via ResNet10-AP [13], both following IDC [19]. IPC includes 10, 20, or 50, depending on the experiment. For both networks, the learning rate is 0.01 with 0.9 momentum and 0.0005 weight decay. The SGD optimizer and a multi-step learning rate scheduler are used. The training batch size is 64, and the network is trained for $2000 \times 100$ epochs for CIFAR-10/CIFAR-100 and $500 \times 100$ epochs for ImageNet-10.
219
+
220
+ YOCO Settings. 1) LBPE score selection. To reduce computational costs, we derive the LBPE score from training dynamics of early $E$ epochs. To reduce variance, we use the LBPE score from the top- $K$ training epochs with the highest accuracy. For CIFAR-10, we set $E = 100$ and $K = 10$ for all the $\mathrm{IPC}_{\mathbf{F}}$ and $\mathrm{IPC}_{\mathbf{T}}$ . For CIFAR-100 and ImageNet-10, we set $E = 200$ and $K = 10$ for all the $\mathrm{IPC}_{\mathbf{F}}$ and $\mathrm{IPC}_{\mathbf{T}}$ . 2) Balanced construction. We use $S_{A}$ in Eq. 5 to achieve a balanced construction. Following IDC [19], we leverage a multi-formation framework to increase the synthetic data quantity while preserving the storage budget. Specifically, an IDC-condensed image is composed of $n^2$ patches. Each patch is derived from one original image with the resolution scaled down by a factor of $1/n^2$ . Here, $n$ is referred to as the "factor" in the multi-formation process. For CIFAR-10 and CIFAR-100 datasets, $n = 2$ ; for ImageNet-10 dataset, $n = 3$ . We create balanced classes according to these patches. As a result, all the classes have the same number of samples. 3) Flexible resizing. For datasets with $\mathrm{IPC}_{\mathbf{F}} = 10$ and $\mathrm{IPC}_{\mathbf{F}} = 20$ , we select $\mathrm{IPC}_{\mathbf{T}}$ of 1, 2, and 5. For $\mathrm{IPC}_{\mathbf{F}} = 50$ , we select $\mathrm{IPC}_{\mathbf{T}}$ of 1, 2, 5, and 10. For a condensed dataset with $\mathrm{IPC}_{\mathbf{F}}$ , the performance of its flexible resizing is indicated by the average accuracy across different $\mathrm{IPC}_{\mathbf{T}}$ values.
221
+
222
+ Comparison Baselines. We have two sets of baselines for comparison: 1) dataset condensation methods including IDC[19], DREAM[24], MTT [1], DSA [50] and KIP [32] and 2) dataset pruning methods including SSP [40], Entropy [3], AUM [35], Forgetting [41], EL2N [34], and CCS [54]. For dataset condensation methods, we use a random subset as the baseline. For dataset pruning methods, their specific metrics are used to rank and prune datasets to the required size.
223
+
224
+ # 4.2 Primary Results
225
+
226
+ Tab. 1 provides a comprehensive comparison of different methods for flexibly resizing datasets from an initial $\mathrm{IPC}_{\mathbf{F}}$ to a target $\mathrm{IPC}_{\mathbf{T}}$ . In this table, we have not included ImageNet results on DREAM [24] since it only reports on Tiny ImageNet with a resolution of $64\times 64$ , in contrast to ImageNet's $224\times 224$ . The third column of the table shows the accuracy of the condensed dataset at the parameter $\mathrm{IPC}_{\mathbf{F}}$ . We then flexibly resize the dataset from $\mathrm{IPC}_{\mathbf{F}}$ to $\mathrm{IPC}_{\mathbf{T}}$ . The blue area represents the average accuracy across different $\mathrm{IPC}_{\mathbf{T}}$ values. For instance, consider the CIFAR-10 dataset with $\mathrm{IPC}_{\mathbf{F}} = 10$ . Resizing it to $\mathrm{IPC}_{\mathbf{F}} = 1, 2$ , and 5 using our method yields accuracies of $42.28\%$ , $46.67\%$ , and $55.96\%$ , respectively. The average accuracy of these three values is $48.30\%$ . This value surpasses the $37.08\%$ accuracy of SSP [40] by a considerable margin of $11.22\%$ .
227
+
228
+ Ablation Study. Tab. 2 shows the ablation study of the LBPE score and the balanced construction across dataset condensation methods. In the first row, the baseline results are shown where neither the LBPE score nor the balanced construction is applied. "Balanced only" (second row) indicates the selection method is random selection and the class distribution is balanced. "LBPE only" (third row) means the
229
+
230
+ Table 2: Ablation study on two rules. (CIFAR-10: $\mathrm{IPC}_{10\rightarrow 1}$ )
231
+
232
+ <table><tr><td>LBPE</td><td>Balanced</td><td>IDC [19]</td><td>DREAM [24]</td><td>MTT [1]</td><td>KIP [32]</td></tr><tr><td>-</td><td>-</td><td>28.23</td><td>30.87</td><td>19.75</td><td>14.06</td></tr><tr><td>-</td><td>✓</td><td>30.19</td><td>32.83</td><td>19.09</td><td>16.27</td></tr><tr><td>✓</td><td>-</td><td>39.38</td><td>37.30</td><td>20.37</td><td>15.78</td></tr><tr><td>✓</td><td>✓</td><td>42.28</td><td>42.29</td><td>22.02</td><td>22.24</td></tr></table>
233
+
234
+ Table 1: IPC means “images per class”. Flexibly resize dataset from $\mathrm{IPC}_{\mathbf{F}}$ to $\mathrm{IPC}_{\mathbf{T}}$ ( $\mathrm{IPC}_{\mathbf{F} \rightarrow \mathbf{T}}$ ). The blue areas represent the average accuracy of listed $\mathrm{IPC}_{\mathbf{T}}$ datasets for different values of $\mathbf{T}$ . The gray areas indicate the accuracy difference between the corresponding methods and ours.
235
+
236
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">\( {\mathrm{{IPC}}}_{\mathrm{F}} \)</td><td rowspan="2">Acc.</td><td rowspan="2">\( {\mathrm{{IPC}}}_{\mathrm{T}} \)</td><td colspan="2">Condensation</td><td colspan="7">Pruning Method</td></tr><tr><td>IDC[19]</td><td>DREAM[24]</td><td>SSP[40]</td><td>Entropy[3]</td><td>AUM[35]</td><td>Forg.[41]</td><td>EL2N[34]</td><td>CCS[54]</td><td>Ours</td></tr><tr><td rowspan="11">CIFAR-10</td><td rowspan="5">10</td><td rowspan="5">67.50</td><td>1</td><td>28.23</td><td>30.87</td><td>27.83</td><td>30.30</td><td>13.30</td><td>16.68</td><td>16.95</td><td>33.54</td><td>42.28</td></tr><tr><td>2</td><td>37.10</td><td>38.88</td><td>34.95</td><td>38.88</td><td>18.44</td><td>22.13</td><td>23.26</td><td>39.20</td><td>46.67</td></tr><tr><td>5</td><td>52.92</td><td>54.23</td><td>48.47</td><td>52.85</td><td>41.40</td><td>45.49</td><td>46.58</td><td>53.23</td><td>55.96</td></tr><tr><td>Avg.</td><td>39.42</td><td>41.33</td><td>37.08</td><td>40.68</td><td>24.38</td><td>28.10</td><td>28.93</td><td>41.99</td><td>48.30</td></tr><tr><td>Diff.</td><td>-8.89</td><td>-6.98</td><td>-11.22</td><td>-7.63</td><td>-23.92</td><td>-20.20</td><td>-19.37</td><td>-6.31</td><td>-</td></tr><tr><td rowspan="6">50</td><td 
rowspan="6">74.50</td><td>1</td><td>29.45</td><td>27.61</td><td>28.99</td><td>17.95</td><td>7.21</td><td>12.23</td><td>7.95</td><td>31.28</td><td>38.77</td></tr><tr><td>2</td><td>34.27</td><td>36.11</td><td>34.51</td><td>24.46</td><td>8.67</td><td>12.17</td><td>9.47</td><td>38.71</td><td>44.54</td></tr><tr><td>5</td><td>45.85</td><td>48.28</td><td>46.38</td><td>34.12</td><td>12.85</td><td>15.55</td><td>16.03</td><td>48.19</td><td>53.04</td></tr><tr><td>10</td><td>57.71</td><td>59.11</td><td>56.81</td><td>47.61</td><td>22.92</td><td>27.01</td><td>31.33</td><td>56.80</td><td>61.10</td></tr><tr><td>Avg.</td><td>41.82</td><td>42.78</td><td>41.67</td><td>31.04</td><td>12.91</td><td>16.74</td><td>16.20</td><td>43.75</td><td>49.36</td></tr><tr><td>Diff.</td><td>-7.54</td><td>-6.58</td><td>-7.69</td><td>-18.33</td><td>-36.45</td><td>-32.62</td><td>-33.17</td><td>-5.62</td><td>-</td></tr><tr><td rowspan="16">CIFAR-100</td><td rowspan="5">10</td><td rowspan="5">45.40</td><td>1</td><td>14.78</td><td>15.05</td><td>14.94</td><td>11.28</td><td>3.64</td><td>6.45</td><td>5.12</td><td>18.97</td><td>22.57</td></tr><tr><td>2</td><td>22.49</td><td>21.78</td><td>20.65</td><td>16.78</td><td>5.93</td><td>10.03</td><td>8.15</td><td>25.27</td><td>29.09</td></tr><tr><td>5</td><td>34.90</td><td>35.54</td><td>30.48</td><td>29.96</td><td>17.32</td><td>21.45</td><td>22.40</td><td>36.01</td><td>38.51</td></tr><tr><td>Avg.</td><td>24.06</td><td>24.12</td><td>22.02</td><td>19.34</td><td>8.96</td><td>12.64</td><td>11.89</td><td>26.75</td><td>30.06</td></tr><tr><td>Diff.</td><td>-6.00</td><td>-5.93</td><td>-8.03</td><td>-10.72</td><td>-21.09</td><td>-17.41</td><td>-18.17</td><td>-3.31</td><td>-</td></tr><tr><td rowspan="5">20</td><td 
rowspan="5">49.50</td><td>1</td><td>13.92</td><td>13.26</td><td>14.65</td><td>5.75</td><td>2.96</td><td>7.59</td><td>4.59</td><td>18.72</td><td>23.74</td></tr><tr><td>2</td><td>20.62</td><td>20.41</td><td>20.27</td><td>8.63</td><td>3.96</td><td>10.64</td><td>6.18</td><td>24.08</td><td>29.93</td></tr><tr><td>5</td><td>31.21</td><td>31.81</td><td>30.34</td><td>17.51</td><td>8.25</td><td>17.63</td><td>11.76</td><td>32.81</td><td>38.02</td></tr><tr><td>Avg.</td><td>21.92</td><td>21.83</td><td>21.75</td><td>10.63</td><td>5.06</td><td>11.95</td><td>7.51</td><td>25.20</td><td>30.56</td></tr><tr><td>Diff.</td><td>-8.65</td><td>-8.74</td><td>-8.81</td><td>-19.93</td><td>-25.51</td><td>-18.61</td><td>-23.05</td><td>-5.36</td><td>-</td></tr><tr><td rowspan="6">50</td><td rowspan="6">52.60</td><td>1</td><td>13.41</td><td>13.36</td><td>15.90</td><td>1.86</td><td>2.79</td><td>9.03</td><td>4.21</td><td>19.05</td><td>23.47</td></tr><tr><td>2</td><td>20.38</td><td>19.97</td><td>21.26</td><td>2.86</td><td>3.04</td><td>12.66</td><td>5.01</td><td>24.32</td><td>29.59</td></tr><tr><td>5</td><td>29.92</td><td>29.88</td><td>29.63</td><td>6.04</td><td>4.56</td><td>20.23</td><td>7.24</td><td>31.93</td><td>37.52</td></tr><tr><td>10</td><td>37.79</td><td>37.85</td><td>36.97</td><td>13.31</td><td>8.56</td><td>29.11</td><td>11.72</td><td>38.05</td><td>42.79</td></tr><tr><td>Avg.</td><td>25.38</td><td>25.27</td><td>25.94</td><td>6.02</td><td>4.74</td><td>17.76</td><td>7.05</td><td>28.34</td><td>33.34</td></tr><tr><td>Diff.</td><td>-7.97</td><td>-8.08</td><td>-7.40</td><td>-27.33</td><td>-28.61</td><td>-15.59</td><td>-26.30</td><td>-5.01</td><td>-</td></tr><tr><td rowspan="10">ImageNet-10</td><td rowspan="5">10</td><td 
rowspan="5">72.80</td><td>1</td><td>44.93</td><td>-</td><td>45.69</td><td>40.98</td><td>17.84</td><td>32.07</td><td>41.00</td><td>44.27</td><td>53.91</td></tr><tr><td>2</td><td>57.84</td><td>-</td><td>58.47</td><td>52.04</td><td>29.13</td><td>44.89</td><td>54.47</td><td>56.53</td><td>59.69</td></tr><tr><td>5</td><td>67.20</td><td>-</td><td>63.11</td><td>64.60</td><td>44.56</td><td>55.13</td><td>65.87</td><td>67.36</td><td>64.47</td></tr><tr><td>Avg.</td><td>56.66</td><td>-</td><td>55.76</td><td>52.54</td><td>30.51</td><td>44.03</td><td>53.78</td><td>56.05</td><td>59.36</td></tr><tr><td>Diff.</td><td>2.70</td><td>-</td><td>3.60</td><td>6.82</td><td>28.85</td><td>15.33</td><td>5.58</td><td>3.31</td><td>-</td></tr><tr><td rowspan="5">20</td><td rowspan="5">76.60</td><td>1</td><td>42.00</td><td>-</td><td>43.13</td><td>36.13</td><td>14.51</td><td>24.98</td><td>24.09</td><td>34.64</td><td>53.07</td></tr><tr><td>2</td><td>53.93</td><td>-</td><td>54.82</td><td>46.91</td><td>19.09</td><td>31.27</td><td>33.16</td><td>42.22</td><td>58.96</td></tr><tr><td>5</td><td>59.56</td><td>-</td><td>61.27</td><td>56.44</td><td>27.78</td><td>36.44</td><td>46.02</td><td>57.11</td><td>64.38</td></tr><tr><td>Avg.</td><td>51.83</td><td>-</td><td>53.07</td><td>46.49</td><td>20.46</td><td>30.90</td><td>34.42</td><td>44.66</td><td>58.80</td></tr><tr><td>Diff.</td><td>6.97</td><td>-</td><td>5.73</td><td>12.31</td><td>38.34</td><td>27.90</td><td>24.38</td><td>14.14</td><td>-</td></tr></table>
237
+
238
+ LBPE score is used for ranking samples, and the selection results are purely based on the LBPE score without considering the class balance. "LBPE + Balanced" indicates that both elements are included for sample selection. The empirical findings conclusively affirm the effectiveness of the two rules, which constitute the principal contributions of our YOCO method.
239
+
240
+ Standard Deviation of Experiments. Different training dynamics and network initializations impact the final results. Therefore, the reported results are averaged over three different training dynamics, and each training dynamic is evaluated based on three different network initializations. See Appendix B.2 for the primary results table with standard deviation.
241
+
242
+ # 4.3 Analysis of Two Rules
243
+
244
+ # 4.3.1 Analysis of LBPE Score for Sample Ranking
245
+
246
+ Tab. 3 illustrates the robust performance of our YOCO method across diverse network structures, including ConvNet, ResNet, and DenseNet, demonstrating its strong generalization ability. Additionally, we present different sample ranking metrics from dataset pruning methods on these networks, demonstrating that our method outperforms both random selection and other data pruning methods.
247
+
248
+ In Tab. 4, we experiment with prioritizing easy samples over hard ones. We achieve this by reversing the importance metrics introduced by AUM [35], Forg. [41], and EL2N [34] that originally prioritize
249
+
250
+ Table 3: Accuracies on different network structures and different sample ranking metrics. (IDC [19] condensed CIFAR-10: $\mathrm{IPC}_{10\rightarrow 1}$ )
251
+
252
+ <table><tr><td></td><td>ConvNet [12]</td><td>ResNet [13]</td><td>DenseNet [14]</td></tr><tr><td>Random</td><td>28.23</td><td>24.14</td><td>24.63</td></tr><tr><td>SSP[40]</td><td>27.83</td><td>24.64</td><td>24.75</td></tr><tr><td>Entropy[3]</td><td>30.30</td><td>30.53</td><td>29.93</td></tr><tr><td>AUM[35]</td><td>13.30</td><td>15.04</td><td>14.56</td></tr><tr><td>Forg.[41]</td><td>16.68</td><td>16.75</td><td>17.43</td></tr><tr><td>EL2N[34]</td><td>16.95</td><td>19.98</td><td>21.43</td></tr><tr><td>Ours</td><td>42.28</td><td>34.53</td><td>34.29</td></tr></table>
253
+
254
+ Table 4: Prioritizing easy samples is better for different dataset pruning and dataset condensation methods. "R?" represents whether to reverse the metrics which prioritize hard samples. (CIFAR-10: $\mathrm{IPC}_{10\rightarrow 1}$ )
255
+
256
+ <table><tr><td>Method</td><td>R?</td><td>IDC[19]</td><td>DREAM[24]</td><td>MTT[1]</td><td>DSA[50]</td></tr><tr><td>AUM [35]</td><td>-</td><td>13.30</td><td>14.43</td><td>15.33</td><td>14.25</td></tr><tr><td>AUM [35]</td><td>✓</td><td>37.97</td><td>38.18</td><td>16.63</td><td>18.23</td></tr><tr><td>Forg. [41]</td><td>-</td><td>16.68</td><td>16.26</td><td>18.82</td><td>16.55</td></tr><tr><td>Forg. [41]</td><td>✓</td><td>36.69</td><td>36.15</td><td>16.65</td><td>17.03</td></tr><tr><td>EL2N [34]</td><td>-</td><td>16.95</td><td>18.13</td><td>16.98</td><td>13.14</td></tr><tr><td>EL2N [34]</td><td>✓</td><td>33.11</td><td>34.36</td><td>19.01</td><td>21.29</td></tr><tr><td>Ours</td><td>-</td><td>42.28</td><td>42.29</td><td>22.02</td><td>22.40</td></tr></table>
257
+
258
+ Table 5: Balanced construction works on different dataset pruning methods. “ $\mathcal{B}$ ?” represents whether to use balanced construction. The subscript $^+\mathrm{value}$ indicates the accuracy gain from balanced construction. (IDC [19] condensed CIFAR-10: $\mathrm{IPC}_{10\rightarrow T}$ )
259
+
260
+ <table><tr><td>\(IPC_T\)</td><td>\(\mathcal{B}?\)</td><td>Random</td><td>SSP [40]</td><td>Entropy [3]</td><td>AUM [35]</td><td>Forg. [41]</td><td>EL2N [34]</td><td>Ours</td></tr><tr><td rowspan="2">IPC1</td><td>-</td><td>28.23</td><td>27.83</td><td>30.30</td><td>13.30</td><td>16.68</td><td>16.95</td><td>37.63</td></tr><tr><td>\(\checkmark\)</td><td>\(30.05+1.82\)</td><td>\(33.21+5.38\)</td><td>\(33.67+3.37\)</td><td>\(15.64+2.34\)</td><td>\(19.09+2.41\)</td><td>\(18.43+1.48\)</td><td>\(42.28+4.65\)</td></tr><tr><td rowspan="2">IPC2</td><td>-</td><td>37.10</td><td>34.95</td><td>38.88</td><td>18.44</td><td>22.13</td><td>23.26</td><td>42.99</td></tr><tr><td>\(\checkmark\)</td><td>\(39.44+2.34\)</td><td>\(40.57+5.62\)</td><td>\(42.17+3.29\)</td><td>\(23.84+5.40\)</td><td>\(28.06+5.93\)</td><td>\(26.54+3.28\)</td><td>\(46.67+3.68\)</td></tr><tr><td rowspan="2">IPC5</td><td>-</td><td>52.92</td><td>48.47</td><td>52.85</td><td>41.40</td><td>45.49</td><td>46.58</td><td>53.86</td></tr><tr><td>\(\checkmark\)</td><td>\(52.64-0.28\)</td><td>\(49.44+0.97\)</td><td>\(54.73+1.88\)</td><td>\(47.23+5.83\)</td><td>\(48.02+2.53\)</td><td>\(48.86+2.28\)</td><td>\(55.96+2.10\)</td></tr></table>
261
+
262
+ hard samples. Our results indicate that across various condensed datasets, including IDC [19], DREAM [24], MTT [1], and DSA [50], there is a distinct advantage in prioritizing easier samples over harder ones. These findings lend support to our Rule 1.
263
+
264
+ # 4.3.2 Analysis of Balanced Construction
265
+
266
+ Fig. 2 presents the class distributions with and without a balanced construction for different datasets and different $\mathrm{IPC}_{\mathbf{F}\rightarrow \mathbf{T}}$ . As explained in YOCO settings, our balanced construction is based on the multi-formation framework from IDC [19]. Therefore, the x-axis represents the count of images after multi-formation instead of the condensed images. It is evident that a ranking strategy relying solely on the LBPE score can result in a significant class imbalance, particularly severe in the ImageNet dataset. As depicted in Fig. 2(f), three classes have no image patches left. Our balanced construction method effectively mitigates this issue. Notably, in the case of ImageNet- $10_{10\to 1}$ , the balanced construction boosts the accuracy by an impressive $19.37\%$ .
267
+
268
+ To better understand the impact of balanced class distribution on various dataset pruning methods, we conducted a comparative analysis, as presented in Tab. 5. Clearly, achieving a balanced class distribution significantly enhances the performance of all examined methods. Remarkably, our proposed method consistently outperforms others under both imbalanced and balanced class scenarios, further substantiating the efficacy of our approach.
269
+
270
+ # 4.4 Other Analysis
271
+
272
+ Sample Importance Rules Differ between Condensed Dataset and Full Dataset. In Fig. 3, we compare Sample Importance Rules for Condensed Datasets $\left(\mathrm{IPC}_{10}, \mathrm{IPC}_{50}\right)$ and the Full Dataset $\left(\mathrm{IPC}_{5000}\right)$ , by adjusting the pruning ratio from $10\%$ to $90\%$ . Unmarked solid lines mean prioritizing easy samples, dashed lines suggest prioritizing hard samples, while marked solid lines depict the accuracy disparity between the preceding accuracies. Therefore, the grey region above zero indicates "Prefer easy samples" (Rule\_easy), while the blue region below zero represents "Prefer hard samples" (Rule\_hard). We have two observations. First, as the pruning ratio increases, there is a gradual transition from Rule\_hard to Rule\_easy. Second, the turning point of this transition depends on the dataset size. Specifically, the turning points for $\mathrm{IPC}_{10}$ , $\mathrm{IPC}_{50}$ , and $\mathrm{IPC}_{5000}$ occur at pruning
273
+
274
+ ![](images/6a063a2d4c08d6e583d686e2512a1b87f81303571f9ca05037452bca6d00c16a.jpg)
275
+
276
+ ![](images/563343b152475ca37578f151c89481a093377c2871051007a93d70a73ae23049.jpg)
277
+
278
+ ![](images/52d4c3c370bc5f718c892b9870459f7d6c9b56ec535a243e5c1e3f10d360b29c.jpg)
279
+
280
+ ![](images/534bbc2e6d4b86bbd42134c5160a04e8398e12ce0321918637accdea345e212d.jpg)
281
+ (d) CIFAR- $10_{50\to 1}$
282
+
283
+ ![](images/bf539d00b301528a65c0521335011ea2163a52917aa5e0123a65f3240f4d24e6.jpg)
284
+ (e) CIFAR-100 $_{10 \to 1}$
285
+
286
+ ![](images/ac9592403d704661f2373ef886086713a0f7fa9986a6041bbc8ae546ef54dcc3.jpg)
287
+ (f) ImageNet- $10_{10\to 1}$
288
+
289
+ ![](images/76d60d0958396bb129caac68dc83a9930b10c157822d5e15bdd7ef4ba4211876.jpg)
290
+ Figure 2: Balanced and imbalanced selection by ranking samples with LBPE score. $\mathrm{Dataset}_{\mathbf{F} \rightarrow \mathbf{T}}$ denotes resizing the dataset from $\mathrm{IPC}_{\mathbf{F}}$ to $\mathrm{IPC}_{\mathbf{T}}$ . Accuracies for each setting are also listed in the legend. (IDC [19] condensed datasets)
291
+ Figure 3: Different sample importance rules between condensed datasets and full datasets.
292
+
293
+ ratios of $24\%$ , $38\%$ , and $72\%$ , respectively. These experimental outcomes substantiate our Rule 1 that condensed datasets should adhere to Rule\_easy.
294
+
295
+ Performance Gap from Multi-formation. We would like to explain the huge performance gap between multi-formation-based methods (IDC [19] and DREAM [24]) and other methods (MTT [1], KIP [32], and DSA [50]) in Tab. 2 and Tab. 4. The potential reason is that a single image can be decoded to $2^{2} = 4$ low-resolution images via multi-formation. As a result, methods employing multi-formation generate four times as many images compared to those that do not use multi-formation. The illustration is shown in Fig. 4.
296
+
297
+ ![](images/34884b31635fc1ae6de40875fb655ba0cbf24c86cd4fcd955dd9a44043d28803.jpg)
298
+ Figure 4: Illustration of the multi-formation with a factor of 2. (Taken from IDC [19])
299
+
300
+ Why Use LBPE Score from the Top- $K$ Training Epochs with the Highest Accuracy? As shown in Eq. 2, different training epoch $t$ leads to a different LBPE score. Fig. 5 illustrates the accuracy of the dataset selected via the LBPE score across specific training epochs. We select LBPE scores from the initial 100 epochs out of 1000 original epochs to reduce computational costs. We have two observations. First, the model's accuracy during the first few epochs is substantially low. LBPE scores derived from these early-stage epochs might not accurately represent the samples' true importance since the model is insufficiently trained. Second, there's significant variance in accuracy even after 40 epochs, leading to potential instability in the LBPE score selection. To address this, we average LBPE scores from epochs with top- $K$ accuracy, thereby reducing variability and ensuring a more reliable sample importance representation.
301
+
302
+ Speculating on Why LBPE Score Performs Better at Certain Epochs? In Fig. 7, we present the distribution of LBPE scores at various training epochs, with scores arranged in ascending order for each class to facilitate comparison across epochs. Our experiment finds the LBPE scores decrease as the epoch number increases. The superior accuracy of $\mathrm{LBPE}_{90}$ is due to two reasons. First, the model at the $90_{th}$ epoch is more thoroughly trained than the model at the first epoch, leading to more accurate LBPE scores. Second, the $\mathrm{LBPE}_{90}$ score offers a more uniform distribution and a wider range [0, 1], enhancing sample distinction. In contrast, the $\mathrm{LBPE}_{1000}$ score is mostly concentrated within a narrow range [0, 0.1] for the majority of classes, limiting differentiation among samples. More possible reasons will be explored in future studies.
303
+
304
+ Visualization. Fig. 6 visualizes the easy and hard samples identified by our YOCO method. We notice that most easy samples have a distinct demarcation between the object and its background. This is particularly evident in the classes of "vacuum cleaner" and "cocktail shaker". The easy samples in
305
+
306
+ ![](images/7939d08127815522f6d7ee516d6796a964dafbbd01f40e836bdc3d3578d3fc35.jpg)
307
+ Figure 5: Accuracy of the dataset selected with LBPE score at specific epochs.
308
+
309
+ ![](images/04a170c3eada3b614b75290672784421ff85af4d18ecd64c8256cf26dcf9726e.jpg)
310
+ Figure 6: Visualization of hard and easy samples of ImageNet dataset selected by our method. Both the original and condensed images are shown for comparison.
311
+
312
+ ![](images/c94b1719fda761af67bfa8a637c790c8169fa2c3c326d98147c4b5ec91ee0a26.jpg)
313
+ Figure 7: LBPE scores at different epochs (LBPE<sub>epoch</sub>) for ten classes of the CIFAR-10 dataset.
314
+
315
+ these two classes have clean backgrounds, while the hard samples have complex backgrounds. The visualization provides evidence of our method's ability to identify easy and hard samples.
316
+
317
+ # 5 Conclusion, Limitation and Future Work
318
+
319
+ We introduce You Only Condense Once (YOCO), a novel approach that resizes condensed datasets flexibly without an extra condensation process, enabling them to adjust to varying computational constraints. YOCO comprises two key rules. First, YOCO employs the Logit-Based Prediction Error (LBPE) score to rank the importance of training samples and emphasizes the benefit of prioritizing easy samples with low LBPE scores. Second, YOCO underscores the need to address the class imbalance in condensed datasets and utilizes Balanced Construction to solve the problem. Our experiments validated YOCO's effectiveness across different networks and datasets. These insights offer valuable directions for future dataset condensation and dataset pruning research.
320
+
321
+ We acknowledge several limitations and potential areas for further investigation. First, although our method uses early training epochs to reduce computational costs, determining the sample importance in the first few training epochs or even before training is interesting for future work. Second, we only utilize the LBPE score to establish the importance of samples within the dataset. However, relying on a single metric might not be the optimal approach. There are other importance metrics, such as SSP [40] and AUM [35], that could be beneficial to integrate into our methodology. Third, as our current work only covers clean datasets like CIFAR-10, the performance of our method on noisy datasets requires further investigation.
322
+
323
+ The broader impact is shown in Appendix C.
324
+
325
+ # 6 Acknowledgement
326
+
327
+ This work is partially supported by Joey Tianyi Zhou's A*STAR SERC Central Research Fund (Use-inspired Basic Research), the Singapore Government's Research, Innovation and Enterprise 2020 Plan (Advanced Manufacturing and Engineering domain) under Grant A18A1b0045, and A*STAR CFAR Internship Award for Research Excellence (CIARE). The computational work for this article was partially performed on resources of the National Supercomputing Centre (NSCC), Singapore (https://www.nscc.sg).
328
+
329
+ # References
330
+
331
+ [1] G. Cazenavette, T. Wang, A. Torralba, A. A. Efros, and J.-Y. Zhu. Dataset distillation by matching training trajectories. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., 2022.
332
+ [2] K. Chitta, J. M. Álvarez, E. Haussmann, and C. Farabet. Training data subset search with ensemble active learning. IEEE Transactions on Intelligent Transportation Systems, 23(9):14741-14752, 2021.
333
+ [3] C. Coleman, C. Yeh, S. Mussmann, B. Mirzasoleiman, P. Bailis, P. Liang, J. Leskovec, and M. Zaharia. Selection via proxy: Efficient data selection for deep learning. In Proc. Int. Conf. Learn. Represent., 2020.
334
+ [4] J. Cui, R. Wang, S. Si, and C.-J. Hsieh. Scaling up dataset distillation to imagenet-1k with constant memory. arXiv preprint arXiv:2211.10586, 2022.
335
+ [5] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., pages 248–255, 2009.
336
+ [6] Z. Deng and O. Russakovsky. Remember the past: Distilling datasets into addressable memories for neural networks. In Proc. Adv. Neural Inform. Process. Syst., 2022.
337
+ [7] J. Du, Y. Jiang, V. T. Tan, J. T. Zhou, and H. Li. Minimizing the accumulated trajectory error to improve dataset distillation. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., 2023.
338
+ [8] M. Ducoffe and F. Precioso. Adversarial active learning for deep networks: a margin based approach. arXiv preprint arXiv:1802.09841, 2018.
339
+ [9] D. Feldman and M. Langberg. A unified framework for approximating and clustering data. In Proceedings of the Forty-Third Annual ACM Symposium on Theory of Computing, page 569-578, 2011.
340
+ [10] D. Feldman, M. Schmidt, and C. Sohler. Turning big data into tiny data: Constant-size coresets for k-means, pca, and projective clustering. SIAM Journal on Computing, 49(3):601-657, 2020.
341
+ [11] V. Feldman and C. Zhang. What neural networks memorize and why: Discovering the long tail via influence estimation. In Proc. Adv. Neural Inform. Process. Syst., pages 2881-2891, 2020.
342
+ [12] S. Gidaris and N. Komodakis. Dynamic few-shot visual learning without forgetting. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., pages 4367-4375, 2018.
343
+ [13] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., pages 770-778, 2016.
344
+ [14] G. Huang, Z. Liu, L. Van Der Maaten, and K. Q. Weinberger. Densely connected convolutional networks. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., pages 4700-4708, 2017.
345
+ [15] L. Huang and N. K. Vishnoi. Coresets for clustering in euclidean spaces: Importance sampling is nearly optimal. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, page 1416-1429, 2020.
346
+ [16] Z. Jiang, J. Gu, M. Liu, and D. Z. Pan. Delving into effective gradient matching for dataset condensation. arXiv preprint arXiv:2208.00311, 2022.
347
+
348
+ [17] K. Killamsetty, S. Durga, G. Ramakrishnan, A. De, and R. Iyer. Grad-match: Gradient matching based data subset selection for efficient deep model training. In Proc. Int. Conf. Mach. Learn., pages 5464-5474, 2021.
349
+ [18] K. Killamsetty, D. Sivasubramanian, G. Ramakrishnan, and R. Iyer. Glister: Generalization based data subset selection for efficient and robust learning. In Proc. AAAI Conf. Artif. Intell., pages 8110-8118, 2021.
350
+ [19] J.-H. Kim, J. Kim, S. J. Oh, S. Yun, H. Song, J. Jeong, J.-W. Ha, and H. O. Song. Dataset condensation via efficient synthetic-data parameterization. In Proc. Int. Conf. Mach. Learn., 2022.
351
+ [20] A. Krizhevsky, G. Hinton, et al. Learning multiple layers of features from tiny images. Technical report, CiteSeer, 2009.
352
+ [21] S. Lee, S. Chun, S. Jung, S. Yun, and S. Yoon. Dataset condensation with contrastive signals. In Proc. Int. Conf. Mach. Learn., pages 12352-12364, 2022.
353
+ [22] D. D. Lewis. A sequential algorithm for training text classifiers: Corrigendum and additional data. In Acm Sigir Forum, pages 13-19, 1995.
354
+ [23] S. Liu, K. Wang, X. Yang, J. Ye, and X. Wang. Dataset distillation via factorization. In Proc. Adv. Neural Inform. Process. Syst., 2022.
355
+ [24] Y. Liu, J. Gu, K. Wang, Z. Zhu, W. Jiang, and Y. You. DREAM: Efficient dataset distillation by representative matching. arXiv preprint arXiv:2302.14416, 2023.
356
+ [25] N. Loo, R. Hasani, A. Amini, and D. Rus. Efficient dataset distillation using random feature approximation. In Proc. Adv. Neural Inform. Process. Syst., 2022.
357
+ [26] N. Loo, R. Hasani, M. Lechner, and D. Rus. Dataset distillation with convexified implicit gradients. arXiv preprint arXiv:2302.06755, 2023.
358
+ [27] J. Lorraine, P. Vicol, and D. Duvenaud. Optimizing millions of hyperparameters by implicit differentiation. In International Conference on Artificial Intelligence and Statistics, pages 1540-1552, 2020.
359
+ [28] K. Margatina, G. Vernikos, L. Barrault, and N. Aletras. Active learning by acquiring contrastive examples. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 650–663, 2021.
360
+ [29] K. Meding, L. M. S. Buschoff, R. Geirhos, and F. A. Wichmann. Trivial or impossible — dichotomous data difficulty masks model differences (on imagenet and beyond). In Proc. Int. Conf. Learn. Represent., 2022.
361
+ [30] B. Mirzasoleiman, J. Bilmes, and J. Leskovec. Coresets for data-efficient training of machine learning models. In Proc. Int. Conf. Mach. Learn., pages 6950-6960, 2020.
362
+ [31] M. Mohri and A. Rostamizadeh. Rademacher complexity bounds for non-iid processes. In Proc. Adv. Neural Inform. Process. Syst., 2008.
363
+ [32] T. Nguyen, Z. Chen, and J. Lee. Dataset meta-learning from kernel ridge-regression. In Proc. Int. Conf. Learn. Represent., 2021.
364
+ [33] T. Nguyen, R. Novak, L. Xiao, and J. Lee. Dataset distillation with infinitely wide convolutional networks. In Proc. Adv. Neural Inform. Process. Syst., pages 5186-5198, 2021.
365
+ [34] M. Paul, S. Ganguli, and G. K. Dziugaite. Deep learning on a data diet: Finding important examples early in training. In Proc. Adv. Neural Inform. Process. Syst., pages 20596-20607, 2021.
366
+ [35] G. Pleiss, T. Zhang, E. Elenberg, and K. Q. Weinberger. Identifying mislabeled data using the area under the margin ranking. In Proc. Adv. Neural Inform. Process. Syst., pages 17044-17056, 2020.
367
+
368
+ [36] O. Pooladzandi, D. Davini, and B. Mirzasoleiman. Adaptive second order coresets for data-efficient machine learning. In Proc. Int. Conf. Mach. Learn., pages 17848-17869, 2022.
369
+ [37] P. Ren, Y. Xiao, X. Chang, P.-Y. Huang, Z. Li, B. B. Gupta, X. Chen, and X. Wang. A survey of deep active learning. ACM Comput. Surv., 54(9), oct 2021.
370
+ [38] B. Settles. Active Learning. Springer International Publishing, 2012.
371
+ [39] S. Shin, H. Bae, D. Shin, W. Joo, and I.-C. Moon. Loss-curvature matching for dataset selection and condensation. In International Conference on Artificial Intelligence and Statistics, pages 8606-8628, 2023.
372
+ [40] B. Sorscher, R. Geirhos, S. Shekhar, S. Ganguli, and A. Morcos. Beyond neural scaling laws: beating power law scaling via data pruning. In Proc. Adv. Neural Inform. Process. Syst., pages 19523-19536, 2022.
373
+ [41] M. Toneva, A. Sordoni, R. T. des Combes, A. Trischler, Y. Bengio, and G. J. Gordon. An empirical study of example forgetting during deep neural network learning. In Proc. Int. Conf. Learn. Represent., 2019.
374
+ [42] P. Vicol, J. P. Lorraine, F. Pedregosa, D. Duvenaud, and R. B. Grosse. On implicit bias in overparameterized bilevel optimization. In Proc. Int. Conf. Mach. Learn., pages 22234-22259, 2022.
375
+ [43] K. Wang, J. Gu, D. Zhou, Z. Zhu, W. Jiang, and Y. You. Dim: Distilling dataset into generative model. arXiv preprint arXiv:2303.04707, 2023.
376
+ [44] K. Wang, B. Zhao, X. Peng, Z. Zhu, S. Yang, S. Wang, G. Huang, H. Bilen, X. Wang, and Y. You. Cafe: Learning to condense dataset by aligning features. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., pages 12196-12205, 2022.
377
+ [45] T. Wang, J.-Y. Zhu, A. Torralba, and A. A. Efros. Dataset distillation. arXiv preprint arXiv:1811.10959, 2018.
378
+ [46] M. Welling. Herding dynamical weights to learn. In Proc. Int. Conf. Mach. Learn., pages 1121-1128, 2009.
379
+ [47] X. Xia, J. Liu, J. Yu, X. Shen, B. Han, and T. Liu. Moderate coreset: A universal method of data selection for real-world data-efficient deep learning. In Proc. Int. Conf. Learn. Represent., 2023.
380
+ [48] S. Yang, Z. Xie, H. Peng, M. Xu, M. Sun, and P. Li. Dataset pruning: Reducing training data by examining generalization influence. In Proc. Int. Conf. Learn. Represent., 2023.
381
+ [49] L. Zhang, J. Zhang, B. Lei, S. Mukherjee, X. Pan, B. Zhao, C. Ding, Y. Li, and D. Xu. Accelerating dataset distillation via model augmentation. In Proc. IEEE Conf. Comput. Vis. Pattern Recog., 2023.
382
+ [50] B. Zhao and H. Bilen. Dataset condensation with differentiable siamese augmentation. In Proc. Int. Conf. Mach. Learn., pages 12674-12685, 2021.
383
+ [51] B. Zhao and H. Bilen. Synthesizing informative training samples with GAN. In NeurIPS 2022 Workshop on Synthetic Data for Empowering ML Research, 2022.
384
+ [52] B. Zhao and H. Bilen. Dataset condensation with distribution matching. In Proc. IEEE Winter Conf. Appl. Comput. Vis., pages 6514-6523, 2023.
385
+ [53] B. Zhao, K. R. Mopuri, and H. Bilen. Dataset condensation with gradient matching. In Proc. Int. Conf. Learn. Represent., 2021.
386
+ [54] H. Zheng, R. Liu, F. Lai, and A. Prakash. Coverage-centric coreset selection for high pruning rates. In Proc. Int. Conf. Learn. Represent., 2023.
387
+ [55] Y. Zhou, E. Nezhadarya, and J. Ba. Dataset distillation using neural feature regression. In Proc. Adv. Neural Inform. Process. Syst., 2022.
youonlycondenseoncetworulesforpruningcondenseddatasets/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:430fb3b87f7f581be26686807234ad39a461f1518be818181af25e588ef8497f
3
+ size 668109
youonlycondenseoncetworulesforpruningcondenseddatasets/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d2357b04123bf641bf8c692947791b276f9a5ed94dce4a39d5f3e03ba3b91c9
3
+ size 533760
yourrepresentationsareinthenetworkcomposableandparalleladaptationforlargescalemodels/e5ad109c-cd83-439a-9c26-a0e687ebd28a_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20e7dbfb6384309701b42562485e65adc383ad4a6a909d64c95bd7b0a7065ae7
3
+ size 174886
yourrepresentationsareinthenetworkcomposableandparalleladaptationforlargescalemodels/e5ad109c-cd83-439a-9c26-a0e687ebd28a_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18faaa2fa0397c1531f100b6a7224d393b143d79a60fb8da18b6814c0189c657
3
+ size 207448