Chelsea707 committed · Commit 1445562 · verified · 1 Parent(s): bef30ca

Add Batch 2d3a13f9-935b-4adc-8a47-08e9842f56f0 data

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +63 -0
  2. 2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/180cefdd-aef7-4856-b7a0-a519d632e615_content_list.json +0 -0
  3. 2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/180cefdd-aef7-4856-b7a0-a519d632e615_model.json +0 -0
  4. 2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/180cefdd-aef7-4856-b7a0-a519d632e615_origin.pdf +3 -0
  5. 2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/full.md +360 -0
  6. 2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/images.zip +3 -0
  7. 2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/layout.json +0 -0
  8. 2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_content_list.json +0 -0
  9. 2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_model.json +0 -0
  10. 2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_origin.pdf +3 -0
  11. 2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/full.md +498 -0
  12. 2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/images.zip +3 -0
  13. 2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/layout.json +0 -0
  14. 2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_content_list.json +0 -0
  15. 2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_model.json +0 -0
  16. 2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_origin.pdf +3 -0
  17. 2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/full.md +490 -0
  18. 2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/images.zip +3 -0
  19. 2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/layout.json +0 -0
  20. 2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_content_list.json +0 -0
  21. 2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_model.json +0 -0
  22. 2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_origin.pdf +3 -0
  23. 2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/full.md +431 -0
  24. 2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/images.zip +3 -0
  25. 2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/layout.json +0 -0
  26. 2024/$_pi$2vec_ Policy Representation with Successor Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_content_list.json +0 -0
  27. 2024/$_pi$2vec_ Policy Representation with Successor Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_model.json +0 -0
  28. 2024/$_pi$2vec_ Policy Representation with Successor Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_origin.pdf +3 -0
  29. 2024/$_pi$2vec_ Policy Representation with Successor Features/full.md +368 -0
  30. 2024/$_pi$2vec_ Policy Representation with Successor Features/images.zip +3 -0
  31. 2024/$_pi$2vec_ Policy Representation with Successor Features/layout.json +0 -0
  32. 2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_content_list.json +0 -0
  33. 2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_model.json +0 -0
  34. 2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_origin.pdf +3 -0
  35. 2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/full.md +0 -0
  36. 2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/images.zip +3 -0
  37. 2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/layout.json +0 -0
  38. 2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_content_list.json +1656 -0
  39. 2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_model.json +0 -0
  40. 2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_origin.pdf +3 -0
  41. 2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/full.md +304 -0
  42. 2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/images.zip +3 -0
  43. 2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/layout.json +0 -0
  44. 2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_content_list.json +0 -0
  45. 2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_model.json +0 -0
  46. 2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_origin.pdf +3 -0
  47. 2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/full.md +481 -0
  48. 2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/images.zip +3 -0
  49. 2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/layout.json +0 -0
  50. 2024/3D-Aware Hypothesis & Verification for Generalizable Relative Object Pose Estimation/9e8e6d05-c99a-48b4-8cad-02bd3c61a78d_content_list.json +1860 -0
.gitattributes CHANGED
@@ -3820,3 +3820,66 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
3820
  2024/Robust[[:space:]]agents[[:space:]]learn[[:space:]]causal[[:space:]]world[[:space:]]models/685463c5-4834-42ae-bfde-5d73737d974f_origin.pdf filter=lfs diff=lfs merge=lfs -text
3821
  2024/SWE-bench_[[:space:]]Can[[:space:]]Language[[:space:]]Models[[:space:]]Resolve[[:space:]]Real-world[[:space:]]Github[[:space:]]Issues_/bc5bdb9f-c9e3-49ab-9ebf-02c3e438b710_origin.pdf filter=lfs diff=lfs merge=lfs -text
3822
  2024/Self-Alignment[[:space:]]with[[:space:]]Instruction[[:space:]]Backtranslation/22080868-a29f-4015-a22b-5b214f71fc64_origin.pdf filter=lfs diff=lfs merge=lfs -text
3823
+ 2024/\#InsTag_[[:space:]]Instruction[[:space:]]Tagging[[:space:]]for[[:space:]]Analyzing[[:space:]]Supervised[[:space:]]Fine-tuning[[:space:]]of[[:space:]]Large[[:space:]]Language[[:space:]]Models/180cefdd-aef7-4856-b7a0-a519d632e615_origin.pdf filter=lfs diff=lfs merge=lfs -text
3824
+ 2024/$_alpha$TC-VAE_[[:space:]]On[[:space:]]the[[:space:]]relationship[[:space:]]between[[:space:]]Disentanglement[[:space:]]and[[:space:]]Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3825
+ 2024/$_infty$-Diff_[[:space:]]Infinite[[:space:]]Resolution[[:space:]]Diffusion[[:space:]]with[[:space:]]Subsampled[[:space:]]Mollified[[:space:]]States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_origin.pdf filter=lfs diff=lfs merge=lfs -text
3826
+ 2024/$_mathbb{D}^2$[[:space:]]Pruning_[[:space:]]Message[[:space:]]Passing[[:space:]]for[[:space:]]Balancing[[:space:]]Diversity[[:space:]]&[[:space:]]Difficulty[[:space:]]in[[:space:]]Data[[:space:]]Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_origin.pdf filter=lfs diff=lfs merge=lfs -text
3827
+ 2024/$_pi$2vec_[[:space:]]Policy[[:space:]]Representation[[:space:]]with[[:space:]]Successor[[:space:]]Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_origin.pdf filter=lfs diff=lfs merge=lfs -text
3828
+ 2024/$t^3$-Variational[[:space:]]Autoencoder_[[:space:]]Learning[[:space:]]Heavy-tailed[[:space:]]Data[[:space:]]with[[:space:]]Student's[[:space:]]t[[:space:]]and[[:space:]]Power[[:space:]]Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_origin.pdf filter=lfs diff=lfs merge=lfs -text
3829
+ 2024/3D[[:space:]]Feature[[:space:]]Prediction[[:space:]]for[[:space:]]Masked-AutoEncoder-Based[[:space:]]Point[[:space:]]Cloud[[:space:]]Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
3830
+ 2024/3D[[:space:]]Reconstruction[[:space:]]with[[:space:]]Generalizable[[:space:]]Neural[[:space:]]Fields[[:space:]]using[[:space:]]Scene[[:space:]]Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_origin.pdf filter=lfs diff=lfs merge=lfs -text
3831
+ 2024/3D-Aware[[:space:]]Hypothesis[[:space:]]&[[:space:]]Verification[[:space:]]for[[:space:]]Generalizable[[:space:]]Relative[[:space:]]Object[[:space:]]Pose[[:space:]]Estimation/9e8e6d05-c99a-48b4-8cad-02bd3c61a78d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3832
+ 2024/A[[:space:]]2-Dimensional[[:space:]]State[[:space:]]Space[[:space:]]Layer[[:space:]]for[[:space:]]Spatial[[:space:]]Inductive[[:space:]]Bias/bf3fa3c9-0b9d-4f98-b3cc-7fe1a7a0cd98_origin.pdf filter=lfs diff=lfs merge=lfs -text
3833
+ 2024/A[[:space:]]Benchmark[[:space:]]Study[[:space:]]on[[:space:]]Calibration/f0503aef-d009-4eee-acf2-1c3cdd98f4f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
3834
+ 2024/A[[:space:]]Black-box[[:space:]]Approach[[:space:]]for[[:space:]]Non-stationary[[:space:]]Multi-agent[[:space:]]Reinforcement[[:space:]]Learning/ffb5ca5e-2883-41d3-ba56-da537031db69_origin.pdf filter=lfs diff=lfs merge=lfs -text
3835
+ 2024/A[[:space:]]Branching[[:space:]]Decoder[[:space:]]for[[:space:]]Set[[:space:]]Generation/f4d1eb44-e6f7-4f2d-9432-5cf407da2348_origin.pdf filter=lfs diff=lfs merge=lfs -text
3836
+ 2024/A[[:space:]]Characterization[[:space:]]Theorem[[:space:]]for[[:space:]]Equivariant[[:space:]]Networks[[:space:]]with[[:space:]]Point-wise[[:space:]]Activations/f86f1a74-d292-4182-ae2b-04cab50fa55c_origin.pdf filter=lfs diff=lfs merge=lfs -text
3837
+ 2024/A[[:space:]]Cognitive[[:space:]]Model[[:space:]]for[[:space:]]Learning[[:space:]]Abstract[[:space:]]Relational[[:space:]]Structures[[:space:]]from[[:space:]]Memory-based[[:space:]]Decision-Making[[:space:]]Tasks/6df9f6bf-5f10-40fc-accb-6eb2db61b037_origin.pdf filter=lfs diff=lfs merge=lfs -text
3838
+ 2024/A[[:space:]]Data-Driven[[:space:]]Measure[[:space:]]of[[:space:]]Relative[[:space:]]Uncertainty[[:space:]]for[[:space:]]Misclassification[[:space:]]Detection/17dfddf2-a1ee-484b-b1fa-2c5d58d0b382_origin.pdf filter=lfs diff=lfs merge=lfs -text
3839
+ 2024/A[[:space:]]Differentially[[:space:]]Private[[:space:]]Clustering[[:space:]]Algorithm[[:space:]]for[[:space:]]Well-Clustered[[:space:]]Graphs/408f661f-4944-4044-aa25-a2d462110446_origin.pdf filter=lfs diff=lfs merge=lfs -text
3840
+ 2024/A[[:space:]]Discretization[[:space:]]Framework[[:space:]]for[[:space:]]Robust[[:space:]]Contextual[[:space:]]Stochastic[[:space:]]Optimization/9e2d000b-3c1b-4fc8-a35a-878f48b4dad5_origin.pdf filter=lfs diff=lfs merge=lfs -text
3841
+ 2024/A[[:space:]]Dynamical[[:space:]]View[[:space:]]of[[:space:]]the[[:space:]]Question[[:space:]]of[[:space:]]Why/3a104f56-517b-4018-95a9-b8264586dba0_origin.pdf filter=lfs diff=lfs merge=lfs -text
3842
+ 2024/A[[:space:]]Fast[[:space:]]and[[:space:]]Provable[[:space:]]Algorithm[[:space:]]for[[:space:]]Sparse[[:space:]]Phase[[:space:]]Retrieval/30a94d51-1513-4408-b51a-1333c087ce64_origin.pdf filter=lfs diff=lfs merge=lfs -text
3843
+ 2024/A[[:space:]]Flexible[[:space:]]Generative[[:space:]]Model[[:space:]]for[[:space:]]Heterogeneous[[:space:]]Tabular[[:space:]]EHR[[:space:]]with[[:space:]]Missing[[:space:]]Modality/8b827a38-de6f-4168-bd61-769b9f2720da_origin.pdf filter=lfs diff=lfs merge=lfs -text
3844
+ 2024/A[[:space:]]Foundation[[:space:]]Model[[:space:]]for[[:space:]]Error[[:space:]]Correction[[:space:]]Codes/183a220c-82e5-417f-b9b5-34306109860d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3845
+ 2024/A[[:space:]]Framework[[:space:]]for[[:space:]]Inference[[:space:]]Inspired[[:space:]]by[[:space:]]Human[[:space:]]Memory[[:space:]]Mechanisms/654ca6fe-5f7f-4d80-8599-077c2e7b8ab4_origin.pdf filter=lfs diff=lfs merge=lfs -text
3846
+ 2024/A[[:space:]]Good[[:space:]]Learner[[:space:]]can[[:space:]]Teach[[:space:]]Better_[[:space:]]Teacher-Student[[:space:]]Collaborative[[:space:]]Knowledge[[:space:]]Distillation/8b06c279-e113-4822-801c-3e4b7bddefd6_origin.pdf filter=lfs diff=lfs merge=lfs -text
3847
+ 2024/A[[:space:]]Graph[[:space:]]is[[:space:]]Worth[[:space:]]1-bit[[:space:]]Spikes_[[:space:]]When[[:space:]]Graph[[:space:]]Contrastive[[:space:]]Learning[[:space:]]Meets[[:space:]]Spiking[[:space:]]Neural[[:space:]]Networks/15b5bb50-afd9-403e-bb08-390add502e99_origin.pdf filter=lfs diff=lfs merge=lfs -text
3848
+ 2024/A[[:space:]]Hard-to-Beat[[:space:]]Baseline[[:space:]]for[[:space:]]Training-free[[:space:]]CLIP-based[[:space:]]Adaptation/a3ca01c3-145f-4bff-902d-10e50e7aa01d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3849
+ 2024/A[[:space:]]Lie[[:space:]]Group[[:space:]]Approach[[:space:]]to[[:space:]]Riemannian[[:space:]]Batch[[:space:]]Normalization/229e9001-4ac5-4e71-9e7c-ffce82ce0e5d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3850
+ 2024/A[[:space:]]Linear[[:space:]]Algebraic[[:space:]]Framework[[:space:]]for[[:space:]]Counterfactual[[:space:]]Generation/66884264-3a8e-44c0-b2b6-c265948ff4f2_origin.pdf filter=lfs diff=lfs merge=lfs -text
3851
+ 2024/A[[:space:]]Multi-Level[[:space:]]Framework[[:space:]]for[[:space:]]Accelerating[[:space:]]Training[[:space:]]Transformer[[:space:]]Models/586ed87a-8e60-4456-abee-0252d4981fe2_origin.pdf filter=lfs diff=lfs merge=lfs -text
3852
+ 2024/A[[:space:]]Neural[[:space:]]Framework[[:space:]]for[[:space:]]Generalized[[:space:]]Causal[[:space:]]Sensitivity[[:space:]]Analysis/ed5c69cf-62c9-4ae1-9a9f-c17ad4ecb2db_origin.pdf filter=lfs diff=lfs merge=lfs -text
3853
+ 2024/A[[:space:]]Newborn[[:space:]]Embodied[[:space:]]Turing[[:space:]]Test[[:space:]]for[[:space:]]Comparing[[:space:]]Object[[:space:]]Segmentation[[:space:]]Across[[:space:]]Animals[[:space:]]and[[:space:]]Machines/0e2b79e8-0aff-4e34-af0b-a41a6cd9b991_origin.pdf filter=lfs diff=lfs merge=lfs -text
3854
+ 2024/A[[:space:]]Paradigm[[:space:]]Shift[[:space:]]in[[:space:]]Machine[[:space:]]Translation_[[:space:]]Boosting[[:space:]]Translation[[:space:]]Performance[[:space:]]of[[:space:]]Large[[:space:]]Language[[:space:]]Models/9c07f626-9ee5-4159-a1af-a69eead9032f_origin.pdf filter=lfs diff=lfs merge=lfs -text
3855
+ 2024/A[[:space:]]Plug-and-Play[[:space:]]Image[[:space:]]Registration[[:space:]]Network/d2ffbfea-778a-4862-a6b1-b840850df651_origin.pdf filter=lfs diff=lfs merge=lfs -text
3856
+ 2024/A[[:space:]]Policy[[:space:]]Gradient[[:space:]]Method[[:space:]]for[[:space:]]Confounded[[:space:]]POMDPs/4e9dd26e-19b4-4b4f-8aa4-c9ffa945257b_origin.pdf filter=lfs diff=lfs merge=lfs -text
3857
+ 2024/A[[:space:]]Precise[[:space:]]Characterization[[:space:]]of[[:space:]]SGD[[:space:]]Stability[[:space:]]Using[[:space:]]Loss[[:space:]]Surface[[:space:]]Geometry/2540c477-ffda-4390-b83c-f3ea03ed952e_origin.pdf filter=lfs diff=lfs merge=lfs -text
3858
+ 2024/A[[:space:]]Primal-Dual[[:space:]]Approach[[:space:]]to[[:space:]]Solving[[:space:]]Variational[[:space:]]Inequalities[[:space:]]with[[:space:]]General[[:space:]]Constraints/3f720b2d-df96-41bc-b52d-c240145f034b_origin.pdf filter=lfs diff=lfs merge=lfs -text
3859
+ 2024/A[[:space:]]Probabilistic[[:space:]]Framework[[:space:]]for[[:space:]]Modular[[:space:]]Continual[[:space:]]Learning/6a87cadf-2cbe-4c12-a644-91c047f38d93_origin.pdf filter=lfs diff=lfs merge=lfs -text
3860
+ 2024/A[[:space:]]Progressive[[:space:]]Training[[:space:]]Framework[[:space:]]for[[:space:]]Spiking[[:space:]]Neural[[:space:]]Networks[[:space:]]with[[:space:]]Learnable[[:space:]]Multi-hierarchical[[:space:]]Model/c5d70279-fcbc-4e02-b3c2-424330a0f4f7_origin.pdf filter=lfs diff=lfs merge=lfs -text
3861
+ 2024/A[[:space:]]Quadratic[[:space:]]Synchronization[[:space:]]Rule[[:space:]]for[[:space:]]Distributed[[:space:]]Deep[[:space:]]Learning/f13b2a66-3612-4494-9084-e876f71cd9cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
3862
+ 2024/A[[:space:]]ROBUST[[:space:]]DIFFERENTIAL[[:space:]]NEURAL[[:space:]]ODE[[:space:]]OPTIMIZER/ddbf4e48-f951-4f1d-ae09-e728d8de0597_origin.pdf filter=lfs diff=lfs merge=lfs -text
3863
+ 2024/A[[:space:]]Recipe[[:space:]]for[[:space:]]Improved[[:space:]]Certifiable[[:space:]]Robustness/9b2b4748-1c9a-4841-b7c1-2fe0b1c4071f_origin.pdf filter=lfs diff=lfs merge=lfs -text
3864
+ 2024/A[[:space:]]Restoration[[:space:]]Network[[:space:]]as[[:space:]]an[[:space:]]Implicit[[:space:]]Prior/bd628acd-2f31-4d10-9b48-cf5dd382c998_origin.pdf filter=lfs diff=lfs merge=lfs -text
3865
+ 2024/A[[:space:]]Semantic[[:space:]]Invariant[[:space:]]Robust[[:space:]]Watermark[[:space:]]for[[:space:]]Large[[:space:]]Language[[:space:]]Models/eaaecf25-ca84-4957-b983-8cbd596cdd68_origin.pdf filter=lfs diff=lfs merge=lfs -text
3866
+ 2024/A[[:space:]]Simple[[:space:]]Interpretable[[:space:]]Transformer[[:space:]]for[[:space:]]Fine-Grained[[:space:]]Image[[:space:]]Classification[[:space:]]and[[:space:]]Analysis/7cccd26a-b9f7-4cae-80ec-ee34cf97bf4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
3867
+ 2024/A[[:space:]]Simple[[:space:]]Romance[[:space:]]Between[[:space:]]Multi-Exit[[:space:]]Vision[[:space:]]Transformer[[:space:]]and[[:space:]]Token[[:space:]]Reduction/71d97339-9906-4827-acc1-b30bef226184_origin.pdf filter=lfs diff=lfs merge=lfs -text
3868
+ 2024/A[[:space:]]Simple[[:space:]]and[[:space:]]Effective[[:space:]]Pruning[[:space:]]Approach[[:space:]]for[[:space:]]Large[[:space:]]Language[[:space:]]Models/ce1dd238-1913-44ea-b4f4-863d908749b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
3869
+ 2024/A[[:space:]]Simple[[:space:]]and[[:space:]]Scalable[[:space:]]Representation[[:space:]]for[[:space:]]Graph[[:space:]]Generation/c443ea49-0082-4241-9f30-4f7ec9935c5d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3870
+ 2024/A[[:space:]]Stable,[[:space:]]Fast,[[:space:]]and[[:space:]]Fully[[:space:]]Automatic[[:space:]]Learning[[:space:]]Algorithm[[:space:]]for[[:space:]]Predictive[[:space:]]Coding[[:space:]]Networks/6b7cee20-5289-4188-83ae-d1940800edcc_origin.pdf filter=lfs diff=lfs merge=lfs -text
3871
+ 2024/A[[:space:]]Statistical[[:space:]]Analysis[[:space:]]of[[:space:]]Wasserstein[[:space:]]Autoencoders[[:space:]]for[[:space:]]Intrinsically[[:space:]]Low-dimensional[[:space:]]Data/7f11a767-909f-4b58-baad-30681d27d786_origin.pdf filter=lfs diff=lfs merge=lfs -text
3872
+ 2024/A[[:space:]]Study[[:space:]]of[[:space:]]Bayesian[[:space:]]Neural[[:space:]]Network[[:space:]]Surrogates[[:space:]]for[[:space:]]Bayesian[[:space:]]Optimization/76a8161e-ebfd-4157-9de1-d08ed9050b14_origin.pdf filter=lfs diff=lfs merge=lfs -text
3873
+ 2024/Self-RAG_[[:space:]]Learning[[:space:]]to[[:space:]]Retrieve,[[:space:]]Generate,[[:space:]]and[[:space:]]Critique[[:space:]]through[[:space:]]Self-Reflection/ca16f607-837d-411d-89a9-7f629e130fff_origin.pdf filter=lfs diff=lfs merge=lfs -text
3874
+ 2024/Small-scale[[:space:]]proxies[[:space:]]for[[:space:]]large-scale[[:space:]]Transformer[[:space:]]training[[:space:]]instabilities/0ed47efc-9957-4e37-a8a6-c73becb2384d_origin.pdf filter=lfs diff=lfs merge=lfs -text
3875
+ 2024/Statistically[[:space:]]Optimal[[:space:]]$K$-means[[:space:]]Clustering[[:space:]]via[[:space:]]Nonnegative[[:space:]]Low-rank[[:space:]]Semidefinite[[:space:]]Programming/ee124896-870b-414c-b295-bfe7d09558dc_origin.pdf filter=lfs diff=lfs merge=lfs -text
3876
+ 2024/The[[:space:]]mechanistic[[:space:]]basis[[:space:]]of[[:space:]]data[[:space:]]dependence[[:space:]]and[[:space:]]abrupt[[:space:]]learning[[:space:]]in[[:space:]]an[[:space:]]in-context[[:space:]]classification[[:space:]]task/ab41650c-8151-46fe-9782-ee8040834816_origin.pdf filter=lfs diff=lfs merge=lfs -text
3877
+ 2024/Topological[[:space:]]data[[:space:]]analysis[[:space:]]on[[:space:]]noisy[[:space:]]quantum[[:space:]]computers/4050e4d6-5e8a-4fbb-bcb1-90bcd241b0e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
3878
+ 2024/Towards[[:space:]]a[[:space:]]statistical[[:space:]]theory[[:space:]]of[[:space:]]data[[:space:]]selection[[:space:]]under[[:space:]]weak[[:space:]]supervision/f909d896-bcbc-4360-963f-c1f56cadc46f_origin.pdf filter=lfs diff=lfs merge=lfs -text
3879
+ 2024/Understanding[[:space:]]In-Context[[:space:]]Learning[[:space:]]in[[:space:]]Transformers[[:space:]]and[[:space:]]LLMs[[:space:]]by[[:space:]]Learning[[:space:]]to[[:space:]]Learn[[:space:]]Discrete[[:space:]]Functions/33d16e5c-994a-446b-aec8-0f22e2086730_origin.pdf filter=lfs diff=lfs merge=lfs -text
3880
+ 2024/Unified[[:space:]]Generative[[:space:]]Modeling[[:space:]]of[[:space:]]3D[[:space:]]Molecules[[:space:]]with[[:space:]]Bayesian[[:space:]]Flow[[:space:]]Networks/35fa0dc5-1473-4475-9e49-b5e5904e1338_origin.pdf filter=lfs diff=lfs merge=lfs -text
3881
+ 2024/Unprocessing[[:space:]]Seven[[:space:]]Years[[:space:]]of[[:space:]]Algorithmic[[:space:]]Fairness/0f861e7f-daa2-4459-a6eb-94101eb7bfce_origin.pdf filter=lfs diff=lfs merge=lfs -text
3882
+ 2024/ValUES_[[:space:]]A[[:space:]]Framework[[:space:]]for[[:space:]]Systematic[[:space:]]Validation[[:space:]]of[[:space:]]Uncertainty[[:space:]]Estimation[[:space:]]in[[:space:]]Semantic[[:space:]]Segmentation/c80d645d-be6e-4830-8f44-2f6b5665d88b_origin.pdf filter=lfs diff=lfs merge=lfs -text
3883
+ 2024/Vision[[:space:]]Transformers[[:space:]]Need[[:space:]]Registers/2b4e628a-8665-433c-95f0-a84c74e697de_origin.pdf filter=lfs diff=lfs merge=lfs -text
3884
+ 2024/Würstchen_[[:space:]]An[[:space:]]Efficient[[:space:]]Architecture[[:space:]]for[[:space:]]Large-Scale[[:space:]]Text-to-Image[[:space:]]Diffusion[[:space:]]Models/0d29c5ad-6fc5-49c7-863d-812b172bc233_origin.pdf filter=lfs diff=lfs merge=lfs -text
3885
+ 2024/Zipformer_[[:space:]]A[[:space:]]faster[[:space:]]and[[:space:]]better[[:space:]]encoder[[:space:]]for[[:space:]]automatic[[:space:]]speech[[:space:]]recognition/18dd180e-f663-439a-afda-c1658e7e8317_origin.pdf filter=lfs diff=lfs merge=lfs -text
2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/180cefdd-aef7-4856-b7a0-a519d632e615_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/180cefdd-aef7-4856-b7a0-a519d632e615_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/180cefdd-aef7-4856-b7a0-a519d632e615_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0f2d13d6324dcd1853bd1c0c74c04aa74424f4ab157c9c9db8b6019a7d494fa
3
+ size 2751899
2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/full.md ADDED
@@ -0,0 +1,360 @@
1
+ # INSTAG: INSTRUCTION TAGGING FOR ANALYZING SUPERVISED FINE-TUNING OF LARGE LANGUAGE MODELS
2
+
3
+ Keming Lu* & Hongyi Yuan*† & Zheng Yuan & Runji Lin†
4
+
5
+ Alibaba DAMO Academy
6
+
7
+ {lukeming.lkm,yuanhongyi.yhy,yuanzheng.yuanshen,linrunji.lrj}@alibaba-inc.com
8
+
9
+ Junyang Lin & Chuanqi Tan & Chang Zhou & Jingren Zhou
10
+
11
+ Alibaba DAMO Academy
12
+
13
+ {junyang.ljy,chuanqi.tcq,ericzhou.zc,jingren.zhou}@alibaba-inc.com
14
+
15
+ # ABSTRACT
16
+
17
+ Pre-trained large language models (LLMs) can understand and align with human instructions by supervised fine-tuning (SFT). It is commonly believed that diverse and complex SFT data are essential for good instruction-following abilities. However, such diversity and complexity are obscure and lack quantitative analyses. In this work, we propose INSTAG, an open-set instruction tagging method, to identify the semantics and intentions of human instructions with tags that provide definitions and quantified analyses of instruction diversity and complexity. We obtain 6.6K fine-grained tags that comprehensively describe instructions from popular open-sourced SFT datasets. We find that the abilities of aligned LLMs benefit from more diverse and complex instructions in SFT data. Based on this observation, we propose an INSTAG-based data sampling procedure and select 6K diverse and complex samples from open-source datasets for SFT. The resulting models, TAGLM, outperform open-source models trained on considerably larger SFT data, as evaluated by MT-Bench, echoing the importance of instruction diversity and complexity and the effectiveness of INSTAG. INSTAG has robust potential to be extended to more applications beyond data selection, as it provides an effective way to analyze the distribution of instructions.
18
+
19
+ # 1 INTRODUCTION
20
+
21
+ Contemporary chatbots, such as GPT-4 (OpenAI, 2023), have brought artificial generative intelligence to the forefront with their superior and versatile abilities in real-world task solving. Such abilities are unlocked by fine-tuning pre-trained large language models (LLMs) to align with human preference, and well-aligned LLMs can precisely recognize human intentions and properly formalize responses expressed in natural languages. Various techniques have been proposed to achieve such alignment and enable pre-trained models to comprehend and execute diverse instructions effectively, including supervised fine-tuning (SFT) (Taori et al., 2023; Chiang et al., 2023), rejection sampling (Yuan et al., 2023b; Song et al., 2023; Rafailov et al., 2023), and reinforcement learning with human feedback (RLHF) (Bai et al., 2022a; Ouyang et al., 2022; Touvron et al., 2023b).
22
+
23
+ In particular, SFT for alignment is widely studied in recent research. It is generally formalized as multi-turn utterances, where each turn is composed of a human query and a corresponding response well aligned with human preference (Wang et al., 2023d). Achieving alignment with human preference through SFT necessitates collecting a broad range of training data, typically gathered through crowd-sourcing (Ouyang et al., 2022; Bai et al., 2022a; Touvron et al., 2023b) or by distilling from other LLMs (Taori et al., 2023; Ding et al., 2023). Recent research indicates that such training data for alignment should be diverse and complex, covering various domains, tasks,
24
+
25
+ semantics, and formats (Xu et al., 2023a; Mukherjee et al., 2023; Wang et al., 2023b). Such diversity and complexity are mainly determined by the query formation. Various methods are proposed and claimed to improve the diversity and complexity of the queries and advance the performance of the SFT-aligned LLMs (Wang et al. 2023c; Xu et al. 2023a; Ding et al. 2023; inter alia). However, how to quantify the diversity and complexity of queries is significantly understudied.
26
+
27
+ To shed light on this topic, we propose using a tagging system to feature and categorize samples in SFT datasets. Given the versatile tasks that the aligned LLMs are expected to handle, an equally versatile tag system is necessary to distinguish open-world human queries. However, building an open, fine-grained tagging system manually is infeasible to scale for large datasets. To this end, we propose INSTAG, an automatic Instruction Tagging method empowered by the proprietary high-performing chatbot ChatGPT. Leveraging such a well-aligned chatbot, INSTAG provides a framework that automatically prompts ChatGPT to assign tags to the queries of training samples. INSTAG improves the quality of tag assignment by prompting ChatGPT to explain each assigned tag and by including a systematic tag normalization procedure. We apply INSTAG to an extensive collection of open-source SFT datasets and build open-set, fine-grained tags which, as we observe, can reflect the semantics and intentions behind human queries in SFT datasets. Through the scope of tags, we conduct a detailed and quantified analysis of existing open-source datasets, providing insights into query distributions in terms of diversity and complexity. Such analyses reveal that diverse and complex queries induce high alignment performance through SFT. Following this insight, we propose a data selector based on INSTAG, including a complexity-first diverse sampling method that can extract the most complex queries in a diverse distribution. LLMs fine-tuned with data selected by the INSTAG selector perform well on the popular benchmark MT-Bench (Zheng et al., 2023), supporting our insights into query distribution.
28
+
29
+ The contributions of this work are mainly three-fold. Firstly, we propose using open-set fine-grained intention tags as instruction diversity and complexity metrics. To this end, we develop INSTAG, an automated annotator that leverages the instruction-following abilities of proprietary chatbots and employs tag normalization methods. Secondly, we analyze existing open-source SFT datasets and provide insights into query diversity and complexity. Finally, we design a data selector based on INSTAG and apply it to the latest open-source datasets. The resulting best LLMs, TAGLM-13b-v1.0 and TAGLM-13b-v2.0, based on LLaMA (Touvron et al., 2023a) and LLaMA-2 (Touvron et al., 2023b) respectively and trained with the selected data, achieve scores of 6.44 and 6.55 on MT-Bench, surpassing a group of LLMs aligned with considerably more SFT data. Our contributions are verified with rich experiments and multifaceted analysis. Most notably, INSTAG exhibits robust potential to offer deeper insights into LLM alignment, extending beyond the data selection introduced in our work.
30
+
31
+ # 2 RELATED WORKS
32
+
33
+ LLMs with Human Alignment. Through supervised fine-tuning (SFT), rejection sampling, or reinforcement learning (Ouyang et al., 2022; Bai et al., 2022a;b; Yuan et al., 2023b; Rafailov et al., 2023; Song et al., 2023; Touvron et al., 2023b), LLMs can obtain versatile abilities for understanding and following diversified human queries expressed in natural languages, aligning with human intentions. Recent research mainly focused on SFT to align LLMs with human intentions and has contributed essential practices to developing open-resourced well-aligned LLMs, which is adequately summarized by Zhao et al. (2023). Several prominent works collected SFT data through human annotated demonstrations (Ouyang et al., 2022; Touvron et al., 2023b), online user logs of proprietary LLMs (Chiang et al., 2023; Wang et al., 2023a; Kopf et al., 2023), or prompting proprietary high-performing LLMs (OpenAI, 2023) to generate or rewrite samples (Taori et al. 2023; Ding et al. 2023; Xu et al. 2023a; Mukherjee et al. 2023; inter alia). Different LLMs fine-tuned on the datasets have aligned with human preference and exhibited good performance in various real-world tasks.
34
+
35
+ Data for Human Alignment. It has been highlighted that the performance of aligned LLMs is affected by the quality of the SFT data. Such data quality pertains to the level of responses (Peng et al., 2023; Chiang et al., 2023), the difficulty of tasks presented (Mukherjee et al., 2023), the complexity of queries (Xu et al., 2023a), the diversity of semantics (Ding et al., 2023; Taori et al., 2023), and the scale of sample amounts (Zhou et al., 2023). Taori et al. (2023) used Self-Instruct (Wang et al., 2023c) to generate diversified queries for SFT, and Xu et al. (2023a) proposed Evol-Instruct to complexify simple queries for better human alignment.
36
+
37
+ ![](images/eada8409fc600d6b51a63ba64c872abe3e9ee118c2cc611b2dc6468b643f2f00.jpg)
38
+ Figure 1: Overview of INSTAG. We use ChatGPT to annotate fine-grained tags for a series of open-source datasets. This figure presents two cases of open tagging annotations from ShareGPT and UltraChat. A tag normalization, including multiple denoising and aggregation methods, is then applied to the original tagging results. Finally, the quality of the tag set, as shown in the word cloud, is evaluated by human and LLM annotators, focusing on the tagging precision and consistency.
39
+
40
+ Mukherjee et al. (2023) used proprietary high-performing LLMs to rewrite the queries and responses of samples from the FLAN collection (Longpre et al., 2023) and observed improvements of LLMs in conventional NLP task solving. Ding et al. (2023) proposed UltraChat, using manually designed diverse anchor concepts and entities to generate multi-turn data by inducing conversations in ChatGPT. OpenChat (Wang et al., 2023a) and Vicuna (Chiang et al., 2023) are both current open-sourced LLMs with cutting-edge instruction-following abilities, and both models are trained on the user logs of GPT-4 from ShareGPT. As evaluated in Wang et al. (2023b), the success of fine-tuning on ShareGPT demonstrates that queries from user logs are of higher diversity and the responses generated from GPT-4 are of better quality, resulting in superior instruction-following abilities. Zhou et al. (2023) found that a small amount of high-quality data is sufficient for LLMs to excel at human alignment.
41
+
42
+ Although current research has produced more diversified and complex SFT data and made significant progress in developing LLMs well aligned with human intentions, existing works have yet to discuss how to quantify the diversity and complexity of queries. Taking advantage of the high-performing ChatGPT, we annotate existing data samples with tags. We quantify the diversity and complexity of the training data for the first time and study the data mixture for better alignment.
43
+
44
+ # 3 INSTAG
45
+
46
+ This section presents an automatic instruction tagging method INSTAG and its preliminary analyses. We first define fine-grained intention tags and present the open tagging process with LLMs (§3.1). Then, we design a systematic normalization method to denoise the raw tags from previous annotations (§3.2). We also fully evaluate the tagging quality to ensure INSTAG generates precise and consistent intention tags (§3.3). Finally, we use INSTAG to analyze open-source SFT datasets (§3.4).
47
+
48
+ # 3.1 OPEN-SET FINE-GRAINED TAGGING
49
+
50
+ Instructions, or queries in prompting modern chatbots, serve as expressions of user intentions, which can often be multifaceted and highly intricate. To illustrate, we showcase an instruction from the ShareGPT dataset in Fig. 1, where the user submits a coding request specifying desired output formats and error-handling methods. To better parse such instructions, employing fine-grained tags to identify fine-grained intentions rather than relying on generalized, coarse-grained classes is essential. However, although fine-grained intention tags offer a more detailed understanding of instruction distribution, they also present challenges in annotation and normalization. Therefore, we propose
51
+
52
+ <table><tr><td>Inconsistency (Resolution)</td><td>Examples</td><td>Output</td></tr><tr><td>Lexical Noise (Rule Aggregation)</td><td>Information Retrieval, information_retrieval, information retrieve</td><td>information retrieval</td></tr><tr><td>Uncontrolled Granularity (Semantic Aggregation)</td><td>information request, request for information, request for additional information, request for more information, additional information request, specific information request</td><td>information request</td></tr><tr><td>Spurious Correlations (Association Aggregation)</td><td>(mathematics, math problem), (loop, for loop)</td><td>mathematics, for loop</td></tr></table>
53
+
54
+ Table 1: Inconsistency in intention tagging results from open-set annotations. Inconsistencies can be addressed with three aggregation methods described in §3.2.
55
+
56
+ an open-set tagging scheme with ChatGPT, without providing a predefined ontology of tags, together with a normalization technique to address these issues. We prefer an open setting since a closed set is not flexible enough to cover the versatile intentions in open chatting. Our prompt for tagging is shown in Tab. 5. We provide few-shot examples in the prompt to guide ChatGPT to output tags in a specific JSON format for accurate parsing. As shown in Fig. 1, we annotate each query in a chat session separately and require ChatGPT to briefly explain each tag for the convenience of quality evaluation. A minimal sketch of such an annotation call is given below.
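The following is a minimal sketch of how such an open-set tagging call could be issued and parsed, assuming the `openai` Python client (v1+). The system prompt below is an illustrative paraphrase, not the paper's actual prompt (which lives in its Tab. 5), and the model name and `tag_query` helper are assumptions for this sketch.

```python
import json
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Placeholder system prompt; the paper's real few-shot prompt is in its Tab. 5.
TAGGING_PROMPT = (
    "You are a tagging system that labels the intentions of user instructions. "
    "Given a query, assign fine-grained intention tags and briefly explain each. "
    'Reply strictly as JSON: [{"tag": "...", "explanation": "..."}, ...]'
)

def tag_query(query: str, model: str = "gpt-3.5-turbo") -> list[dict]:
    """Annotate a single query with open-set intention tags."""
    response = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {"role": "system", "content": TAGGING_PROMPT},
            {"role": "user", "content": query},
        ],
    )
    # The model is asked to emit JSON; in practice parsing may fail and should be retried.
    return json.loads(response.choices[0].message.content)

if __name__ == "__main__":
    tags = tag_query("Write a Python script that parses a CSV file and reports malformed rows.")
    print([t["tag"] for t in tags])
```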
57
+
58
+ # 3.2 TAG NORMALIZATION
59
+
60
+ Producing intention tags with ChatGPT in an open setting presents a challenge in ensuring consistency: since no predefined ontology is provided, the raw tagging outcomes are noisy. The number of original raw tags for the open-sourced datasets is over 12,000, showing that ChatGPT can provide diverse and fine-grained annotations. However, we notice that the original tagging results contain noticeable noise, including inconsistent word formats and granularity. Therefore, we design a systematic method to normalize the open-set tagging. We have identified three significant types of noise, detailed in Tab. 1: Lexical Noise arises from the instability of ChatGPT in adhering to output format instructions and can be mitigated through stemming as a post-processing step; Uncontrolled Granularity refers to the potential for ChatGPT to produce overly specific tags; Spurious Correlations refer to tags that often appear together due to the bias of ChatGPT or data distributions, and such tag groups should be merged to form an atomic tag. These issues must be addressed to ensure that intentions are accurately identified and utilized in downstream processes. Therefore, we normalize the open-set tagging results along various aspects, including frequency, format, semantics, and associations. Specifically, we clean the raw tags with the following normalization procedure (a minimal code sketch follows the list):
61
+
62
+ - Frequency Filtering: We first filter out long-tail tags appearing less than $\alpha$ times in the whole annotated dataset. $\alpha$ is a hyperparameter related to the scale of the dataset.
63
+ - Rule Aggregation: We transform all tags into lowercase to avoid the influence of capitalization and replace all special characters with spaces. Finally, we apply stemming to each tag with the support of NLTK (Bird et al., 2009).
64
+ - Semantic Aggregation: We employ text embedding models to obtain the semantics of tags. We use PHRASEBERT (Wang et al., 2021), a BERT-based model designed explicitly for embedding phrases such as tag titles. Other embedding methods, such as OpenAI embeddings or DENSEPHRASE (Lee et al., 2020), can also be adopted as alternatives. After obtaining the semantic embeddings of tags, we use the DBSCAN algorithm (Hahsler et al., 2019) to cluster tags with a given threshold $t$ of semantic similarity. Similarly, other density clustering methods can be used instead of DBSCAN for the same denoising purpose. Semantic aggregation controls the granularity of tags in terms of semantic similarities.
65
+ - Association Aggregation: We notice that ChatGPT tends to provide highly associated tags that should be treated as a single atomic tag, mainly in mathematics and coding queries. Therefore, we analyze all raw tagging results and employ the FP-Growth algorithm (Han et al., 2000) to mine association rules between tags. We then recursively merge associated tags based on these association rules to reduce redundancy.
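Below is a minimal sketch of the frequency-filtering, rule-aggregation, and semantic-aggregation steps, under stated assumptions: NLTK's PorterStemmer for stemming, the public `whaleloops/phrase-bert` checkpoint loaded through sentence-transformers as a stand-in for PHRASEBERT, and scikit-learn's DBSCAN (the paper cites Hahsler et al., 2019, which is the R implementation). Mapping the paper's 0.05 similarity threshold onto DBSCAN's `eps` is an assumption. The association-aggregation step is sketched separately after the next paragraph.

```python
import re
from collections import Counter

from nltk.stem import PorterStemmer                     # pip install nltk
from sentence_transformers import SentenceTransformer   # pip install sentence-transformers
from sklearn.cluster import DBSCAN

def rule_aggregate(tag: str) -> str:
    """Lowercase, replace special characters with spaces, and stem each word."""
    stemmer = PorterStemmer()
    tag = re.sub(r"[^a-z0-9 ]", " ", tag.lower())
    return " ".join(stemmer.stem(w) for w in tag.split())

def normalize_tags(raw_tags: list[str], alpha: int = 20, eps: float = 0.05) -> dict:
    """Return a mapping from each surviving tag to a canonical cluster representative."""
    # 1) Rule aggregation folded into counting; 2) frequency filtering drops long-tail tags.
    counts = Counter(rule_aggregate(t) for t in raw_tags)
    tags = [t for t, c in counts.items() if c >= alpha]

    # 3) Semantic aggregation: cluster near-duplicate tags by embedding distance.
    #    "whaleloops/phrase-bert" is one public PhraseBERT checkpoint (an assumption here);
    #    the paper notes other phrase embedding models can be substituted.
    model = SentenceTransformer("whaleloops/phrase-bert")
    emb = model.encode(tags, normalize_embeddings=True)
    labels = DBSCAN(eps=eps, min_samples=1, metric="cosine").fit_predict(emb)

    # Map each tag to the most frequent member of its cluster.
    canon = {}
    for label in set(labels):
        members = [t for t, l in zip(tags, labels) if l == label]
        representative = max(members, key=lambda t: counts[t])
        for m in members:
            canon[m] = representative
    return canon
```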
66
+
67
+ We apply INSTAG to 17 widely used open-source SFT datasets introduced in Appx. §E. ChatGPT annotation produces over 100 thousand unique raw tags.
68
+
69
+ <table><tr><td rowspan="2">Metric</td><td rowspan="2">GPT-4 Annotation</td><td rowspan="2">Human Annotation (1%)</td><td colspan="2">Agreement (κ)</td></tr><tr><td>Human-Human</td><td>Human-GPT</td></tr><tr><td>Tag Precision</td><td>96.1</td><td>100</td><td>0.47</td><td>0.92</td></tr><tr><td>Tag Consistency</td><td>86.6</td><td>100</td><td>0.73</td><td>0.75</td></tr></table>
70
+
71
+ Table 2: Evaluation of the tagging quality of INSTAG. We design two metrics, tagging precision and consistency, for evaluating INSTAG. Moreover, we employ three human annotators to annotate 1% of the cases and report their majority vote. We report agreement between human annotators as Fleiss' kappa and agreement between the majority vote and GPT-4 as Cohen's kappa.
72
+
73
+ To filter out long-tail cases, we implement Frequency Filtering with $\alpha = 20$, resulting in the retention of 8,541 tags. We apply rule aggregation to address lexical noise, which reduces the tags to 7,157. Then, semantic aggregation with a minimum semantic similarity of 0.05 reduces the count to 6,587 tags. Finally, we employ association aggregation with a minimum support of 40 occurrences and a minimum confidence of $99\%$, producing 1,772 association rules that transform tag groups into atomic tags. These measures are essential in streamlining the tagging process and ensuring the quality of downstream processes. An overview of frequent tags is in Appx. §B. We also train a local specialized tagging LLM, INSTAGGER, to distill such annotation abilities into smaller LLMs, as shown in Appx. §C.
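A small sketch of the association-aggregation step under assumptions: `mlxtend` is used here as one convenient FP-Growth implementation (the paper only cites the algorithm, not a library), and the 40-occurrence support threshold is converted into the fractional support that `mlxtend` expects.

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import fpgrowth, association_rules

def mine_tag_associations(tag_lists, min_count=40, min_confidence=0.99):
    """tag_lists: one list of tags per query, e.g. [["mathematics", "math problem"], ...]."""
    encoder = TransactionEncoder()
    onehot = pd.DataFrame(encoder.fit_transform(tag_lists), columns=encoder.columns_)

    # mlxtend expects support as a fraction of transactions, so convert the absolute count.
    min_support = min_count / len(tag_lists)
    itemsets = fpgrowth(onehot, min_support=min_support, use_colnames=True)

    # Rules such as {"math problem"} -> {"mathematics"} flag tags that almost always
    # co-occur and can therefore be merged into a single atomic tag.
    rules = association_rules(itemsets, metric="confidence", min_threshold=min_confidence)
    return rules[["antecedents", "consequents", "support", "confidence"]]
```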
74
+
75
+ # 3.3 QUALITY EVALUATION
76
+
77
+ We employ both GPT-4 and human annotators to judge tagging quality. The quality of the normalized tagging dataset is evaluated on precision and consistency:
78
+
79
+ - Precision: whether the tags assigned to a specific query correctly relate to the query's intentions. Tag precision is essential since fine-grained tags should precisely express parts of the query's intentions. For example, given a case $(q, \mathcal{T})$ where $q$ is the query and $\mathcal{T}$ is the set of tags assigned to it, we employ annotators to identify any incorrect tags in $\mathcal{T}$. We consider it a negative case if any tag in $\mathcal{T}$ is annotated as incorrect; otherwise, it is a precise tagging case.
80
+ - Consistency: to form a consistent tag ontology, we require that the semantics of a specific tag do not shift across queries. An annotation case for consistency $(t, \mathcal{I})$ contains a tag $t$ and a set of randomly selected instructions $\mathcal{I}$ annotated with that tag. Annotators are required to identify any semantic changes of the tag across all instructions.
81
+
82
+ Specifically, we randomly sample 4,000 cases for GPT-4 annotation, 2,000 each for precision and consistency. Then, we hire three annotators to manually label 40 cases (1%) selected from the above set. Manual annotations provide judgments and indicate the reliability of the GPT-4 annotation. The evaluation results are shown in Tab. 2. GPT-4 reports $96.1\%$ and $86.6\%$ accuracy in tag precision and consistency, respectively. Meanwhile, we also report the majority vote of the human annotators, which suggests one hundred percent correctness on both tasks. We notice that the Fleiss' kappa between human annotators reaches basic agreement, while Cohen's kappa between the majority vote and GPT-4 exceeds 0.7, suggesting solid agreement between human and GPT-4 annotators. To eliminate the possibility that these results contain robust false-positive annotations, we design counterfactual annotation experiments, shown in Tab. 9, and show that both human and GPT-4 annotators can precisely recall incorrect cases. Therefore, the tags provided by INSTAG are of good quality, in terms of precision and consistency, for downstream analyses.
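For reference, agreement scores of this kind can be computed as in the minimal sketch below, using scikit-learn for Cohen's kappa and statsmodels for Fleiss' kappa; the label arrays are purely illustrative toy data, not the paper's annotations.

```python
import numpy as np
from sklearn.metrics import cohen_kappa_score
from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa

# Binary labels per case: 1 = correct/consistent tagging, 0 = not. Toy values only.
human = np.array([[1, 1, 1],   # rows: cases, columns: the three human annotators
                  [1, 1, 1],
                  [0, 0, 1],
                  [0, 0, 0]])
gpt4 = np.array([1, 1, 0, 0])

# Fleiss' kappa across the three human annotators.
counts, _ = aggregate_raters(human)          # per-case category counts
print("Fleiss kappa:", fleiss_kappa(counts))

# Cohen's kappa between the humans' majority vote and GPT-4.
majority = (human.sum(axis=1) >= 2).astype(int)
print("Cohen kappa:", cohen_kappa_score(majority, gpt4))
```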
83
+
84
+ # 3.4 PRELIMINARY ANALYSIS
85
+
86
+ We present the analysis of open-source datasets through normalized tags in Fig. 2. To start with, we introduce the diversity and complexity attributes of SFT datasets induced by the tagging results (a small computation sketch follows the definitions):
87
+
88
+ - Diversity is used to assess the range of intentions and semantics covered by the queries in a dataset. According to the tagging results, a dataset is considered more diverse if it covers more individual tags. The attribute is quantified as the unique tag coverage rate with respect to the overall tag set.
89
+ - Complexity aims to measure the number of intentions carried by a query. We assume a more complex query would be assigned more tags. The attribute is quantified as the average number of tags assigned to the queries in a dataset.
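Both metrics reduce to simple set and count operations over the normalized tags; the sketch below illustrates them on toy data, with the function names chosen here for illustration.

```python
def diversity(dataset_tags: list[list[str]], all_tags: set[str]) -> float:
    """Unique-tag coverage rate of a dataset against the overall tag set."""
    covered = {tag for tags in dataset_tags for tag in tags}
    return len(covered & all_tags) / len(all_tags)

def complexity(dataset_tags: list[list[str]]) -> float:
    """Average number of tags assigned per query."""
    return sum(len(tags) for tags in dataset_tags) / len(dataset_tags)

# Toy example with three queries and a four-tag overall tag set.
all_tags = {"information retrieval", "mathematics", "for loop", "creative writing"}
queries = [["mathematics", "for loop"], ["information retrieval"], ["mathematics"]]
print(diversity(queries, all_tags))   # 0.75
print(complexity(queries))            # ~1.33
```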
90
+
91
+ We first depict the overall assessments along the axes of diversity and complexity, as shown in Fig. 2a. Each dataset is represented as a dot whose size indicates the sample size and whose color indicates
92
+
93
+ ![](images/9b3aa6e79804bde98149f8537908ff23bc26e1d7bce5e2e012d73b42c05202ff.jpg)
94
+ (a) Diversity and Complexity based on Tags
95
+
96
+ ![](images/b3043ffbba942b1c9b8b213b9210e8e6e8f8ff68841c6c93c1751b513899461a.jpg)
97
+ (b) Dataset Correlation (Column recalls Row)
98
+ Figure 2: Dataset analysis based on tags. Fig. 2a shows diversities and complexities based on tags, where data scales and AlpacaEval scores are marked in dot sizes and colors respectively. Datasets without AlpacaEval scores are marked in circles. Fig. 2b shows correlations among datasets based on the recalls of tags. Numbers are recalls using tags in the column against tags in the row.
99
+
100
+ the performance of LLMs fine-tuned thereon and tested on AlpacaEval (Li et al., 2023). As shown, (1) Tag-based metrics present diversity and complexity well. WizardLM (Alpaca) is created by complicating the queries from the Alpaca dataset using Evol-Instruct (Xu et al., 2023a). We can see that WizardLM (Alpaca) has a larger coverage rate and average tag number than the Alpaca dataset. This observation demonstrates that the complexity and diversity of an SFT dataset can be well quantified by tags. (2) The larger the dataset, the more diverse and complex it is. On both axes, the larger datasets naturally contain human queries of higher diversity and complexity, except for mathematical reasoning and code generation. (3) Math and Code show different trends. The datasets for mathematical reasoning (MATH (Hendrycks et al., 2021), GSM8K (Cobbe et al., 2021)) and code generation (DMCC (Li et al., 2022), MBPP (Austin et al., 2021), DrRepair (Yasunaga & Liang, 2020)) focus on specific downstream abilities and result in low diversity, while such datasets have relatively high complexity. (4) Diverse and complex data induces higher performance. The ShareGPT, UltraChat (Ding et al., 2023), and OpenChat-v1 (Wang et al., 2023a) datasets lie in the upper-right corner of Fig. 2a, having both high diversity and complexity. Vicuna (Chiang et al., 2023), UltraChat, and OpenChat, respectively fine-tuned on these datasets, achieve cutting-edge performance among open-sourced models, as evaluated by public leaderboards (e.g., AlpacaEval). This verifies that LLMs can benefit from fine-tuning on more diverse and complex data for alignment.
101
+
102
+ To understand the correlations between open-source SFT datasets, we display their pairwise correlations in terms of tag recalls. As depicted in Fig. 2b, we use the tag set of the dataset in each column to calculate the recall against the tag set of the dataset in each row. We can conclude from the figure that (1) Tags can identify different tasks. Datasets for mathematical reasoning and code generation tasks exhibit higher tag recalls within the same task. This demonstrates that the tags can identify the uniqueness of mathematical reasoning and code generation datasets compared to more general-purpose datasets. (2) Few cover all. WizardLM (Alpaca), WizardLM (ShareGPT), UltraChat, and ShareGPT have higher tag recalls for the other datasets. These four datasets contain more diversified queries and cover the other datasets, consistent with the results in Fig. 2a. A small sketch of this recall computation is given below.
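The heatmap in Fig. 2b is built from pairwise tag-set recalls; a minimal sketch, assuming each dataset is represented simply by its set of normalized tags (the dataset names and tags below are toy placeholders).

```python
def tag_recall(column_tags: set[str], row_tags: set[str]) -> float:
    """Fraction of the row dataset's tags that the column dataset also covers."""
    return len(column_tags & row_tags) / len(row_tags)

# Toy example; real inputs would be the normalized INSTAG tag sets per dataset.
datasets = {
    "ShareGPT": {"coding", "information request", "mathematics", "translation"},
    "GSM8K": {"mathematics", "arithmetic"},
}
# "Column recalls row", as in Fig. 2b.
for row_name, row_tags in datasets.items():
    for col_name, col_tags in datasets.items():
        print(f"{col_name} -> {row_name}: {tag_recall(col_tags, row_tags):.2f}")
```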
103
+
104
+ Overall, INSTAG provides a tool for analyzing SFT datasets through the perspective of tagging. Existing SFT datasets differ in diversity and complexity as evaluated by the tagging results. However, we also notice two outliers in these figures. The Alpaca dataset has a large data size yet results in inferior performance and low complexity. The OpenChat-v1 dataset is filtered from ShareGPT, resulting in high query diversity and complexity while having only 8K multi-turn conversations, which suggests that a considerably smaller dataset with high query complexity and diversity can potentially result in better performance. We give more analysis on data size in §4.3.
105
+
106
+ # 4 INSTAG FOR DATA SELECTION
107
+
108
+ As the analyses in §3.4 show, fine-tuning LLMs on more diverse and complex datasets may benefit alignment performance. Therefore, we present a data selection method supported by INSTAG in this
109
+
110
+ <table><tr><td>Model</td><td>Data Size</td><td>MT-Bench</td><td>AlpacaEval</td></tr><tr><td colspan="4">Proprietary Models</td></tr><tr><td>GPT-4</td><td>-</td><td>8.99</td><td>95.3</td></tr><tr><td>GPT-3.5-turbo</td><td>-</td><td>7.94</td><td>86.1</td></tr><tr><td>Claude-v1</td><td>-</td><td>7.90</td><td>88.4</td></tr><tr><td colspan="4">LLaMA-2 Based Open-source Models</td></tr><tr><td>Llama-2-13b-chat (Touvron et al., 2023b)</td><td>-</td><td>6.65</td><td>81.1</td></tr><tr><td>TAGLM-13b-v2.0</td><td>6K</td><td>6.55±0.02</td><td>80.9±1.4</td></tr><tr><td colspan="4">LLaMA Based Open-source Models</td></tr><tr><td>Alpaca-13b (Taori et al., 2023)</td><td>52K</td><td>4.53</td><td>21.9</td></tr><tr><td>OpenChat-13b-v1 (Wang et al., 2023a)</td><td>8K</td><td>5.22</td><td>80.9</td></tr><tr><td>Baize-v2-13b (Xu et al., 2023b)</td><td>56K</td><td>5.75</td><td>67.0</td></tr><tr><td>Vicuna-13b-v1.1 (Chiang et al., 2023)</td><td>70K</td><td>6.31</td><td>70.4</td></tr><tr><td>WizardLM-13b (Xu et al., 2023a)</td><td>70K</td><td>6.35</td><td>75.3</td></tr><tr><td>Vicuna-13b-v1.3 (Chiang et al., 2023)</td><td>125K</td><td>6.39</td><td>82.1</td></tr><tr><td>TAGLM-13b-v1.0</td><td>6K</td><td>6.44±0.04</td><td>75.8±1.5</td></tr></table>
111
+
112
+ Table 3: Main results of TAGLM. Standard deviations are derived from three GPT-4 judgments; results for other baselines are obtained from the official MT-Bench and AlpacaEval leaderboards. Dashes in the data column denote unknown data sizes. Detailed results are presented in Tab. 8.
113
+
114
+ section and align LLMs with the selected data to show the effectiveness of INSTAG. We introduce the experimental setup (§4.1), results (§4.2), and analyses related to query diversity and complexity (§4.3).
115
+
116
+ # 4.1 EXPERIMENTAL SETUP
117
+
118
+ Data Pool. Based on the normalized tagging results and the preliminary analyses of open-source datasets presented in Figure 2, we conduct fine-grained experiments to study the impact of data complexity and diversity through controlled-variable comparisons. According to the correlation analyses in Figure 2b, each of WizardLM (Alpaca), WizardLM (ShareGPT), UltraChat, and ShareGPT maintains large tag recalls with respect to the other datasets. These four datasets also have the largest average tag numbers, as shown in Figure 2a. These results indicate that the four datasets have high coverage of the other datasets in terms of tags. Therefore, we pool the four datasets and create different subsets for data complexity and diversity analysis. The pooled dataset contains 306,044 samples with a tag set of size 6,398 and an average tag number of 4.48. Detailed datasets are in Appx. §E.
119
+
120
+ Configuration. We use the dataset of 6K samples to align the 13B versions of LLaMA (Touvron et al., 2023a) and LLaMA-2 (Touvron et al., 2023b) with human preference via SFT, and dub the aligned LLMs TAGLM-13b-v1.0 and TAGLM-13b-v2.0, respectively. All the models fine-tuned in the following analyses are based on 13B LLMs of either LLaMA (Touvron et al., 2023a) or LLaMA-2 (Touvron et al., 2023b). Unless specified otherwise, we fine-tune the model for five epochs with the batch size set to 128 and the learning rate set to $2 \times 10^{-5}$. The Vicuna-style template is applied to concatenate queries and responses during fine-tuning. We evaluate each fine-tuned model on MT-Bench (Zheng et al., 2023), strictly following its recipe and using GPT-4 as a judge, to demonstrate the alignment performance, compare with other LLMs, and conduct analyses.
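As a rough illustration only, the stated hyperparameters (5 epochs, effective batch size 128, learning rate 2e-5) might be expressed with Hugging Face `TrainingArguments` as below; the per-device batch / gradient-accumulation split, `bf16` choice, and output path are assumptions not taken from the paper, and the Vicuna-style query/response concatenation belongs to the data preprocessing, which is not shown.

```python
from transformers import TrainingArguments

# Effective batch size 128 = 8 GPUs x per-device batch 4 x accumulation 4 (one possible split).
args = TrainingArguments(
    output_dir="taglm-13b",              # illustrative path
    num_train_epochs=5,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-5,
    bf16=True,                           # common choice for 13B models; not stated in the paper
    logging_steps=10,
    save_strategy="epoch",
)
```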
121
+
122
+ Data Sampling. LLMs can benefit from datasets with higher diversity and complexity according to the analyses in §3.4. We therefore sample a subset of 6K samples from the pooled dataset with the highest sample complexity (an average tag number of 16.56) and full tag coverage ($100\%$). We propose a Complexity-first Diverse Sampling procedure (Cf-D, Alg. 1) to obtain the datasets.
123
+
124
+ Baselines. We compare our models to two sets of baselines. We first use proprietary GPT-4, GPT-3.5, and Claude-V1 as strong baselines, and then include strong cutting-edge open-sourced aligned LLMs, Vicuna (Chiang et al., 2023), WizardLM (Xu et al., 2023a), Baize (Xu et al., 2023b), OpenChat (Wang et al., 2023a), and Alpaca (Taori et al., 2023). Details are left to Appx. $\S F$ .
125
+
126
+ ![](images/e25b0a5cf16adb63e92fb944a9090a260bd68013b60c14b7f0bbe910c91a016f.jpg)
127
+ (a) Performance under different Tag Complexities
128
+
129
+ ![](images/c164819d87e20ea76a8aeafd945e53f7aa5252fa5231d1924e663fad654a5cbe.jpg)
130
+ (b) Performance under different Tag Diversities
131
+ Figure 3: Analysis results of model performance in terms of different tag complexities and diversities. Fig. 3a shows MT-Bench scores over different tag complexities defined as an average number of tags per session. Fig. 3b shows scores over different tag diversities defined as coverage rates over all tags. We include a random baseline in both figures as shown in red triangles.
132
+
133
+ # 4.2 RESULTS
134
+
135
+ As shown in Tab. 3, TAGLM-13b-v1.0 outperforms all the open-sourced aligned LLMs, achieving a 6.44 average score on MT-Bench, although it is fine-tuned from LLaMA on only 6K samples, far fewer than those used by other LLMs. We report the average of three GPT-4 judgments and provide the standard deviation of scores, as we notice randomness in GPT-4 judgments. This result illustrates that diversity and complexity matter in human alignment by SFT, and our INSTAG provides a decent tool for assessing and quantifying both attributes. TAGLM-13b-v2.0, fine-tuned from LLaMA-2, achieves even higher results while lagging behind Llama-2-13b-chat, which is aligned with human preference via RLHF, by only 0.1. Compared to proprietary high-performing LLMs, especially GPT-4, the performance still falls behind on MT-Bench. We present more detailed results on the eight MT-Bench task categories in Appx. §G. We also report evaluation results on AlpacaEval and observe a conclusion similar to MT-Bench: TAGLM-13b-v1.0 outperforms most of the baselines that are aligned with much more data and achieves a 75.8 win rate compared with text-davinci-003, but falls behind OpenChat-13b-v1 and Vicuna-13b-v1.3. TAGLM-13b-v2.0 also achieves performance comparable to Llama-2-13b-chat on AlpacaEval.
136
+
137
+ # 4.3 DECOUPLED ANALYSIS
138
+
139
+ We primarily discuss how SFT data size relates to alignment performance and give an ablation study on the tag-based sampling procedure. The results are shown in Tab. 4. Using the sampling procedure in Alg. 1, the alignment performance achieves the best score with 6K samples, and the performance degrades when the data size increases to $10\mathrm{K}$ and $16\mathrm{K}$. Compared to SFT with all or half of the whole pooled data, the performance remains superior. These results empirically verify that there exists a small-scale subset with high diversity and complexity that can lead to excellent alignment performance, a finding consistent with LIMA (Zhou et al., 2023). Comparing Alg. 1 to random sampling with the same 6K samples, the proposed sampling procedure results in significantly better performance, surpassing random sampling by 0.68 on MT-Bench.
140
+
141
+ <table><tr><td>Selection</td><td>Data Size</td><td>MT-Bench</td></tr><tr><td rowspan="5">Cf-D</td><td>3K</td><td>5.92</td></tr><tr><td>5K</td><td>6.33</td></tr><tr><td>6K</td><td>6.44</td></tr><tr><td>10K</td><td>6.34</td></tr><tr><td>16K</td><td>6.31</td></tr><tr><td rowspan="3">Random</td><td>6K</td><td>5.76</td></tr><tr><td>10K</td><td>6.27</td></tr><tr><td>153K(half)</td><td>6.23</td></tr><tr><td>-</td><td>306K(all)</td><td>6.21</td></tr></table>
142
+
143
+ Table 4: Results for different SFT data sizes and sampling procedures. Cf-D represents the complexity-first diverse sampling in Alg. 1.
144
+
145
+ We then provide decoupled analyses of complexity and diversity to demonstrate how they influence alignment performance given the same SFT data size.
146
+
147
+ Complexity. To decouple and focus on data complexity, we sample data subsets with different average tag numbers. All sampled subsets share the same sample size of 6K and have
148
+
149
+ the same tag coverage of $100\%$, implying the largest data diversity. In the sampling procedure, all data samples are first sorted by their tag numbers in descending order. Then, for each data subset, we start from the sample in the whole dataset with the largest tag number. A sample that expands the tag set of the currently sampled data is extracted and removed from the whole dataset. If the tag set of the current subset already covers the whole tag set but the sample number is still less than 6K, we repeat the sampling procedure until the sample number reaches 6K. This sampling procedure is similar to Alg. 1; we leave the details to Appx. $\S H$.
150
+
151
+ We sample 10 different data subsets whose average tag numbers range from 6.7 to 16.6. As shown in Figure 3a, the overall performance trend on MT-Bench increases along with the growth of the average tag number. This trend may not be significant at the fine-grained level of average tag numbers, where the difference between subsets is small. Compared to the randomly sampled dataset, whose average tag number is around 4.5, all 10 data subsets lead to better fine-tuned model performance than the random baseline. To summarize, at a coarse-grained level of data complexity, the downstream performance is positively correlated with the average tag number, while at a fine-grained level, such a phenomenon becomes less evident. This may be partly because ChatGPT does not recall all possible tags for each query, or because some tags are filtered out during the tag normalization procedure, resulting in a less accurate tag number.
152
+
153
+ Diversity. For diversity, we sample data subsets spanning various tag coverage rates with respect to the whole tag set. All subsets share the same sample scale of 6K and the same average tag number, implying the same data complexity. The average tag number is set to 5.0. For subset sampling, we first draw samples that expand the tag set of the currently sampled data until the target tag coverage rate is reached. Then, we traverse the remaining samples and extract those that do not expand the tag coverage and keep the average tag number of the subset around 5.0. We leave the detailed sampling algorithm for the diversity analysis to Appx. §H.
154
+
155
+ We can observe in Figure 3b that as the tag coverage increases, the fine-tuned model achieves higher MT-Bench scores. The randomly sampled data subset with a tag coverage of $71.9\%$ results in model performance similar to the sampled subset with a tag coverage of $70\%$. This demonstrates that fine-tuned models may benefit from more diverse datasets as measured through the scope of tags. The trend is not strictly linear, and there seems to be a plateau ranging from $50\%$ to $90\%$ coverage. This could be because the assigned tags may not share the same importance for diversity.
156
+
157
+ # 5 INSTAGGER: LOCAL TAGGER BY DISTILLATION
158
+
159
+ INSTAG depends on advanced chatbots that are expensive for large-scale applications. As fine-grained tags benefit SFT data selection and other applications, we naturally propose INSTAGGER, which acquires the tagging ability of these high-performing chatbots by distilling denoised tagging results, at a significantly smaller budget and with higher efficiency. Distillation is an effective method to inject specialized abilities into a smaller model (Fu et al., 2023). We use our INSTAG results on open-sourced SFT datasets to fine-tune a 7B version of LLaMA-2. The detailed implementation is described in Appx. §C. We validate the model on our validation set. The tag-level F1 scores based on exact match (EM) and semantic-based fuzzy match are $31.8\%$ and $73.4\%$, respectively. As this is unconstrained open-set tagging, EM is a rigorous metric when annotating over six thousand tags. Therefore, we also calculate the fuzzy match with PhraseBERT, which considers a predicted tag correct if its embedding has a cosine similarity above 0.8 with that of any gold tag.
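+ As a rough illustration, the semantic fuzzy match could be computed as sketched below; the PhraseBERT checkpoint id and the use of sentence-transformers are assumptions, not implementation details reported here.
+ ```python
+ # Sketch of the fuzzy-match F1: a predicted tag counts as correct if its embedding
+ # has cosine similarity above 0.8 with any gold tag (and symmetrically for recall).
+ from sentence_transformers import SentenceTransformer, util
+ 
+ model = SentenceTransformer("whaleloops/phrase-bert")  # hypothetical checkpoint id
+ 
+ def fuzzy_f1(pred_tags, gold_tags, threshold=0.8):
+     pred_emb = model.encode(pred_tags, convert_to_tensor=True)
+     gold_emb = model.encode(gold_tags, convert_to_tensor=True)
+     sims = util.cos_sim(pred_emb, gold_emb)              # shape [num_pred, num_gold]
+     precision = float((sims.max(dim=1).values > threshold).sum()) / max(len(pred_tags), 1)
+     recall = float((sims.max(dim=0).values > threshold).sum()) / max(len(gold_tags), 1)
+     return 2 * precision * recall / max(precision + recall, 1e-8)
+ ```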
160
+
161
+ # 6 CONCLUSION
162
+
163
+ In this paper, we introduced INSTAG, an open-set tagging method leveraging the instruction-following ability of ChatGPT for SFT data analysis. We applied INSTAG to open-source SFT datasets, showing that diverse and complex data lead to better alignment performance. We designed a complexity-first diverse sampling method to select 6K samples, and TAGLM fine-tuned on this selected dataset outperforms other open-source models aligned with considerably more data. Moreover, further decoupled analyses revealed that model performance increases when fine-tuning on more diverse and more complex SFT data, respectively. In summary, our proposed INSTAG provides a novel perspective for a deeper understanding of query distributions in the alignment of LLMs. It has strong potential to be extended to more applications beyond the data selection shown in this work, such as creating comprehensive evaluations and tag-based self-instruct.
164
+
165
+ # REFERENCES
166
+
167
+ Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021.
168
+ Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, Nicholas Joseph, Saurav Kadavath, Jackson Kernion, Tom Conerly, Sheer El-Showk, Nelson Elhage, Zac Hatfield-Dodds, Danny Hernandez, Tristan Hume, Scott Johnston, Shauna Kravec, Liane Lovitt, Neel Nanda, Catherine Olsson, Dario Amodei, Tom Brown, Jack Clark, Sam McCandlish, Chris Olah, Ben Mann, and Jared Kaplan. Training a helpful and harmless assistant with reinforcement learning from human feedback, 2022a.
169
+ Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, Carol Chen, Catherine Olsson, Christopher Olah, Danny Hernandez, Dawn Drain, Deep Ganguli, Dustin Li, Eli TranJohnson, Ethan Perez, Jamie Kerr, Jared Mueller, Jeffrey Ladish, Joshua Landau, Kamal Ndousse, Kamile Lukosuite, Liane Lovitt, Michael Sellitto, Nelson Elhage, Nicholas Schiefer, Noemi Mercado, Nova DasSarma, Robert Lasenby, Robin Larson, Sam Ringer, Scott Johnston, Shauna Kravec, Sheer El Showk, Stanislav Fort, Tamera Lanham, Timothy Telleen-Lawton, Tom Conerly, Tom Henighan, Tristan Hume, Samuel R. Bowman, Zac Hatfield-Dodds, Ben Mann, Dario Amodei, Nicholas Joseph, Sam McCandlish, Tom Brown, and Jared Kaplan. Constitutional ai: Harmlessness from ai feedback, 2022b.
170
+ Steven Bird, Ewan Klein, and Edward Loper. Natural language processing with Python: analyzing text with the natural language toolkit. "O'Reilly Media, Inc.", 2009.
171
+ Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ * chatgpt quality, March 2023. URL https://lmsys.org/blog/2023-03-30-vicuna/.
172
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
173
+ Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm.
174
+ Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. Enhancing chat language models by scaling high-quality instructional conversations. arXiv preprint arXiv:2305.14233, 2023.
175
+ Yao Fu, Hao Peng, Litu Ou, Ashish Sabharwal, and Tushar Khot. Specializing smaller language models towards multi-step reasoning, 2023.
176
+ Michael Hahsler, Matthew Piekenbrock, and Derek Doran. dbscan: Fast density-based clustering with r. Journal of Statistical Software, 91:1-30, 2019.
177
+ Jiawei Han, Jian Pei, and Yiwen Yin. Mining frequent patterns without candidate generation. ACM SIGMOD Record, 29(2):1-12, 2000.
178
+ Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021.
179
+ Or Honovich, Thomas Scialom, Omer Levy, and Timo Schick. Unnatural instructions: Tuning language models with (almost) no human labor. arXiv preprint arXiv:2212.09689, 2022.
180
+
181
182
+ Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi-Rui Tam, Keith Stevens, Abdullah Barhoum, Nguyen Minh Duc, Oliver Stanley, Richard Nagyfi, Shahul ES, Sameer Suri, David Glushkov, Arnav Dantuluri, Andrew Maguire, Christoph Schuhmann, Huu Nguyen, and Alexander Mattick. Openassistant conversations – democratizing large language model alignment, 2023.
183
+ Jinhyuk Lee, Mujeen Sung, Jaewoo Kang, and Danqi Chen. Learning dense representations of phrases at scale. arXiv preprint arXiv:2012.12624, 2020.
184
+ Xuechen Li, Tianyi Zhang, Yann Dubois, Rohan Taori, Ishaan Gulrajani, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Alpacaeval: An automatic evaluator of instruction-following models. https://github.com/tatsu-lab/alpaca_eval, 2023.
185
+ Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel J. Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with AlphaCode. Science, 378(6624):1092-1097, 2022. doi: 10.1126/science.abq1158. URL https://www.science.org/doi/abs/10.1126/science.abq1158.
186
+ Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing data and methods for effective instruction tuning, 2023.
187
+ Subhabrata Mukherjee, Arindam Mitra, Ganesh Jawahar, Sahaj Agarwal, Hamid Palangi, and Ahmed Awadallah. Orca: Progressive learning from complex explanation traces of gpt-4, 2023.
188
+ OpenAI. Gpt-4 technical report, 2023.
189
+ Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback, 2022.
190
+ Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, and Jianfeng Gao. Instruction tuning with gpt-4. arXiv preprint arXiv:2304.03277, 2023.
191
+ Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model, 2023.
192
+ Feifan Song, Bowen Yu, Minghao Li, Haiyang Yu, Fei Huang, Yongbin Li, and Houfeng Wang. Preference ranking optimization for human alignment, 2023.
193
+ Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.
194
+ Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models, 2023a.
195
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn,
196
+
197
+ Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models, 2023b.
198
+ Guan Wang, Sijie Cheng, Qiying Yu, and Changling Liu. OpenChat: Advancing Open-source Language Models with Imperfect Data, 7 2023a. URL https://github.com/imoneoi/openchat.
199
+ Shufan Wang, Laure Thompson, and Mohit Iyyer. Phrase-bert: Improved phrase embeddings from bert with an application to corpus exploration. arXiv preprint arXiv:2109.06304, 2021.
200
+ Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language model with self generated instructions. arXiv preprint arXiv:2212.10560, 2022.
201
+ Yizhong Wang, Hamish Ivison, Pradeep Dasigi, Jack Hessel, Tushar Khot, Khyathi Raghavi Chandu, David Wadden, Kelsey MacMillan, Noah A. Smith, Iz Beltagy, and Hannaneh Hajishirzi. How far can camels go? exploring the state of instruction tuning on open resources, 2023b.
202
+ Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions, 2023c.
203
+ Yufei Wang, Wanjun Zhong, Liangyou Li, Fei Mi, Xingshan Zeng, Wenyong Huang, Lifeng Shang, Xin Jiang, and Qun Liu. Aligning large language models with human: A survey. arXiv preprint arXiv:2307.12966, 2023d.
204
+ Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. Finetuned language models are zero-shot learners. In International Conference on Learning Representations.
205
+ Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. Wizardlm: Empowering large language models to follow complex instructions, 2023a.
206
+ Canwen Xu, Daya Guo, Nan Duan, and Julian McAuley. Baize: An open-source chat model with parameter-efficient tuning on self-chat data. arXiv preprint arXiv:2304.01196, 2023b.
207
+ Michihiro Yasunaga and Percy Liang. Graph-based, self-supervised program repair from diagnostic feedback. In International Conference on Machine Learning (ICML), 2020.
208
+ Hongyi Yuan, Keming Lu, and Zheng Yuan. Exploring partial knowledge base inference in biomedical entity linking. In The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks, pp. 37-49, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.bionlp-1.3. URL https://aclanthology.org/2023.bionlp-1.3.
209
+ Zheng Yuan, Hongyi Yuan, Chuanqi Tan, Wei Wang, Songfang Huang, and Fei Huang. Rrhf: Rank responses to align language models with human feedback without tears, 2023b.
210
+ Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, Yifan Du, Chen Yang, Yushuo Chen, Zhipeng Chen, Jinhao Jiang, Ruiyang Ren, Yifan Li, Xinyu Tang, Zikang Liu, Peiyu Liu, Jian-Yun Nie, and Ji-Rong Wen. A survey of large language models, 2023.
211
+ Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric. P Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena, 2023.
212
+ Chunting Zhou, Pengfei Liu, Puxin Xu, Srini Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, Susan Zhang, Gargi Ghosh, Mike Lewis, Luke Zettlemoyer, and Omer Levy. Lima: Less is more for alignment, 2023.
213
+
214
+ ![](images/a8e86002e2e1e5c2e3eb8990f70ac630fef4b55211d46f4607afa61d442d3b2d.jpg)
215
+ Figure 4: The sunburst plot of all tags. We plot with the first two words of each tag and the size is proportional to the frequency of the tag.
216
+
217
+ # APPENDIX
218
+
219
+ # A LIMITATIONS
220
+
221
+ Our conclusions mainly rely on MT-Bench and AlpacaEval for model evaluations, which may miss some effects of the SFT data. Besides, we notice that MT-Bench shows instabilities due to the randomness of GPT-4 judgments, so we provide randomized ablations as comprehensively as possible to show the statistical significance of our results, including reporting the standard deviation of MT-Bench scores. Furthermore, our analysis of SFT datasets mainly focuses on English, so our claims may not directly extend to multi-lingual scenarios.
222
+
223
+ # B TAG REVIEW
224
+
225
+ We present a sunburst plot of all tags in Fig. 4, showing that the most frequent tags concern information seeking, data manipulation, and coding queries. We plot the first two words of each tag, with size proportional to the frequency of the tag. We only plot tags whose frequency in our data pool is larger than 2,000.
226
+
227
+ # C LOCAL TAGGER
228
+
229
+ We use the following template to concatenate the queries to be tagged with their tagging results:
230
+
231
+ You are a helpful assistant. Please identify tags of user intentions in the following user query and explain each tag. Please respond in the JSON format {"tag": str, "explanation": str}. Query: <query-to-tag> Assistant: <tagging-results>
232
+
233
+ Table 5: ChatGPT prompt template for annotating intention tags of given queries.
234
+ ```txt
235
+ You are a tagging system that provides useful tags for instruction intentions to distinguish instructions for a helpful AI assistant. Below is an instruction:
236
+ [begin]
237
+ {instruction}
238
+ [end]
239
+ Please provide coarse-grained tags, such as "Spelling and Grammar Check" and "Cosplay", to identify main intentions of above instruction. Your answer should be a list including titles of tags and a brief explanation of each tag. Your response have to strictly follow this JSON format: {{“tag”: str, “explanation”: str}}. Please response in English.
240
+ ```
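+ For concreteness, a minimal sketch of how the Table 5 template could be sent to the OpenAI chat API is given below; the model name, client setup, and JSON parsing are assumptions for illustration, not details from the paper.
+ ```python
+ # Sketch: tagging a single instruction with the Table 5 prompt template.
+ import json
+ from openai import OpenAI
+ 
+ client = OpenAI()  # reads OPENAI_API_KEY from the environment
+ TEMPLATE = open("instag_prompt.txt").read()  # Table 5 template with an {instruction} slot
+ 
+ def tag_instruction(instruction: str):
+     prompt = TEMPLATE.format(instruction=instruction)
+     resp = client.chat.completions.create(
+         model="gpt-3.5-turbo",
+         messages=[{"role": "user", "content": prompt}],
+         temperature=0,
+     )
+     # The prompt asks for a JSON list of {"tag": ..., "explanation": ...} items.
+     return json.loads(resp.choices[0].message.content)
+ ```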
241
+
242
+ Table 6: GPT-4 prompt template for evaluating tagging precision.
243
+ ```latex
244
+ You are an experienced judge for intention tags of instructions. You will be provided a query and a list of tags describing intentions of the query as followed:
245
+ [query]: {query}
246
+ {tags}
247
+ Please provide feedback about whether all tags precisely describe an intention of the instruction. Please identify all incorrect tags and provide their indices in the JSON format as your response. The JSON format for your response is a list of JSON dictionary and the JSON dictionary has only one key to identify the index of each incorrect tag: [{"idx": int}]. For example, if [tag 0] and [tag 2] are incorrect, you should response as [{"idx": 0}, {"idx": 2}]. If all tags are correct, please response an empty list as [].
248
+ ```
249
+
250
+ We also include the explanations in the tagging results so that the fine-tuned model obtains better tagging performance. The overall sample size for fine-tuning is 773,511, from which we randomly hold out 1,000 samples for validation. The model is fine-tuned with a batch size of 512 for one epoch, since we empirically find that training for more than one epoch leads to over-fitting.
251
+
252
+ # D PROMPT TEMPLATES FOR CHATGPT
253
+
254
+ We present our prompt templates for ChatGPT for annotation (Tab. 5), precision evaluation (Tab. 6), and consistency evaluation (Tab. 7).
255
+
256
+ # E DATASETS
257
+
258
+ We apply INSTAG to 17 open-source SFT datasets for intention tagging:
259
+
260
+ - ShareGPT<sup>1</sup> refers to the multi-turn chatting histories used by VICUNA (Chiang et al., 2023). ShareGPT includes human-written queries and responses from ChatGPT and other chatbots.
261
+ - OpenChat (Wang et al., 2023a) is a subset of ShareGPT containing only chat histories with GPT-4 responses.
262
+
263
+ <table><tr><td>You are an experienced judge for consistency of intention tags for instructions. You will be provided a tag and a list of instructions labeled with this tag as followed:
+ [tag]: {tag}
+ {instructions}
+ Please provide feedback about whether the meaning of this tag is consistent among all instructions. Please identify all inconsistent instructions and provide their indices in the JSON format as your response. The JSON format for your response is a list of JSON dictionary: [{"idx": int}]. For example, if the meaning of tags in [instruction 0] and [instruction 2] are inconsistent, you should response as [{"idx": 0}, {"idx": 2}]. If the meaning of tag is consistent in all instructions, please response an empty list as [].</td></tr></table>
267
+
268
+ Table 7: GPT-4 prompt template for evaluating tagging consistency.
269
+
270
+ - UltraChat (Ding et al., 2023) is a systematically designed, diverse, informative, large-scale dataset of multi-turn instructional conversations without involving human queries.
271
+ - Alpaca (Taori et al., 2023) is a dataset generated by the modified SELF-INSTRUCT method (Wang et al., 2022), containing 52,000 instruction-following demonstrations generated from OpenAI's text-davinci-003 model.
272
+ - WizardLM (Xu et al., 2023a) is an instruction dataset built with the EVOL-INSTRUCT method. EVOL-INSTRUCT utilizes ChatGPT to augment the complexity of the same queries in Alpaca and ShareGPT. We denote these two subsets as WizardLM(Alpaca) and WizardLM(ShareGPT) for clarification.
273
+ - FLAN (Wei et al.) is a series of data from NLP tasks formatted in instruction tuning. The queries in FLAN are generated by templates for each NLP task.
274
+ - Dolly (Conover et al., 2023) contains 15,000 high-quality human-generated prompt and response pairs for instruction tuning of LLMs.
275
+ - OAssist (Köpf et al., 2023) is a crowdsourced human-annotated dataset about multi-lingual conversations.
276
+ - Unnatural (Honovich et al., 2022) contains queries generated by prompting DAVINCI-002.
277
+ - Lima (Zhou et al., 2023) contains only 1,000 carefully human-curated prompts and responses.
278
+ - Math Collections: We involve a set of math datasets including GSM8K (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021) to prompt INSTAG generating fine-grained mathematical tags.
279
+ - Code Collections: We also involve a set of code datasets including DMCC (Li et al., 2022), MBPP (Austin et al., 2021), and DrRepair (Yasunaga & Liang, 2020) for the same purpose as introducing mathematical datasets.
280
+
281
+ # F BASELINE LLMS
282
+
283
+ We give introductions to the LLM baselines for human alignment.
284
+
285
+ - Alpaca (Taori et al., 2023) is the first open-sourced LLM aligned with human preference. Alpaca is fine-tuned on SFT data of 52K samples generated from text-davinci-003 using Self-Instruct (Wang et al., 2023c).
286
+ - WizardLM (Xu et al., 2023a) is fine-tuned on the SFT data enhanced with a novel technique named Evol-Instruct. It complexifies the Alpaca SFT data using ChatGPT and achieves better alignment performance.
287
+ - Vicuna (Chiang et al., 2023) is an aligned LLM fine-tuned on collected user chatting logs of proprietary high-performing chatbots on ShareGPT.
288
+
289
+ ![](images/52b7b2e49bea9709492ae38a194e679b3d346b8b9bc66bd2d4888c71d7dca157.jpg)
290
+ Figure 5: Radar plot showing detailed scores of TAGLM-13b-v1.0 and major baselines on eight subtasks of MT-Bench. Detailed numbers can be viewed in Tab. 8.
291
+
292
+ - OpenChat (Wang et al., 2023a) is fine-tuned on a subset of ShareGPT with only the chatting logs with GPT-4.
293
+ - Baize (Xu et al., 2023b) uses 100K dialogues generated by self-chatting of ChatGPT. It also includes Alpaca's data for SFT.
294
+ - LLaMA-2 Chat (Touvron et al., 2023b) differs from the above-mentioned LLMs in (1) being based on pre-trained LLaMA-2 instead of LLaMA (Touvron et al., 2023a); (2) being aligned with human preference by both SFT and RLHF.
295
+
296
+ # G DETAILED MT-BENCH SCORES IN CATEGORIES
297
+
298
+ As shown in Fig. 5 (detailed numbers are presented in Tab. 8), TAGLM-13b-v1 outperforms all other baselines on stem and extraction, and achieves performance on humanities comparable to Vicuna, suggesting these tasks may require only a small amount of alignment data. TAGLM-13b-v1 ranks second on math, coding, and writing, but falls short on roleplay and reasoning. These detailed results show that some tasks may require diverse but only a small amount of alignment data, while tasks involving reasoning and writing may continually benefit from large-scale data.
299
+
300
+ # H SAMPLING ALGORITHM FOR DECOUPLED ANALYSIS
301
+
302
+ We calculate complexity and diversity with the tag-based metrics described in §3.4. We first sort all samples by query complexity (the query tag number) and then pick distinct queries according to their tags to achieve high sample diversity (tag coverage). The selection criterion at each step is to select a query that has a large tag number and expands the tag set of the selected subset. The algorithm is detailed in Alg. 1.
303
+
304
+ We present our sampling algorithm for decoupled analysis of complexity and diversity in Alg. 2 and Alg. 3, respectively.
305
+
306
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Data</td><td colspan="8">MT-Bench Scores</td><td colspan="2">Average</td></tr><tr><td>coding</td><td>extraction</td><td>humanities</td><td>math</td><td>reason</td><td>roleplay</td><td>stem</td><td>writing</td><td>all</td><td>w/o C&amp;M</td></tr><tr><td>gpt-4</td><td>-</td><td>8.55</td><td>9.38</td><td>9.95</td><td>6.8</td><td>9.0</td><td>8.9</td><td>9.7</td><td>9.65</td><td>8.99</td><td>9.43</td></tr><tr><td>gpt-3.5-turbo</td><td>-</td><td>6.9</td><td>8.85</td><td>9.55</td><td>6.3</td><td>5.65</td><td>8.4</td><td>8.7</td><td>9.2</td><td>7.94</td><td>8.39</td></tr><tr><td>claude-v1</td><td>-</td><td>6.25</td><td>8.8</td><td>9.7</td><td>4.8</td><td>5.95</td><td>8.5</td><td>9.7</td><td>9.5</td><td>7.9</td><td>8.69</td></tr><tr><td>Llama-2-13b-chat</td><td>-</td><td>3.0</td><td>6.92</td><td>9.75</td><td>3.45</td><td>5.1</td><td>7.5</td><td>8.62</td><td>8.85</td><td>6.65</td><td>7.79</td></tr><tr><td>TAGLM-13b-v2.0 (1)</td><td>6K</td><td>3.75</td><td>6.5</td><td>9.55</td><td>2.1</td><td>5.3</td><td>7.95</td><td>8.5</td><td>8.75</td><td>6.55</td><td>7.76</td></tr><tr><td>TAGLM-13b-v2.0 (2)</td><td>6K</td><td>3.7</td><td>6.2</td><td>9.52</td><td>2.15</td><td>5.35</td><td>8.1</td><td>8.4</td><td>8.95</td><td>6.55</td><td>7.75</td></tr><tr><td>TAGLM-13b-v2.0 (3)</td><td>6K</td><td>3.4</td><td>7.35</td><td>9.6</td><td>2.15</td><td>5.9</td><td>7.45</td><td>8.28</td><td>8.0</td><td>6.52</td><td>7.76</td></tr><tr><td>TAGLM-v1.0-13b (1)</td><td>6K</td><td>3.8</td><td>6.45</td><td>9.55</td><td>3.0</td><td>4.9</td><td>6.9</td><td>8.75</td><td>8.55</td><td>6.49</td><td>7.52</td></tr><tr><td>TAGLM-v1.0-13b (2)</td><td>6K</td><td>3.45</td><td>6.35</td><td>9.65</td><td>2.95</td><td>4.95</td><td>7.15</td><td>8.65</td><td>8.5</td><td>6.46</td><td>7.54</td></tr><tr><td>TAGLM-v1.0-13b (3)</td><td>6K</td><td>3.4</td><td>6.45</td><td>9.45</td><td>2.85</td><td>5.05</td><td>7.05</td><td>8.43</td><td>8.4</td><td>6.38</td><td>7.47</td></tr><tr><td>vicuna-13b-v1.3</td><td>125K</td><td>3.25</td><td>5.55</td><td>9.45</td><td>2.6</td><td>5.85</td><td>7.18</td><td>7.98</td><td>9.25</td><td>6.39</td><td>7.54</td></tr><tr><td>vicuna-13b-v1.1</td><td>70K</td><td>2.95</td><td>6.4</td><td>9.45</td><td>2.9</td><td>4.65</td><td>7.5</td><td>8.55</td><td>8.05</td><td>6.31</td><td>7.43</td></tr><tr><td>wizardlm-13b</td><td>70K</td><td>4.0</td><td>4.9</td><td>9.7</td><td>3.75</td><td>5.25</td><td>7.4</td><td>7.7</td><td>8.12</td><td>6.35</td><td>7.18</td></tr><tr><td>baize-v2-13b</td><td>56K</td><td>3.0</td><td>4.6</td><td>9.02</td><td>1.8</td><td>5.4</td><td>6.8</td><td>7.72</td><td>7.65</td><td>5.75</td><td>6.87</td></tr><tr><td>nous-hermes-13b</td><td>300K</td><td>2.45</td><td>5.05</td><td>9.0</td><td>2.65</td><td>3.8</td><td>6.38</td><td>7.02</td><td>7.75</td><td>5.51</td><td>6.5</td></tr><tr><td>gpt4all-13b-snoozy</td><td>900K</td><td>3.0</td><td>4.8</td><td>8.85</td><td>1.2</td><td>4.2</td><td>7.0</td><td>6.9</td><td>7.35</td><td>5.41</td><td>6.52</td></tr><tr><td>koala-13b</td><td>472K</td><td>2.9</td><td>4.15</td><td>8.45</td><td>1.9</td><td>4.0</td><td>6.85</td><td>7.2</td><td>7.35</td><td>5.35</td><td>6.33</td></tr><tr><td>openchat-13b-v1</td><td>8K</td><td>2.35</td><td>3.3</td><td>9.07</td><td>2.0</td><td>2.75</td><td>7.55</td><td>7.7</td><td>7.05</td><td>5.22</td><td>6.24</td></tr><tr><td>alpaca-13b</td><td>52K</td><td>2.35</td><td>4.15</td><td>7.85</td><td>1.05</td><td>3.5</td><td>5.45</td><td>5.2</td><td>6.7</td><td>4.53</td><td>5.48</td></tr></table>
307
+
308
+ Table 8: Main results of INSTAG. We present MT-Bench scores of both proprietary and open-source baselines at similar scales. We also provide average scores over all categories and over categories without code and math (w/o C&M). Dashes in the data column denote unknown data scales. Parentheses mark the three different rounds of GPT-4 judgments.
309
+
310
+ Algorithm 1: Complexity-first Diverse Sampling
311
+ ```latex
312
+ Data: The Whole Pooled Dataset $\mathcal{D}$, Sub-Dataset Size $N$
+ Result: The Sampled Sub-Dataset $\mathcal{D}_s$
313
+ 1 Initialize Empty $\mathcal{D}_s$ .
314
+ 2 Sorting Queries in $\mathcal{D}$ by tag number in descending;
315
+ 3 while $|\mathcal{D}_s| < N$ do
316
+ 4 Tag Set $\mathcal{T}_s^B\gets \emptyset$ .
317
+ 5 foreach Query $q\in \mathcal{D}$ do
318
+ 6 if Query Tags $\mathcal{T}_q: \left|\mathcal{T}_s^B\cup \mathcal{T}_q\right| > \left|\mathcal{T}_s^B\right|$ then
+ 7 $\mathcal{D}_s\gets \mathcal{D}_s\cup \{q\}$;
+ 8 $\mathcal{T}_s^B\gets \mathcal{T}_s^B\cup \mathcal{T}_q$;
+ 9 $\mathcal{D}\gets \mathcal{D}\backslash \{q\}$;
+ 10 if $|\mathcal{D}_s|$ equals $N$ then
+ 11 Break;
319
+ 12 return $\mathcal{D}_s$
320
+ ```
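+ A compact Python sketch of Alg. 1 follows; samples are assumed to be dicts with a "tags" field, and tie-breaking details are simplifications of the pseudocode above.
+ ```python
+ # Sketch of complexity-first diverse sampling (Alg. 1): repeatedly sweep the pool,
+ # taking samples (largest tag number first) that expand the covered tag set.
+ def cf_d_sampling(dataset, n):
+     pool = sorted(dataset, key=lambda s: len(s["tags"]), reverse=True)
+     selected = []
+     while len(selected) < n and pool:
+         covered, remaining, added = set(), [], False
+         for sample in pool:
+             tags = set(sample["tags"])
+             if len(selected) < n and not tags <= covered:  # expands tag coverage
+                 selected.append(sample)
+                 covered |= tags
+                 added = True
+             else:
+                 remaining.append(sample)
+         if not added:  # no remaining sample can expand coverage; stop early
+             break
+         pool = remaining
+     return selected
+ ```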
321
+
322
+ # I COUNTERFACTUAL EVALUATION
323
+
324
+ To test how well annotators can evaluate tag quality, we created counterfactual cases for two tasks. In the tag precision task, we substituted some tags with semantically similar ones. In the tag consistency task, we replaced the original instructions with inconsistent ones. Both humans and GPT-4 are able to recognize most of the counterfactual cases; humans are better at tag precision, while GPT-4 is better at tag consistency. This analysis shows that annotators have low false positive rates, which supports confidence in their judgments on the original tagging results.
325
+
326
+ Algorithm 2: Data Sampling for Complexity Analysis
327
+ Data: The Whole Pooled Dataset $\mathcal{D}$
328
+ Subset Size $N$
+ Result: The Sampled Sub-Datasets of Different Complexity $\mathrm{D} = \{\mathcal{D}_c^i \mid i = 1,\dots ,n\}$
329
+ 2 Sorting Queries in $\mathcal{D}$ by tag number in descending;
330
+ 3 Initialize $\mathrm{D} = \mathrm{list}()$
331
+ 4 foreach i in $\{1,\ldots ,n\}$ do
332
+ 5 Initialize Empty $\mathcal{D}_c^i$
333
+ 6 while $|\mathcal{D}_c^i | < N$ do
334
+ 7 Tag Set $\mathcal{T}_c^B\gets \emptyset$
335
+ 8 foreach Query $q\in \mathcal{D}$ do
336
+ 9 if Query Tags $\mathcal{T}_q:\left|\mathcal{T}_c^B\cup \mathcal{T}_q\right| > |\mathcal{T}_c^B|$ then
337
+ 10 $\mathcal{D}_c^i\gets \mathcal{D}_c^i\cup \{q\}$; $\mathcal{T}_c^B\gets \mathcal{T}_c^B\cup \mathcal{T}_q$; $\mathcal{D}\gets \mathcal{D}\setminus \{q\}$;
+ 11 if $|\mathcal{D}_c^i| = N$ then
+ 12 $\mathrm{D}\gets \mathrm{D}$ appends $\mathcal{D}_c^i$;
+ 13 Break;
338
+ 14 return D
339
+
340
+ Algorithm 3: Data Sampling for Diversity Analysis
341
+ Data: The Whole Pooled Dataset \(\mathcal{D}\), Preset Coverage Rates \(\mathcal{R} = \{r^i \mid i = 1,\dots ,n\}\), Subset Size \(N\)
+ Result: The Sampled Sub-Datasets of Different Diversity \(\mathrm{D} = \{\mathcal{D}_d^{r_i} \mid i = 1,\ldots ,n\}\)
342
+ 2 Initialize \(\mathrm{D} = \mathrm{list}()\)
343
+ 3 foreach \(i\) in \(\{1,\dots ,n\}\) do
344
+ 4 Initialize Empty \(\mathcal{D}_d^{r_i}\gets \emptyset\)
345
+ 5 Tag Set \(\mathcal{T}_d\gets \emptyset\)
346
+ 6 foreach Query \(q\in \mathcal{D}\) do
347
+ 7 if Query Tags \(\mathcal{T}_q:|\mathcal{T}_d\cup \mathcal{T}_q| > |\mathcal{T}_d|\) then
348
+ 8 \(\mathcal{D}_d^{r_i}\gets \mathcal{D}_d^{r_i}\cup \{q\}\); \(\mathcal{T}_d\gets \mathcal{T}_d\cup \mathcal{T}_q\); \(\mathcal{D}\gets \mathcal{D}\setminus \{q\}\);
+ 9 if \(|\mathcal{T}_d| / |\mathcal{T}| > r_i\) then Break;
+ 10 foreach Query \(q\in \mathcal{D}\) whose tags do not expand \(\mathcal{T}_d\) and keep the average tag number of \(\mathcal{D}_d^{r_i}\) around the target do
+ 11 \(\mathcal{D}_d^{r_i}\gets \mathcal{D}_d^{r_i}\cup \{q\}\); \(\mathcal{D}\gets \mathcal{D}\setminus \{q\}\);
+ 12 if \(|\mathcal{D}_d^{r_i}| = N\) then Break;
+ 13 D \(\gets\) D appends \(\mathcal{D}_d^{r_i}\);
+ 14 return D
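+ A small Python sketch of this diversity-controlled sampling, for a single target coverage rate, is given below; the tolerance on the average tag number is an assumption, since the text only requires the average to stay around the target.
+ ```python
+ # Sketch of Alg. 3 for one coverage rate: grow tag coverage to the target, then
+ # fill up with samples that do not expand coverage but preserve the average tag number.
+ def diversity_sampling(dataset, all_tags, coverage_rate, n, target_avg=5.0, tol=0.2):
+     subset, covered, pool = [], set(), list(dataset)
+     # Phase 1: add samples that expand tag coverage until the target rate is reached.
+     for sample in list(pool):
+         tags = set(sample["tags"])
+         if not tags <= covered:
+             subset.append(sample); covered |= tags; pool.remove(sample)
+         if len(covered) / len(all_tags) >= coverage_rate:
+             break
+     # Phase 2: fill to n with samples that keep coverage fixed and the average tag
+     # number close to the target (tolerance is a hypothetical choice).
+     for sample in list(pool):
+         if len(subset) >= n:
+             break
+         tags = set(sample["tags"])
+         new_avg = (sum(len(s["tags"]) for s in subset) + len(tags)) / (len(subset) + 1)
+         if tags <= covered and abs(new_avg - target_avg) <= tol:
+             subset.append(sample); pool.remove(sample)
+     return subset
+ ```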
349
+
350
+ # J CASE FROM INSTAG
351
+
352
+ We present a sample case in Tab. 10 to illustrate the behavior of INSTAG.
353
+
354
+ <table><tr><td rowspan="2">Metric</td><td colspan="2">GPT-4 Annotation</td><td colspan="2">Human Annotation (1%)</td></tr><tr><td>Original</td><td>Counterfactual</td><td>Original</td><td>Counterfactual</td></tr><tr><td>Tag Precision</td><td>96.1</td><td>6.1</td><td>100</td><td>0</td></tr><tr><td>Tag Consistency</td><td>86.6</td><td>7.8</td><td>100</td><td>14.3</td></tr></table>
355
+
356
+ Table 9: Evaluation of the tagging quality of INSTAG. We design two metrics, tagging precision and consistency, for evaluating INSTAG. We employ GPT-4 to label 4,000 tagging results, and we also employ three human annotators to annotate $1\%$ of the cases and report their majority vote. We also create counterfactual cases to probe the judgment abilities of different annotators.
357
+
358
+ <table><tr><td colspan="2">Sample Case</td></tr><tr><td colspan="2">Utterance</td></tr><tr><td>Human:</td><td>List 3 breakfast foods that are healthy.</td></tr><tr><td rowspan="3">LLM:</td><td>1.Oatmeal - it&#x27;s high in fiber, protein, and antioxidants, and can lower cholesterol levels.</td></tr><tr><td>2.Greek yogurt - it&#x27;s packed with protein, calcium, and probiotics, which support gut health.</td></tr><tr><td>3.Eggs - they&#x27;re rich in protein, vitamins D and B12, and minerals like selenium, and can help you feel full longer.</td></tr><tr><td colspan="2">INSTAG Results</td></tr><tr><td>Tag</td><td>Explanation</td></tr><tr><td>food recommendation</td><td>The instruction asks for a list of breakfast foods that are healthy.</td></tr><tr><td>quantity</td><td>The instruction specifies that the list should include three breakfast foods.</td></tr><tr><td>health</td><td>The instruction emphasizes the need for the breakfast foods to be healthy.</td></tr></table>
359
+
360
+ Table 10: One case from SFT data of WizardLM tagged by INSTAG. INSTAG accurately captures the semantics (food, health) and intentions (recommendation, quantity) in the human query.
2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad24d4d4495a226b29f83a33adc46141346488a2507214a48f01994375110f03
3
+ size 822721
2024/#InsTag_ Instruction Tagging for Analyzing Supervised Fine-tuning of Large Language Models/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/9963c4ed-836b-4f69-81c2-3bec8faa181d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1facb36f527113b97855ac80244c84e737b35ebf476b7f5922670276a2fd4d6e
3
+ size 3578779
2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/full.md ADDED
@@ -0,0 +1,498 @@
1
+ # $\alpha$ -TCVAE: ON THE RELATIONSHIP BETWEEN DISENTANGLEMENT AND DIVERSITY
2
+
3
+ Cristian Meo* TUDelft, NL.
4
+
5
+ Louis Mahon
6
+ University of Edinburgh, UK.
7
+
8
+ Anirudh Goyal
9
+ Google DeepMind, UK.
10
+
11
+ Justin Dauwels
12
+ TUDelft, NL.
13
+
14
+ # ABSTRACT
15
+
16
+ Understanding and developing optimal representations has long been foundational in machine learning (ML). While disentangled representations have shown promise in generative modeling and representation learning, their downstream usefulness remains debated. Recent studies re-defined disentanglement through a formal connection to symmetries, emphasizing the ability to reduce latent domains (i.e., ML problem spaces) and consequently enhance data efficiency and generative capabilities. However, from an information theory viewpoint, assigning a complex attribute (i.e., features) to a specific latent variable may be infeasible, limiting the applicability of disentangled representations to simple datasets. In this work, we introduce $\alpha$ -TCVAE, a variational autoencoder optimized using a novel total correlation (TC) lower bound that maximizes disentanglement and latent variables informativeness. The proposed TC bound is grounded in information theory constructs, generalizes the $\beta$ -VAE lower bound, and can be reduced to a convex combination of the known variational information bottleneck (VIB) and conditional entropy bottleneck (CEB) terms. Moreover, we present quantitative analyses and correlation studies that support the idea that smaller latent domains (i.e., disentangled representations) lead to better generative capabilities and diversity. Additionally, we perform downstream task experiments from both representation and RL domains to assess our questions from a broader ML perspective. Our results demonstrate that $\alpha$ -TCVAE consistently learns more disentangled representations than baselines and generates more diverse observations without sacrificing visual fidelity. Notably, $\alpha$ -TCVAE exhibits marked improvements on MPI3D-Real, the most realistic disentangled dataset in our study, confirming its ability to represent complex datasets when maximizing the informativeness of individual variables. Finally, testing the proposed model off-the-shelf on a state-of-the-art model-based RL agent, Director, significantly shows $\alpha$ -TCVAE downstream usefulness on the loconav Ant Maze task. Implementation available at https://github.com/Cmeo97/Alpha-TCVAE
17
+
18
+ # 1 INTRODUCTION
19
+
20
+ The efficacy of machine learning (ML) algorithms is intrinsically tied to the quality of data representation (Bengio et al., 2013). Such representations are useful not only for standard downstream tasks such as supervised learning (Alemi et al., 2017) and reinforcement learning (RL) (Li, 2017), but also for tasks such as transfer learning (Zhuang et al., 2020) and zero-shot learning (Sun et al., 2021). Unsupervised representation learning aims to identify semantically meaningful representations of data without supervision, by capturing the generative factors of variations that describe the structure of the data (Radford et al., 2016; Locatello et al., 2019b). According to Bengio et al. (2013), disentanglement learning holds the key to understanding the world from observations, generalizing knowledge across different tasks and domains while learning and generating compositional representations (Higgins et al., 2016; Kim & Mnih, 2018).
21
+
22
+ Problem Formulation. The goal of disentanglement learning is to identify a set of independent generative factors $\pmb{z}$ that give rise to the observations $\pmb{x}$ via $p(\pmb{x}|\pmb{z})$ . However, from an information
23
+
24
+ theory perspective, the amount of information retained by every latent variable may be insufficient to represent realistic generative factors (Kirsch et al., 2021; Do & Tran, 2020), limiting the applicability of disentangled representations to simple problems. What is more, Friedman & Dieng (2022) recently introduced the Vendi score, a new metric for gauging generative diversity, showing that entangled generative models, such as the Very Deep VAE (Child, 2020), consistently produce samples with less diversity compared to ground truth. This is indicative of their limited representational and generative prowess. In contrast, Higgins et al. (2019; 2022) re-defined disentangled representations through the lens of symmetries, linking disentanglement to computational problem spaces (e.g., disentangled representations inherently reduce the problem space (Arora & Barak, 2009)), suggesting that disentangled models should be able to explore and traverse the latent space more efficiently, leading to enhanced generative diversity.
25
+
26
+ Previous Work. Most existing disentangled models optimize lower bounds that only impose an information bottleneck on the latent variables, and while this can result in factorized representations (Higgins et al., 2016), it does not directly optimize latent variable informativeness (Do & Tran, 2020). As a result, while several approaches have been proposed to learn disentangled representations by optimizing different bounds (Chen et al., 2018; Kim & Mnih, 2018), imposing sparsity priors (Mathieu et al., 2019), or isolating source of variance (Rolinek et al., 2019), none of the proposed models successfully learned disentangled representations of realistic datasets. Moreover, to the best of our knowledge, no systematic and quantitative analyses have been proposed to assess to what extent disentanglement and generative diversity (Friedman & Dieng, 2022) are correlated.
27
+
28
+ Proposed method. In this work, we propose $\alpha$ -TCVAE, a VAE optimized using a novel convex lower bound of the joint total correlation (TC) between the learned latent representation and the input data. The proposed bound, through a convex combination of the variational information bottleneck (VIB) Alemi et al. (2017) and the conditional entropy bottleneck (CEB) Fischer & Alemi (2020), maximizes the average latent variable informativeness, improving both representational and generative capabilities. The effectiveness of $\alpha$ -TCVAE is especially prominent in the MPI3D-Real Dataset (Gondal et al., 2019), the most realistic dataset in our study that is compositionally built upon distinct generative factors. Figure 1 illustrates a comparison of the latent traversals between $\alpha$ -TCVAE, Factor-VAE and $\beta$ -VAE, showing that $\alpha$ -TCVAE leads to the best visual fidelity and generative diversity (i.e., higher Vendi Score). Interestingly, the proposed TC bound is grounded in information theory constructs, generalizes the $\beta$ -VAE (Higgins et al., 2016) lower bound, and can be reduced to a convex combination of the known variational information bottleneck (VIB) (Alemi et al., 2017) and conditional entropy bottleneck (CEB) (Fischer & Alemi, 2020) terms.
29
+
30
+ Experimental Evaluation In order to determine the effectiveness of $\alpha$ -TCVAE and the downstream usefulness of the learned representations, we measure the diversity and quality of generated images and disentanglement of its latent representations. Then, we perform a correlation study between the considered downstream scores across all models, analyzing how generative diversity and disentanglement are related across different datasets. This analysis substantiates our claim that disentanglement leads to improved diversity. Finally, we conduct experiments to assess the downstream usefulness of the proposed method from a broader ML perspective. Notably, the proposed method consistently outperforms the related baselines, showing a significant improvement in the RL Ant Maze task when applied off-the-shelf in Director, a hierarchical model-based RL agent (Hafner et al., 2022).
31
+
32
+ # 2 RELATED WORK
33
+
34
+ Generative Modelling and Disentanglement Recently Locatello et al. (2019b) demonstrated that unsupervised disentangled representation learning is theoretically impossible, nonetheless disentangled VAEs, acting as both representational and generative models, Kingma & Welling (2013); Higgins et al. (2016); Chen et al. (2018); Kim & Mnih (2018) achieve practical results by leveraging implicit biases within the data and learning dynamics Burgess & Kim (2018); Higgins et al. (2019); Mathieu et al. (2019). On the generation side, they have been widely used to generate data such as images (Chen et al., 2019), text (Shi et al., 2019), speech (Sun et al., 2020; Li et al., 2023) and music (Wang et al., 2020). Various extensions to the base VAE model have been presented to improve generation quality in terms of visual fidelity (Peng et al., 2021; Vahdat & Kautz, 2020; Razavi et al.,
35
+
36
+ ![](images/c90ce73da8d4b14e1c2c19d67ca6813c0318d84a30b28b3a70fb97ecff96675a.jpg)
37
+ Figure 1: Ground truth (first row), reconstructions (second row) and latent traversals comparison of $\alpha$ -TCVAE, Factor-VAE, and $\beta$ -VAE on the MPI3D-Real Dataset. Notably, $\alpha$ -TCVAE showcases superior visual fidelity and generative diversity, as indicated by a higher Vendi Score.
38
+
39
+ 2019). On the representational side, aiming for explainable and factorized representations, Higgins et al. (2016) proposed $\beta$ -VAE, which inspired a number of following disentangled VAE-based models, such as Factor-VAE (Kim & Mnih, 2018), $\beta$ -TCVAE Chen et al. (2018), and $\beta$ -Annealed VAE (Burgess et al., 2018). Both $\beta$ -VAE and Factor-VAE aim to learn disentangled representations by imposing a bottleneck on the information flowing through the latent space. While $\beta$ -VAE does this by introducing a $\beta$ hyperparameter that increases the strength of the information bottleneck, Factor-VAE introduces a TC regularization term. Chen et al. (2018) proposed $\beta$ -TCVAE, which minimizes the total correlation of the latent variables using Monte-Carlo and importance sampling. Roth et al. (2023) proposed the Hausdorff Factorized Support (HFS) criterion, a relaxed disentanglement criterion that encourages only pairwise factorized support, rather than a factorial distribution, by minimizing a Hausdorff distance. This allows for arbitrary distributions of the factors over their support, including correlations between them. Our model, namely $\alpha$ -TCVAE, is optimized by a TC lower bound as well; however, we do not make use of any trick or expensive sampling strategy. In contrast, we derive a TC lower bound that does not require any extra network or sampling strategy and is theoretically grounded in the Deep Information Bottleneck framework Alemi et al. (2017).
40
+
41
+ Disentanglement and Deep Information Bottleneck In the last few years, a link between the latent space capacity and disentanglement of the learned variables (Bengio et al., 2013; Shwartz-Ziv & Tishby, 2017; Goyal et al., 2021) has been identified, showing that decreasing the capacity of a network induces disentanglement on the learned representations. This relationship has been explained by the information bottleneck (IB) principle, introduced by Tishby et al. (2001) as a regularization method to obtain minimal sufficient encoding by constraining the amount of information captured by the latent variables from the observed variable. Variational IB (VIB) (Alemi et al., 2017) has extended the IB framework by applying it to neural networks, which results in a simple yet effective method to learn representations that generalize well and are robust against adversarial attacks. Furthermore, (Alemi et al., 2017; Kirsch et al., 2021) outlined the relationship between VIB, VAE (Kingma & Welling, 2013) and $\beta$ -VAE (Higgins et al., 2016), providing an information theoretical interpretation of the Kullback-Leibler (KL) divergence term used in these models as a regularizer. Despite the advantages introduced by the VIB framework, imposing independence between every latent variable may be too strong an assumption (Roth et al., 2023). For this reason, Fischer & Alemi (2020) introduced the conditional entropy bottleneck (CEB), which assumes conditional independence between the learned latent variables, providing the ability to learn more expressive and robust representations (Kirsch et al., 2021). Recently, a generalization of the mutual Information (MI),
42
+
43
+ namely total correlation (TC), has been used to learn disentangled representations as well (Kim & Mnih, 2018). Following Hwang et al. (2021), who propose a similar TC bound for a multi-view setting, we derive a novel TC lower bound for the unsupervised representational learning setting. As a result, the proposed bound is able to learn expressive and disentangled representations.
44
+
45
+ Disentanglement and Diversity The ideal generative model learns a distribution that well explains the observed data, which can then be used to draw a diverse set of samples. Diversity is thus an important desirable property of generative models (Friedman & Dieng, 2022). We desire the ability to produce samples that are different from each other and from the samples we already have at train time, while still coming from the same underlying distribution. The benefits of diversity have been advocated in a number of different contexts, such as image synthesis (Mao et al., 2019), molecular design (Lin et al., 2021; Wu et al., 2021), natural language text (McCoy et al., 2023), and drug discovery (Kim & Mnih, 2018). Motivated by the benefits of generative diversity, several VAE-based models have aimed to show increased diversity in their generated samples (Razavi et al., 2019). Some works have also noted improvements in diversity due to disentanglement. Lee et al. (2018) adversarially disentangle style from content and show enhanced diversity of image-to-image translations. Kazemi et al. (2019) also perform style-content disentanglement, this time in the context of text generation, and again observe an increase in diversity. Li et al. (2020) shows that disentangling pose, shape, and texture leads to greater diversity in generated images. Collectively, these studies emphasize that diversity is often a valuable indicator of effectiveness in various applications, and suggest that diversity and disentanglement are intertwined aspects of generative models. Yet, to the best of our knowledge, no quantitative analyses that support this claim have been presented. In this work, we present a correlation study, showing how downstream metrics of disentanglement (e.g., DCI (Eastwood & Williams, 2018)) and diversity (e.g., Vendi Score (Friedman & Dieng, 2022)) are correlated across several models and datasets.
46
+
47
+ # 3 $\alpha$ -TCVAE FRAMEWORK DERIVATION
48
+
49
+ Motivation. In contrast to most existing methods, which only impose an information bottleneck to learn disentangled representations, we also seek to maximize the informativeness of the individual latent variables. The joint total correlation (TC) can be explicitly expressed in terms of mutual information between the observed data and the latent generative factors, as shown in equation 4, allowing us to link disentanglement to latent-variable informativeness. As a result, leveraging the TC formulation, we can derive a lower bound that not only promotes disentanglement but also maximizes the information retained by individual latent variables.
50
+
51
+ Derivation. In this section, we formally derive the novel TC bound. Let $\mathcal{D} = \{X, V\}$ be the ground-truth set that consists of images $x \in \mathbb{R}^{N \times N}$ , and a set of conditionally independent ground-truth data generative factors $v \in \mathbb{R}^M$ , where $\log p(v|x) = \sum_k \log p(v_k|x)$ . The goal is to develop an unsupervised deep generative model that can learn the joint distribution of the data $x$ , while uncovering a set of generative latent factors $z \in \mathbb{R}^K$ , $K \geq M$ , such that $z$ can fully describe the data structure of $x$ and generate data samples that follow the underlying ground-truth generative factors $v$ . Since directly optimizing the joint TC is intractable, we are going to maximize a lower bound of the joint total correlation $TC(z, x)$ between the learned latent representations $z$ and the input data $x$ , following the approach proposed by Hwang et al. (2021). The total correlation is defined as the KL divergence between the joint distribution and the factored marginals. In our case:
52
+
53
+ $$
54
+ T C _ {\theta} (\boldsymbol {z}) \triangleq D _ {K L} \left[ \int q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) p _ {D} (\boldsymbol {x}) d \boldsymbol {x} \| \prod_ {k = 1} ^ {K} q _ {\theta} (\boldsymbol {z} _ {k}) \right], \tag {1}
55
+ $$
56
+
57
+ where the joint distribution is $q_{\theta}(\pmb{z}) = \int q_{\theta}(\pmb{z}|\pmb{x}) p_D(\pmb{x}) d\pmb{x}$ , $p_D(\pmb{x})$ is the data distribution, $q_{\theta}(\pmb{z}_k) = \int q_{\theta}(\pmb{z})\, d\pmb{z}_{\neq k}$ is the marginal of the $k$-th latent variable, and $\pmb{z}_{\neq k}$ denotes all components of $\pmb{z}$ except the $k$-th. Since we aim to find the encoder $q_{\theta}(\pmb{z}|\pmb{x})$ that disentangles the learned representations $\pmb{z}$ , we can formulate the following objective:
58
+
59
+ $$
60
+ T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) \triangleq T C _ {\theta} (\boldsymbol {z}) - T C _ {\theta} (\boldsymbol {z} | \boldsymbol {x}), \tag {2}
61
+ $$
62
+
63
+ where the conditional $\mathrm{TC}(z|x)$ can be expressed as:
64
+
65
+ $$
66
+ T C _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \triangleq \mathbb {E} _ {p _ {D} (\boldsymbol {x})} \left[ D _ {K L} \left[ q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| \prod_ {k = 1} ^ {K} q _ {\theta} (\boldsymbol {z} _ {k} | \boldsymbol {x}) \right] \right], \tag {3}
67
+ $$
68
+
69
+ which is the expected KL divergence of the joint conditional from the factored conditionals. Intuitively, by minimizing $TC_{\theta}(\boldsymbol{z}|\boldsymbol{x})$ we maximize $TC_{\theta}(\boldsymbol{z},\boldsymbol{x})$, enhancing the disentanglement of the learned representation $\boldsymbol{z}$. Moreover, decomposing equation 2, we can express the TC in terms of MI (Gao et al., 2019):
70
+
71
+ $$
72
+ T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) = \sum_ {k = 1} ^ {K} I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\theta} (\boldsymbol {z}, \boldsymbol {x}), \tag {4}
73
+ $$
74
+
75
+ where $I_{\theta}(z, x)$ is the mutual information between $z$ and $x$ and is known as the VIB term (Alemi et al., 2017). Additionally, we can also express the TC in terms of conditional MI:
76
+
77
+ $$
78
+ T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) = \frac {1}{K} \sum_ {k = 1} ^ {K} \left[ (K - 1) I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\theta} \left(\boldsymbol {z} _ {\neq k}, \boldsymbol {x} \mid \boldsymbol {z} _ {k}\right) \right], \tag {5}
79
+ $$
80
+
81
+ where $I_{\theta}(z_{\neq k}, \boldsymbol{x} | z_k)$ is known as the CEB term (Fischer & Alemi, 2020). Equations 4 and 5 link the designed objective to the VIB and CEB frameworks, respectively; complete derivations are given in Appendices A.1 and A.2. While the VIB term promotes compression of the latent representation, the CEB term promotes balance between the information contained in each latent dimension. Since we want to promote both disentanglement and the informativeness of individual variables in the learned latent representation, we propose a lower bound that convexly combines the derived VIB and CEB terms. We define the bound as follows:
82
+
83
+ $$
84
+ T C (\boldsymbol {z}, \boldsymbol {x}) \geq \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})} [ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) ] - \underbrace {\frac {K \alpha}{K - \alpha} D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} | \boldsymbol {x}))} _ {\text {CEB}} - \underbrace {\frac {1 - \alpha}{1 - \frac {\alpha}{K}} D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})\right)} _ {\text {VIB}}, \tag {6}
85
+ $$
86
+
87
+ where $\alpha$ is a hyperparameter that trades off the VIB and CEB terms. Following Hwang et al. (2021), we define $r_p(\boldsymbol{z}|\boldsymbol{x}) = N(\pmb{\mu}_p, \pmb{\sigma}_p\pmb{I})$ and $r(\boldsymbol{z}) = N(\mathbf{0}, \mathbf{I})$ , where $\pmb{\sigma}_p \triangleq \left( \sum_{k=1}^{K} \frac{1}{\pmb{\sigma}_k^2} \right)^{-1}$ and $\pmb{\mu}_p \triangleq \pmb{\sigma}_p \cdot \sum_{k=1}^{K} \frac{\pmb{\mu}_k}{\pmb{\sigma}_k^2}$ , while $\pmb{\mu}_k$ and $\pmb{\sigma}_k$ are the mean and standard deviation used to compute $\pmb{z}_k$ via the reparameterization trick, as in Kingma & Welling (2013). A full derivation of the bound defined in equation 6 can be found in Appendix A.
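+
+ To make the bound concrete, the snippet below is a minimal sketch of how the two KL regularizers and the convex weighting of equation 6 could be computed for a diagonal-Gaussian encoder. It assumes PyTorch, interprets $\pmb{\sigma}$ as variances, and broadcasts the product-of-experts parameters $\pmb{\mu}_p, \pmb{\sigma}_p$ over dimensions; it is an illustration under these assumptions, not the implementation used for the reported experiments.
+
+ ```python
+ import torch
+
+ def alpha_tcvae_regularizer(mu, var, alpha):
+     """Sketch of the two KL terms in equation 6 for a diagonal-Gaussian q(z|x).
+
+     mu, var: [batch, K] posterior means and variances.
+     alpha:   hyperparameter with 0 <= alpha < K trading off the CEB and VIB terms.
+     """
+     K = mu.shape[1]
+
+     # Product-of-experts parameters: sigma_p = (sum_k 1/sigma_k^2)^(-1),
+     # mu_p = sigma_p * sum_k mu_k / sigma_k^2 (sigma treated as a variance here).
+     precision = 1.0 / var
+     var_p = 1.0 / precision.sum(dim=1, keepdim=True)          # [batch, 1]
+     mu_p = var_p * (mu * precision).sum(dim=1, keepdim=True)  # [batch, 1]
+
+     # CEB term: KL(q(z|x) || r_p(z|x)) with r_p = N(mu_p, var_p * I).
+     kl_ceb = 0.5 * (torch.log(var_p / var) + (var + (mu - mu_p) ** 2) / var_p - 1.0).sum(dim=1)
+
+     # VIB term: KL(q(z|x) || N(0, I)).
+     kl_vib = 0.5 * (var + mu ** 2 - torch.log(var) - 1.0).sum(dim=1)
+
+     # Convex weighting from equation 6.
+     return (K * alpha / (K - alpha)) * kl_ceb + ((1.0 - alpha) / (1.0 - alpha / K)) * kl_vib
+
+ # A training step would then minimise a reconstruction loss plus this regularizer, e.g.
+ # loss = recon_nll + alpha_tcvae_regularizer(mu, var, alpha=0.25).mean()
+ ```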
88
+
89
+ Practical Implications. Disentangled models with $M$ generative factors and $K$ latent dimensions usually have $(K - M)$ noisy latent dimensions (Do & Tran, 2020), but our CEB term induces an inductive bias on the information flowing through every individual latent variable, pushing otherwise noisy dimensions to be informative. The derived TC lower bound generalizes the structure of the widely used $\beta$ -VAE (Higgins et al., 2016) bound: for $\alpha = 0$ , the TC bound reduces to the $\beta$ -VAE one. A comparison of the $\alpha$ -TCVAE, $\beta$ -VAE, $\beta$ -TCVAE, HFS and FactorVAE lower bounds can be found in Table 1 in Appendix A.3.
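+
+ As a quick sanity check of this reduction (our own arithmetic, not part of the derivation in Appendix A), the two coefficients in equation 6 satisfy
+
+ $$
+ \lim_ {\alpha \to 0} \frac {K \alpha}{K - \alpha} = 0, \qquad \lim_ {\alpha \to 0} \frac {1 - \alpha}{1 - \frac {\alpha}{K}} = 1, \qquad \frac {K \alpha}{K - \alpha} \Big| _ {\alpha = 1} = \frac {K}{K - 1}, \qquad \frac {1 - \alpha}{1 - \frac {\alpha}{K}} \Big| _ {\alpha = 1} = 0,
+ $$
+
+ so $\alpha = 0$ retains only the VIB regularizer with unit weight, recovering the $\beta$ -VAE structure, while $\alpha = 1$ retains only the CEB regularizer; intermediate values interpolate between the two.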
90
+
91
+ # 4 EXPERIMENTS
92
+
93
+ In this section, we design empirical experiments to understand the performance of $\alpha$ -TCVAE and its potential limitations by exploring the following questions: (1) Does maximising the informativeness of latent variables consistently lead to an increase in representational power and generative diversity? (2) Do disentangled representations inherently present higher diversity than entangled ones? (3) How do disentanglement and diversity correlate with other downstream metrics (i.e., FID (Heusel et al., 2017) and unfairness (Locatello et al., 2019a))? (4) To what extent does maximising the latent variables' informativeness in disentangled representations improve their downstream usefulness?
94
+
95
+ Experimental Setup. In order to assess the performance of both proposed and baseline models, we validate the considered models on the following datasets. Teapots (Moreno et al., 2016) contains 200,000 images of teapots with features: azimuth and elevation, and object colour. 3DShapes (Burgess & Kim, 2018) contains 480,000 images, with features: object shape and colour, floor colour, wall colour, and horizontal orientation. MPI3D-Real (Gondal et al., 2019) contains 103,680 images of objects at the end of a robot arm, with features: object colour, size, shape, camera height, azimuth, and robot arm altitude. Cars3D (Reed et al., 2015) contains 16,185 images with features: car-type, elevation, and azimuth. CelebA (Liu et al., 2015) contains over 200,000 images of faces
96
+
97
+ under a broad range of poses, facial expressions, and lighting conditions, totalling 40 different factors. All datasets under consideration consist of RGB images with dimensions $64 \times 64$ . Among them, CelebA stands out as the most realistic and complex dataset. On the other hand, MPI3D-Real is considered the most realistic among factorized datasets, which we define as those compositionally generated using independent factors. To assess the generated images, we use the FID score (Heusel et al., 2017) to measure the distance between the distributions of generated and real images, and the Vendi score (Friedman & Dieng, 2022) to measure the diversity of sampled images. Both Vendi and FID use the Inception Network (Szegedy et al., 2017) to extract image features and compute the related similarity metrics. Since DCI (Eastwood & Williams, 2018) scores can produce unreliable results in certain cases (Mahon et al., 2023; Cao et al., 2022; Do & Tran, 2020), we also measure disentanglement using single-neuron classification (SNC) (Mahon et al., 2023). Further details on the datasets and metrics used are given in Appendix C.
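+
+ For reference, the Vendi score has a simple closed form: given $n$ samples and a positive semidefinite similarity matrix with unit diagonal, it is the exponential of the Shannon entropy of the eigenvalues of that matrix divided by $n$ (Friedman & Dieng, 2022). The following is a minimal sketch assuming cosine similarity on pre-extracted features; it is not the exact evaluation pipeline used for the results below.
+
+ ```python
+ import numpy as np
+
+ def vendi_score(features, eps=1e-12):
+     """Effective number of distinct samples: exp(entropy of eigenvalues of K/n).
+
+     features: [n, d] array of image features (e.g., Inception activations).
+     The cosine-similarity kernel used here is an assumption, not prescribed above.
+     """
+     x = features / (np.linalg.norm(features, axis=1, keepdims=True) + eps)
+     n = x.shape[0]
+     lam = np.linalg.eigvalsh(x @ x.T / n)   # eigenvalues of the normalised Gram matrix
+     lam = np.clip(lam, 0.0, None)
+     lam = lam / lam.sum()                   # numerical guard; they already sum to 1
+     return float(np.exp(-np.sum(lam * np.log(lam + eps))))
+
+ # Identical samples give a score near 1; n mutually orthogonal samples give a score near n.
+ ```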
98
+
99
+ Baseline Methods. We compare $\alpha$ -TCVAE to four other VAE models: $\beta$ -VAE (Higgins et al., 2016), $\beta$ -TCVAE (Chen et al., 2018), $\beta$ -VAE+HFS (Roth et al., 2023) and FactorVAE (Kim & Mnih, 2018), all of which are described in Section 2, as well as a vanilla VAE (Kingma & Welling, 2013). To assess diversity and visual fidelity beyond VAE-based models, we also compare to a generative adversarial network model, StyleGAN (Karras et al., 2019).
100
+
101
+ Generation Faithfulness and Diversity Analyses. We present image generation results from our model alongside baseline models, evaluating performance on the FID and Vendi metrics across datasets. For image generation using VAE-based models, we adopt two strategies: (1) sampling a noise vector from a multivariate standard normal and decoding it; (2) encoding a real image, selecting a latent dimension, shifting its value by $\pm 1, 2, 4, 6, 8$ , or $10$ standard deviations, and decoding the adjusted representation. In Figures 2 and 3 the two sampling strategies are labeled as 'Sampled from Noise' and 'Sampled from Traversals', respectively. Figures 2 and 3 show that $\alpha$ -TCVAE consistently generates more diverse (higher Vendi) and more faithful (lower FID) images than baseline VAE models.
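+
+ The two strategies can be summarised schematically as below; this sketch assumes a generic VAE exposing `encode`/`decode` with diagonal-Gaussian posterior parameters, and the function names are illustrative rather than taken from the released code.
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def sample_from_noise(vae, n, latent_dim):
+     """Strategy (1): decode latent codes drawn from the standard-normal prior."""
+     return vae.decode(torch.randn(n, latent_dim))
+
+ @torch.no_grad()
+ def sample_from_traversals(vae, x, dim, shifts=(1, 2, 4, 6, 8, 10)):
+     """Strategy (2): encode real images, shift one latent dimension by +/- k
+     posterior standard deviations, and decode the adjusted codes."""
+     mu, sigma = vae.encode(x)      # assumed to return posterior mean and std, [batch, K]
+     outputs = []
+     for k in shifts:
+         for sign in (1, -1):
+             z = mu.clone()
+             z[:, dim] = mu[:, dim] + sign * k * sigma[:, dim]
+             outputs.append(vae.decode(z))
+     return torch.cat(outputs, dim=0)
+ ```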
102
+
103
+ ![](images/1c0d5dbf2a9df2bbfc016039e16757b28db883d1c4aaae4854e6d425f862c328.jpg)
104
+ Figure 2: Diversity of generated images, as measured by Vendi score. Two different sampling strategies are considered: sampled from white noise and from traversals. The diversity of the images of our model, $\alpha$ -TCVAE, is consistently higher than baseline VAE models, and on par with StyleGAN. The green dashed line represents ground truth dataset diversity. Traversals produce significantly more diverse images than samples.
105
+
106
+ The Vendi score of $\alpha$ -TCVAE is comparable to that of StyleGAN, and its FID score is only slightly worse. Moreover, StyleGAN takes roughly $15\times$ the training time ( $\sim$ 2 hrs for the VAE-based models vs. $>$ 30 hrs for StyleGAN on a single Nvidia Titan XP) and learns only a generative model, whereas VAEs learn both a generative model and a representational model. Noticeably, all VAE-based models perform poorly in terms of both diversity and reconstruction quality when sampling from white noise, highlighting the benefit of a structured sampling strategy when using VAE-based models for generative tasks. Another finding is that traversal-generated images are superior to those obtained from the prior, i.e., sampling from a standard normal and decoding. This is in keeping with prior work showing that drawing latent samples from a distribution other than the standard normal, e.g. a GMM, often leads to higher-quality generated images (Chadebec et al., 2022), and it supports the claim that disentangled models allow more systematic exploration of the latent space, leading to more diverse images. This claim is also supported by noting that all disentangled VAEs give higher diversity than the vanilla VAE.
107
+
108
+ ![](images/8f819da6c03c0dc2a4ddbba989dacf654f0a11fa233c6ad7e021edac89b2fe99.jpg)
109
+ Figure 3: Faithfulness of generated images to the data distribution, as measured by FID score. Two different sampling strategies are considered: sampled from white noise and from traversals. The scores for the images of our model, $\alpha$ -TCVAE, are consistently better than baseline VAE models (lower FID is better), and only slightly worse than StyleGAN. Traversals produce significantly more faithful images than samples.
110
+
111
+ Disentanglement Analyses and Downstream Metrics Correlation Study In this section we examine the disentanglement capabilities of $\alpha$ -TCVAE and the related VAE baselines, and how disentanglement relates, statistically, to the diversity and quality of the generated images reported above. Figures 4, 5 and 6 show that $\alpha$ -TCVAE consistently achieves comparable or better DCI, SNC and unfairness scores. The improvement of $\alpha$ -TCVAE over the baselines is most significant on the most realistic factorized dataset, namely MPI3D-Real. Interestingly, while there is a significant gap between the DCI scores of disentangled and entangled models across every factorized dataset, SNC shows that, in terms of single-neuron factorization, for both Cars3D and MPI3D-Real, $\alpha$ -TCVAE is the only model that significantly improves over the entangled VAE. This is perhaps due to the tendency of DCI to sometimes overestimate disentanglement (Mahon et al., 2023; Cao et al., 2022).
112
+
113
+ Furthermore, as illustrated in Figure 4, no model has been successful in learning disentangled representations from the CelebA dataset. To meaningfully encode CelebA images, we used high-dimensional latent representations (e.g., 48 dimensions). However, as highlighted by Do & Tran (2020), disentangling and measuring disentanglement in high-dimensional representations are notoriously challenging tasks. Indeed, while DCI and unfairness present unrealistic results, SNC gave all models a score of zero, and so we do not display the figures here. Figure 10 illustrates a significant correlation between the Vendi, unfairness, and DCI metrics. There is a compelling correlation between Vendi and DCI scores, underscoring that diversity and disentanglement are statistically related. This resonates with the understanding that disentangled latent spaces naturally exhibit superior generative diversity (Higgins et al., 2019). Additionally, Vendi and DCI both exhibit a negative correlation with unfairness. This observation is consistent with the findings of Locatello et al. (2019a), implying that the fairness of downstream prediction tasks is deeply associated with the diversity and disentanglement of the learned representations. Further correlation results are given in Appendix D, along with examples of latent traversals.
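+
+ The correlation analysis itself only requires the per-run metric values; a minimal sketch is given below, where the metric names and placeholder data are illustrative and do not correspond to actual results.
+
+ ```python
+ import numpy as np
+
+ def metric_correlations(runs):
+     """Pearson correlations between downstream metrics across (model, seed) runs.
+
+     runs: dict mapping metric name -> array with one score per run.
+     """
+     names = list(runs)
+     values = np.stack([np.asarray(runs[n], dtype=float) for n in names])
+     corr = np.corrcoef(values)
+     return {(a, b): corr[i, j] for i, a in enumerate(names)
+             for j, b in enumerate(names) if i < j}
+
+ # Placeholder data for illustration only:
+ rng = np.random.default_rng(0)
+ scores = {m: rng.random(30) for m in ("vendi", "fid", "dci", "unfairness")}
+ print(metric_correlations(scores))
+ ```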
114
+
115
+ Attribute Classification Task In this experiment, we train a multilayer perceptron (MLP)
116
+
117
+ ![](images/50906f4ed7c95e8bc3cd28200bc011fb1e43f4373434ed817459d3d9079d4a15.jpg)
118
+
119
+ ![](images/5815278cdfd58138b1ac543c99a41f773f7bc336a7f00d251acc643227137702.jpg)
120
+ Figure 4: Comparison of DCI scores of our model with those of baseline models.
121
+
122
+ ![](images/bdf300b8b7ea2cbc41a0ae839daba9c386a27a3f4ac39db5fee3c41f23fbf6aa.jpg)
123
+ Figure 5: Comparison of SNC scores of our model with those of baseline models.
124
+ Figure 6: Comparison of unfairness scores of our model with those of baseline models.
125
+
126
+ ![](images/288c12fe7d728ccab7133cf8d8004e9accb76e15d833e28c337c83cc8034b44c.jpg)
127
+ Figure 7: Correlations between diversity (Vendi score), generation faithfulness (FID score), unfairness and DCI. Correlations are computed using the results from all models across 5 different seeds.
128
+
129
+ to classify sample attributes using the models' encoded latent representations. Figure 8 reveals that $\alpha$ -TCVAE either matches or surpasses the baseline models in terms of attribute classification accuracy. The improvement is minor on 3DShapes and Teapots, but more significant on Cars3D and MPI3D-Real. Interestingly, the only dataset where all VAEs exhibit commendable performance is CelebA, where high-dimensional representations are used. This suggests that, for this particular downstream task, the dimensionality of the representation may be the main constraining factor. In fact, this downstream task inherently favours high-dimensional representations, considering that an MLP is employed for the attribute classification.
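+
+ A minimal version of this probe, sketched here with scikit-learn, is shown below; the architecture and training details of the MLP used in the actual experiments are not specified by this snippet.
+
+ ```python
+ import numpy as np
+ from sklearn.model_selection import train_test_split
+ from sklearn.neural_network import MLPClassifier
+
+ def attribute_classification_accuracy(latents, labels, seed=0):
+     """Train an MLP to predict one ground-truth factor from encoded latents.
+
+     latents: [n, K] posterior means from a trained VAE encoder.
+     labels:  [n] integer values of a single generative factor.
+     """
+     z_tr, z_te, y_tr, y_te = train_test_split(
+         latents, labels, test_size=0.2, random_state=seed)
+     clf = MLPClassifier(hidden_layer_sizes=(256, 256), max_iter=200, random_state=seed)
+     clf.fit(z_tr, y_tr)
+     return clf.score(z_te, y_te)
+
+ # Illustration only: random latents and labels give chance-level accuracy.
+ # attribute_classification_accuracy(np.random.randn(1000, 10), np.random.randint(0, 4, 1000))
+ ```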
130
+
131
+ # Loconav Ant Maze Reinforcement Learning
132
+
133
+ Task. In this experiment, a model-based RL agent has to learn its proprioceptive dynamical system while escaping from a maze. Recently, Hafner et al. (2022) introduced Director, a hierarchical model-based RL agent. Director employs a hierarchical strategy with a Goal VAE that learns and generates sub-goals, simplifying the planning task. The first hierarchy level represents the agent's internal states, while in the second one, the Goal VAE encodes the agent's state and infers sub-goals. As a result, the Goal VAE generates sub-goals to guide the agent through the environment. Given the enhanced generative diversity of $\alpha$ -TCVAE, we postulated that integrating our proposed TC bound could improve Director's exploration. In
134
+
135
+ this experiment, we replaced the $\beta$ -VAE objective used to train Director's Goal VAE with our TC bound, expecting a richer diversity of sub-goals, thus expediting environment exploration and enhancing overall learning behaviour. Figure 9 compares the performance of Director and Alpha-Director, which replaces the $\beta$ -VAE objective with the proposed TC bound.
136
+
137
+ ![](images/138acc4cacfca5bfeab6d09ec8a565d0b98ec98ea4663418550e284953d46fa2.jpg)
138
+ Figure 8: Comparison of $\alpha$ -TCVAE and baseline models on the Downstream Attribute Classification Task. Our proposed model either matches or surpasses the baseline models in terms of attribute classification accuracy
139
+
140
+ ![](images/d88a419b250e7c37599e12f9d8e272a685eff7c4639ca9a377f1af923fcfd28a.jpg)
141
+ (a)
142
+
143
+ ![](images/9523c2a5bdaeb301b37e47bfa37296d28a908b2efc79946640b00cf1d1d94e51.jpg)
144
+ (b)
145
+ Figure 9: Performance of Director, a model-based hierarchical RL agent, and Alpha-Director on the Antmaze task. While Director samples sub-goals using the original $\beta$ -VAE, Alpha-Director samples sub-goals using the proposed $\alpha$ -TCVAE. Sampling using $\alpha$ -TCVAE gives more diverse goals (b), better exploration (c) and significantly higher mean return (a).
146
+
147
+ ![](images/9085dbc0442cae3cf570befff366151b3269dd73edf09fdd7e54897ad48ab07e.jpg)
148
+ (c)
149
+
150
+ The results are averaged across three seeds. Figure 9-(a) presents the mean return, which scores the performance of the agent on the given task (i.e., finding the exit of the maze while learning proprioceptive dynamics), showing that Alpha-Director significantly outperforms Director, learning faster and reaching a higher final mean return. Figure 9-(b) illustrates the Vendi score of sampled goals across batch and sequence length, showing that $\alpha$ -TCVAE generates sub-goals with a higher diversity score. As a result, Alpha-Director achieves better exploration, as shown in Figure 9-(c), leading to faster learning. Collectively, these findings highlight that $\alpha$ -TCVAE enables the agent to sample a broader range of sub-goals, fostering efficient exploration and ultimately enhancing task performance.
151
+
152
+ # 5 DISCUSSION AND FUTURE WORK
153
+
154
+ Through comprehensive quantitative analyses, we answer the defined research questions while delineating the advantages and limitations of the proposed model relative to the evaluated baselines. Our findings resonate with the hypothesis posited by Higgins et al. (2019), emphasizing a strong correlation between disentanglement and generative diversity. Notably, disentangled representations consistently showcase enhanced visual fidelity and diversity compared to the entangled ones. This correlation persists across all datasets rendered using disentangled representations. Intriguingly, traversal analyses of $\alpha$ -TCVAE, illustrated in Figures 1 and 16 in Appendix C, reveal that it is able to discover novel generative factors, such as object positioning and vertical perspectives, which are absent from the training dataset. We hypothesize that the CEB term is responsible for this phenomenon. Most existing models optimize only the information bottleneck, and while this can result in factorized representations, it does not directly optimize latent variable informativeness. Our proposed bound also includes a CEB term, and so maximizes the average informativeness as well, which may push otherwise noisy variables to learn new generative factors. Future research will delve deeper into comprehending this phenomenon and exploring its potential applications.
155
+
156
+ In accordance with the literature, the main limitation of $\alpha$ -TCVAE is that, akin to other disentangled VAEs, it is difficult to scale efficiently. This scaling challenge permeates the entire disentanglement paradigm. In high-dimensional spaces, not only do disentangled VAE-based models struggle to produce disentangled representations, but the metrics used to measure disentanglement also tend not to be useful (e.g., DCI and SNC (Eastwood & Williams, 2018; Mahon et al., 2023)). On the other hand, disentangled representations have a number of desirable properties, as already showcased in the literature (Higgins et al., 2022). In particular, their impact is undeniable in the Ant Maze RL experiment from Figure 9. Reinforcing this observation, our correlation study underscores the relationship between disentanglement and diversity, leading to the following question: can we leverage diversity as a surrogate for measuring disentanglement in complex and high-dimensional scenarios? We leave this question to future work.
157
+
158
+ # 6 CONCLUSION
159
+
160
+ We introduce $\alpha$ -TCVAE, a VAE optimized through a convex lower bound on the joint total correlation (TC) between the latent representation and the input data. This proposed bound naturally reduces to a convex combination of the known variational information bottleneck (VIB) (Alemi et al., 2017) and the conditional entropy bottleneck (CEB) (Fischer & Alemi, 2020). Moreover, it generalizes the widely adopted $\beta$ -VAE bound. By maximizing disentanglement and average informativeness of the latent variables, our approach enhances both representational and generative capabilities. A comprehensive quantitative evaluation indicates that $\alpha$ -TCVAE consistently produces superior representations. This is evident from its performance across key downstream metrics: disentanglement (i.e., DCI and SNC), generative diversity (i.e., Vendi score), visual fidelity (i.e., FID), and its demonstrated downstream usefulness. In particular, our $\alpha$ -TCVAE showcases significant improvements on the MPI3D-Real dataset, the most realistic factorized dataset in our evaluation, and in a downstream reinforcement learning task. This highlights the strength of maximizing the average informativeness of latent variables, offering a pathway to address the inherent challenges of disentangled VAE-based models.
161
+
162
+ # 7 ETHICS STATEMENT AND REPRODUCIBILITY
163
+
164
+ To the best of the authors' knowledge, this study does not involve any ethical issues. The authors aim to maximize the reproducibility of the study. The code for this project will be released with the camera-ready version. The notation used in the methods section aligns with the existing literature.
165
+
166
+ # 8 ACKNOWLEDGEMENTS
167
+
168
+ We thank Prof. Yoshua Bengio for the useful feedback provided throughout the project. We thank Mila - Quebec AI Institute, TU Delft, and Compute Canada for providing all the resources that made this project possible.
169
+
170
+ # REFERENCES
171
+
172
+ Alexander A. Alemi, Ian Fischer, Joshua V. Dillon, and Kevin Murphy. Deep variational information bottleneck. International Conference on Learning Representations, 2017.
173
+ Sanjeev Arora and Boaz Barak. Computational Complexity: A Modern Approach. Cambridge University Press, 2009. doi: 10.1017/CBO9780511804090.
174
+ Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(8):1798-1828, 2013.
175
+ Chris Burgess and Hyunjik Kim. 3d shapes dataset. https://github.com/deepmind/3dshapes-dataset/, 2018.
176
+ Christopher P. Burgess, Irina Higgins, Arka Pal, Loic Matthey, Nick Watters, Guillaume Desjardins, and Alexander Lerchner. Understanding disentangling in $\beta$ -VAE. arXiv preprint ArXiv:1804.03599, 2018.
177
+ Jinkun Cao, Ruiqian Nai, Qing Yang, Jialei Huang, and Yang Gao. An empirical study on disentanglement of negative-free contrastive learning. Advances in Neural Information Processing Systems, 35:1210-1222, 2022.
178
+ Clément Chadebec, Louis Vincent, and Stéphanie Allassonnière. Pythae: Unifying generative autoencoders in python-a benchmarking use case. Advances in Neural Information Processing Systems, 35:21575-21589, 2022.
179
+ Ricky TQ Chen, Xuechen Li, Roger B. Grosse, and David K. Duvenaud. Isolating sources of disentanglement in variational autoencoders. Advances in Neural Information Processing Systems, 31, 2018.
180
+ Wenxiao Chen, Haowen Xu, Zeyan Li, Dan Pei, Jie Chen, Honglin Qiao, Yang Feng, and Zhaogang Wang. Unsupervised anomaly detection for intricate KPIs via adversarial training of VAE. In IEEE INFOCOM 2019-IEEE Conference on Computer Communications, pp. 1891-1899. IEEE, 2019.
181
+ Rewon Child. Very deep VAEs generalize autoregressive models and can outperform them on images. In International Conference on Learning Representations, 2020.
182
+ Kien Do and Truyen Tran. Theory and evaluation metrics for learning disentangled representations. In International Conference on Learning Representations, 2020.
183
+ Cian Eastwood and Christopher KI Williams. A framework for the quantitative evaluation of disentangled representations. In International Conference on Learning Representations, 2018.
184
+ Ian Fischer and Alexander A Alemi. CEB improves model robustness. Entropy, 22(10):1081, 2020.
185
+ Dan Friedman and Adji Bousso Dieng. The vendi score: A diversity evaluation metric for machine learning. arXiv preprint ArXiv:2210.02410, 2022.
186
+
187
+ Shuyang Gao, Rob Brekelmans, Greg Ver Steeg, and Aram Galstyan. Auto-encoding total correlation explanation. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 1157-1166. PMLR, 2019.
188
+ Muhammad Waleed Gondal, Manuel Wuthrich, Djordje Miladinovic, Francesco Locatello, Martin Breidt, Valentin Volchkov, Joel Akpo, Olivier Bachem, Bernhard Scholkopf, and Stefan Bauer. On the transfer of inductive bias from simulation to the real world: a new disentanglement dataset. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019.
189
+ Anirudh Goyal, Aniket Didolkar, Nan Rosemary Ke, Charles Blundell, Philippe Beaudoin, Nicolas Heess, Michael C. Mozer, and Yoshua Bengio. Neural production systems. Advances in Neural Information Processing Systems, 34:25673-25687, 2021.
190
+ Danijar Hafner, Kuang-Huei Lee, Ian Fischer, and Pieter Abbeel. Deep hierarchical planning from pixels. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 26091-26104. Curran Associates, Inc., 2022.
191
+ Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in Neural Information Processing Systems, 30, 2017.
192
+ Irina Higgins, Loic Matthey, Arka Pal, Christopher Burgess, Xavier Glorot, Matthew Botvinick, Shakir Mohamed, and Alexander Lerchner. Beta-VAE: learning basic visual concepts with a constrained variational framework. In International Conference on Learning Representations, 2016.
193
+ Irina Higgins, David Amos, David Pfau, Sebastien Racaniere, Loic Matthey, Danilo Rezende, and Alexander Lerchner. Towards a definition of disentangled representations. In Theoretical Physics for Deep Learning Workshop, ICML, 2019.
194
+ Irina Higgins, Sébastien Racanière, and Danilo Rezende. Symmetry-based representations for artificial and biological general intelligence. Frontiers in Computational Neuroscience, 16:836498, 2022.
195
+ HyeongJoo Hwang, Geon-Hyeong Kim, Seunghoon Hong, and Kee-Eung Kim. Multi-view representation learning via total correlation objective. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, 2021.
196
+ Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019.
197
+ Hadi Kazemi, Seyed Mehdi Iranmanesh, and Nasser Nasrabadi. Style and content disentanglement in generative adversarial networks. In 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 848-856. IEEE, 2019.
198
+ Hyunjik Kim and Andriy Mnih. Disentangling by factorising. In International Conference on Machine Learning, pp. 2649-2658. PMLR, 2018.
199
+ Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint ArXiv:1312.6114, 2013.
200
+ Andreas Kirsch, Clare Lyle, and Yarin Gal. Unpacking information bottlenecks: Surrogate objectives for deep learning, 2021. URL https://openreview.net/forum?id=5rc0K0ezhqI.
201
+ Hsin-Ying Lee, Hung-Yu Tseng, Jia-Bin Huang, Maneesh Singh, and Ming-Hsuan Yang. Diverse image-to-image translation via disentangled representations. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 35-51, 2018.
202
+
203
+ Yang Li, Cheng Yu, Guangzhi Sun, Weiqin Zu, Zheng Tian, Ying Wen, Wei Pan, Chao Zhang, Jun Wang, Yang Yang, et al. Cross-utterance conditioned VAE for speech generation. arXiv preprint ArXiv:2309.04156, 2023.
204
+ Yuheng Li, Krishna Kumar Singh, Utkarsh Ojha, and Yong Jae Lee. Mixmatch: multifactor disentanglement and encoding for conditional image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8039-8048, 2020.
205
+ Yuxi Li. Deep reinforcement learning: An overview. arXiv preprint ArXiv:1701.07274, 2017.
206
+ Zeming Lin, Tom Sercu, Yann LeCun, and Alexander Rives. Deep generative models create new and diverse protein structures. In Machine Learning for Structural Biology Workshop, NeurIPS, 2021.
207
+ Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015.
208
+ Francesco Locatello, Gabriele Abbati, Thomas Rainforth, Stefan Bauer, Bernhard Scholkopf, and Olivier Bachem. On the fairness of disentangled representations. Advances in Neural Information Processing Systems, 32, 2019a.
209
+ Francesco Locatello, Stefan Bauer, Mario Lucic, Gunnar Raetsch, Sylvain Gelly, Bernhard Schölkopf, and Olivier Bachem. Challenging common assumptions in the unsupervised learning of disentangled representations. In International Conference on Machine Learning, pp. 4114-4124. PMLR, 2019b.
210
+ Louis Mahon, Lei Shah, and Thomas Lukasiewicz. Correcting flaws in common disentanglement metrics. arXiv preprint ArXiv:2304.02335, 2023.
211
+ Qi Mao, Hsin-Ying Lee, Hung-Yu Tseng, Siwei Ma, and Ming-Hsuan Yang. Mode seeking generative adversarial networks for diverse image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1429-1437, 2019.
212
+ Emile Mathieu, Tom Rainforth, Nana Siddharth, and Yee Whye Teh. Disentangling disentanglement in variational autoencoders. In International Conference on Machine Learning, pp. 4402-4412. PMLR, 2019.
213
+ R. Thomas McCoy, Paul Smolensky, Tal Linzen, Jianfeng Gao, and Asli Celikyilmaz. How much do language models copy from their training data? evaluating linguistic novelty in text generation using raven. Transactions of the Association for Computational Linguistics, 11:652-670, 2023.
214
+ Pol Moreno, Christopher KI Williams, Charlie Nash, and Pushmeet Kohli. Overcoming occlusion with inverse graphics. In European Conference on Computer Vision, pp. 170-185. Springer, 2016.
215
+ Jialun Peng, Dong Liu, Songcen Xu, and Houqiang Li. Generating diverse structure for image inpainting with hierarchical vq-VAE. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10775–10784, 2021.
216
+ Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint ArXiv:1511.06434, 2016.
217
+ Ali Razavi, Aaron Van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-VAE-2. Advances in Neural Information Processing Systems, 32, 2019.
218
+ Scott E Reed, Yi Zhang, Yuting Zhang, and Honglak Lee. Deep visual analogy-making. In Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015.
219
+ Michal Rolinek, Dominik Zietlow, and Georg Martius. Variational autoencoders pursue pca directions (by accident). In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12406-12415, 2019.
220
+ Karsten Roth, Mark Ibrahim, Zeynep Akata, Pascal Vincent, and Diane Bouchacourt. Disentanglement of correlated factors via hausdorff factorized support. In International Conference on Learning Representations (ICLR), 2023.
221
+
222
+ Yuge Shi, Brooks Paige, Philip Torr, et al. Variational mixture-of-experts autoencoders for multimodal deep generative models. Advances in Neural Information Processing Systems, 32, 2019.
223
+ Ravid Shwartz-Ziv and Naftali Tishby. Opening the black box of deep neural networks via information. arXiv preprint ArXiv:1703.00810, 2017.
224
+ Guangzhi Sun, Yu Zhang, Ron J. Weiss, Yuan Cao, Heiga Zen, Andrew Rosenberg, Bhuvana Ramabhadran, and Yonghui Wu. Generating diverse and natural text-to-speech samples using a quantized fine-grained VAE and autoregressive prosody prior. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6699-6703. IEEE, 2020.
225
+ Xiaohong Sun, Jinan Gu, and Hongying Sun. Research progress of zero-shot learning. Applied Intelligence, 51(6):3600-3614, 2021.
226
+ Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander Alemi. Inception-v4, inception-resnet and the impact of residual connections on learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 31, 2017.
227
+ Naftali Tishby, Fernando C. Pereira, and William Bialek. The information bottleneck method. Proceedings of the 37th Allerton Conference on Communication, Control and Computation, 2001.
228
+ Arash Vahdat and Jan Kautz. NVAE: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020.
229
+ Ziyu Wang, Yiyi Zhang, Yixiao Zhang, Junyan Jiang, Ruihan Yang, Junbo Zhao, and Gus Xia. Pianotree VAE: Structured representation learning for polyphonic music. arXiv preprint ArXiv:2008.07118, 2020.
230
+ Zachary Wu, Kadina E. Johnston, Frances H. Arnold, and Kevin K. Yang. Protein sequence design with deep generative models. Current Opinion in Chemical Biology, 65:18-27, 2021.
231
+ Fuzhen Zhuang, Zhiyuan Qi, Keyu Duan, Dongbo Xi, Yongchun Zhu, Hengshu Zhu, Hui Xiong, and Qing He. A comprehensive survey on transfer learning. Proceedings of the IEEE, 109(1): 43-76, 2020.
232
+
233
+ # A TOTAL CORRELATION LOWER BOUND DERIVATION
234
+
235
+ In this section we derive the TC lower bound defined in equation 6. Since it is defined as a convex combination of marginal log-likelihood, VIB, and CEB terms, we split the derivation into two subsections. First, we derive a TC bound that introduces the VIB term. Then, we derive another TC bound, which explicitly shows the CEB term. Finally, we define the TC bound shown in equation 6 as a convex combination of the two bounds.
236
+
237
+ # A.1 TC BOUND AND THE VARIATIONAL INFORMATION BOTTLENECK
238
+
239
+ Unfortunately, direct optimization of mutual information terms is intractable (Alemi et al., 2017). Therefore, we first need to find a lower bound of equation 4. Following the approach used in Hwang et al. (2021), we can expand it as:
240
+
241
+ $$
242
+ \begin{array}{l} T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) = \sum_ {k = 1} ^ {K} I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\theta} (\boldsymbol {z}, \boldsymbol {x}), \tag {7} \\ = \sum_ {k = 1} ^ {K} \left[ \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z} _ {k})} \left[ \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} \right] \right] - \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z})} \left[ \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z})}{p _ {D} (\boldsymbol {x})} \right], \\ = \sum_ {k = 1} ^ {K} \left[ \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z} _ {k})} \left[ \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} \right] \right] - \mathbb {E} _ {q _ {\theta} (\boldsymbol {z}, \boldsymbol {x})} \left[ \log \frac {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})}{q _ {\theta} (\boldsymbol {z})} \frac {r (\boldsymbol {z})}{r (\boldsymbol {z})} \right]. \\ \end{array}
243
+ $$
244
+
245
+ Let's expand these two terms:
246
+
247
+ $$
248
+ \begin{array}{l} \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z} _ {k})} \left[ \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} \right] = \iint q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x}) \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} d \boldsymbol {z} _ {k} d \boldsymbol {x}, \tag {8} \\ = \int \int q _ {\theta} (\pmb {z} _ {k} | \pmb {x}) p _ {D} (\pmb {x}) \left(\log \left(\frac {q _ {\theta} (\pmb {x} | \pmb {z} _ {k})}{p _ {\phi} (\pmb {x} | \pmb {z} _ {k})}\right) + \log p _ {\phi} (\pmb {x} | \pmb {z} _ {k}) - \log p _ {D} (\pmb {x})\right) d \pmb {z} _ {k} d \pmb {x}, \\ = H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k})} [ D _ {K L} (q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k}) \| p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})) ] + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})} [ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k}) ]. \\ \end{array}
249
+ $$
250
+
251
+ $$
252
+ \begin{array}{l} \mathbb {E} _ {q _ {\theta} (\boldsymbol {z}, \boldsymbol {x})} \left[ \log \frac {q _ {\theta} (\boldsymbol {z} \mid \boldsymbol {x})}{q _ {\theta} (\boldsymbol {z})} \frac {r (\boldsymbol {z})}{r (\boldsymbol {z})} \right], \tag {9} \\ = \int q _ {\theta} (\boldsymbol {x}, \boldsymbol {z}) \log \left(\frac {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})}{q _ {\theta} (\boldsymbol {z})} \frac {r (\boldsymbol {z})}{r (\boldsymbol {z})}\right) d \boldsymbol {z} d \boldsymbol {x}, \\ = \int q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) p _ {D} (\boldsymbol {x}) \left(\left(\log \frac {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})}{r (\boldsymbol {z})}\right) + \log \left(\frac {r (\boldsymbol {z})}{q _ {\theta} (\boldsymbol {z})}\right)\right) d \boldsymbol {z} d \boldsymbol {x}, \\ = \mathbb {E} _ {p D (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ] - \mathbb {E} _ {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z}) \| r (\boldsymbol {z})) ]. \\ \end{array}
253
+ $$
254
+
255
+ As a result, we can write:
256
+
257
+ $$
258
+ \begin{array}{l} T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) = \sum_ {k = 1} ^ {K} \left[ H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {x} \mid \boldsymbol {z} _ {k}) \| p _ {\phi} (\boldsymbol {x} \mid \boldsymbol {z} _ {k})\right) \right] + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} \mid \boldsymbol {z} _ {k}) \right] \right] \tag {10} \\ \quad - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})\right) \right] + \mathbb {E} _ {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {z}) \| r (\boldsymbol {z})\right) \right], \\ \geq \sum_ {k = 1} ^ {K} \left[ H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})} [ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k}) ] \right] - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ], \\ = \sum_ {k = 1} ^ {K} \left[ H (\boldsymbol {x}) + \int \left(\int q _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) d \boldsymbol {z} _ {\neq k}\right) \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k}) d \boldsymbol {z} _ {k} d \boldsymbol {x} \right] - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ], \\ = \sum_ {k = 1} ^ {K} \left[ H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z}, \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k}) \right] \right] - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ], \\ = K H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z}, \boldsymbol {x})} \left[ \log \prod_ {k = 1} ^ {K} p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k}) \right] - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ], \\ = K H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z}, \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) + \log p _ {D} (\boldsymbol {x}) ^ {K - 1} \right] - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ], \\ = \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})} [ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) ] - \underbrace {\mathbb {E} _ {p _ {D} (\boldsymbol {x})} [ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})) ]} _ {\text {VIB}} =: \mathcal {L} (\boldsymbol {z}, \boldsymbol {x}). \\ \end{array}
259
+ $$
260
+
261
+ Maximizing $\mathcal{L}(\pmb {z},\pmb {x})$ not only maximizes the original objective $TC(z,\pmb {x})$ , but also minimizes the gap introduced by the inequality in equation 10. As a result,
262
+
263
+ $$
264
+ \sum_ {k = 1} ^ {K} \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k}) \| p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})\right) \right] + \mathbb {E} _ {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {z}) \| r (\boldsymbol {z})\right) \right], \tag {11}
265
+ $$
266
+
267
+ will be minimized, leading to $r(z)\approx q_{\theta}(z)$ and $p_{\phi}(\pmb {x}|z_k)\approx q_\theta (\pmb {x}|z_k)$ .
268
+
269
+ Moreover, since $H(\pmb{x})$ and $\log p_D(\pmb{x})^{K-1}$ do not depend on $\theta$ , we can drop them from $\mathcal{L}(\pmb{z},\pmb{x})$ . Finally, to avoid heavy notation, we will denote the VIB term as $D_{KL}(q_{\theta}(\pmb{z}|\pmb{x})||r(\pmb{z}))$ , leading to the first TC bound, which introduces the VIB term:
270
+
271
+ $$
272
+ T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) \geq \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})} [ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) ] - \underbrace {D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})\right)} _ {\text {V I B}}. \tag {12}
273
+ $$
274
+
275
+ # A.2 TC BOUND AND THE CONDITIONAL VARIATIONAL INFORMATION BOTTLENECK
276
+
277
+ Expanding equation 2, we can reformulate $TC(\boldsymbol{z},\boldsymbol{x})$ as follows:
278
+
279
+ $$
280
+ \begin{array}{l} T C _ {\phi} (\boldsymbol {z}, \boldsymbol {x}) = \sum_ {k = 1} ^ {K} I _ {\phi} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\phi} (\boldsymbol {z}, \boldsymbol {x}), (13) \\ = \sum_ {k = 1} ^ {K} \left[ \frac {K - 1}{K} I _ {\phi} (\boldsymbol {z} _ {k}, \boldsymbol {x}) + \frac {1}{K} I _ {\phi} (\boldsymbol {z} _ {k}, \boldsymbol {x}) - \frac {1}{K} I _ {\phi} (\boldsymbol {z}, \boldsymbol {x}) \right], \\ = \sum_ {k = 1} ^ {K} \left[ \frac {K - 1}{K} I _ {\phi} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) + \frac {1}{K} \left(I _ {\phi} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\phi} (\boldsymbol {z}, \boldsymbol {x})\right) \right]. (14) \\ \end{array}
281
+ $$
282
+
283
+ Interestingly, we can write the last term of equation 14 as:
284
+
285
+ $$
286
+ \begin{array}{l} I _ {\phi} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\phi} (\boldsymbol {z}, \boldsymbol {x}) = \mathbb {E} _ {p _ {\phi} \left(\boldsymbol {x}, \boldsymbol {z} _ {k}\right)} \left[ \log \frac {p _ {\phi} \left(\boldsymbol {x} \mid \boldsymbol {z} _ {k}\right)}{p _ {D} (\boldsymbol {x})} \right] - \mathbb {E} _ {p _ {\phi} \left(\boldsymbol {x}, \boldsymbol {z}\right)} \left[ \log \frac {p _ {\phi} \left(\boldsymbol {x} \mid \boldsymbol {z}\right)}{p _ {D} (\boldsymbol {x})} \right], \tag {15} \\ = \int \left(\int p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) p (\boldsymbol {z}) d \boldsymbol {z} _ {\neq \boldsymbol {z} _ {k}}\right) \log \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} d \boldsymbol {z} _ {k} d \boldsymbol {x}, \\ - \int p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) p (\boldsymbol {z}) \log \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z})}{p _ {D} (\boldsymbol {x})} d \boldsymbol {z} d \boldsymbol {x}, \\ = \int p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) p (\boldsymbol {z}) \log \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p (\boldsymbol {x} | \boldsymbol {z})} d \boldsymbol {z} d \boldsymbol {x}, \\ = - \int p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) p (\boldsymbol {z}) \log \frac {p (\boldsymbol {x} | \boldsymbol {z})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} d \boldsymbol {z} d \boldsymbol {x}, \\ = - \int p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) p (\boldsymbol {z}) \log \frac {p (\boldsymbol {x} | \boldsymbol {z} _ {\neq k} , \boldsymbol {z} _ {k})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} d \boldsymbol {z} d \boldsymbol {x}, \\ = - I _ {\phi} \left(\boldsymbol {z} _ {\neq k}, \boldsymbol {x} | \boldsymbol {z} _ {k}\right). \\ \end{array}
287
+ $$
288
+
289
+ We can now write equation 5:
290
+
291
+ $$
292
+ T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) = \frac {1}{K} \sum_ {k = 1} ^ {K} \left[ (K - 1) I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\theta} \left(\boldsymbol {z} _ {\neq k}, \boldsymbol {x} \mid \boldsymbol {z} _ {k}\right) \right].
293
+ $$
294
+
295
+ Interestingly, the second term in equation 5 can now be expressed as multiple conditional MIs between the observation and the $K - 1$ other latent variables given the $k$-th latent variable, penalizing the extra information about the observation that is not inferable from the given latent variable. Moreover, we can further expand the TC as:
296
+
297
+ $$
298
+ \begin{array}{l} T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) = \frac {1}{K} \sum_ {k = 1} ^ {K} \left[ (K - 1) I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\theta} \left(\boldsymbol {z} _ {\neq k}, \boldsymbol {x} \mid \boldsymbol {z} _ {k}\right) \right], \qquad (16) \\ = \frac {1}{K} \sum_ {k = 1} ^ {K} \left[ (K - 1) \, \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})} \left[ \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} \right] - \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z})} \left[ \log \frac {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})}{q _ {\theta} (\boldsymbol {z} _ {k} | \boldsymbol {x})} \frac {r _ {p} (\boldsymbol {z} | \boldsymbol {x})}{r _ {p} (\boldsymbol {z} | \boldsymbol {x})} \right] + \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z})} \left[ \log q _ {\theta} (\boldsymbol {z} _ {\neq k}) \right] \right], \\ = \frac {1}{K} \sum_ {k = 1} ^ {K} \left[ (K - 1) \, \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})} \left[ \log \frac {q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {D} (\boldsymbol {x})} \frac {p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})}{p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})} \right] - \mathbb {E} _ {p _ {D} (\boldsymbol {x})} \left[ D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} | \boldsymbol {x})) \right] - \mathbb {E} _ {q _ {\theta} (\boldsymbol {x}, \boldsymbol {z})} \left[ \log \frac {r _ {p} (\boldsymbol {z} _ {k} | \boldsymbol {x}) \, r _ {p} (\boldsymbol {z} _ {\neq k} | \boldsymbol {x})}{q _ {\theta} (\boldsymbol {z} _ {k} | \boldsymbol {x}) \, q _ {\theta} (\boldsymbol {z} _ {\neq k})} \right] \right], \\ = \frac {K - 1}{K} \sum_ {k = 1} ^ {K} \left[ H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k}) \right] \right] - \frac {1}{K} \sum_ {k = 1} ^ {K} \mathbb {E} _ {p _ {D} (\boldsymbol {x})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} | \boldsymbol {x})\right) \right] \\ \quad + \frac {K - 1}{K} \sum_ {k = 1} ^ {K} \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {x} | \boldsymbol {z} _ {k}) \| p _ {\phi} (\boldsymbol {x} | \boldsymbol {z} _ {k})\right) \right] + \frac {1}{K} \sum_ {k = 1} ^ {K} \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {\neq k}, \boldsymbol {x})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {z} _ {k} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} _ {k} | \boldsymbol {x})\right) \right] + \int D _ {K L} \left(q _ {\theta} (\boldsymbol {z} _ {\neq k}) \| r _ {p} (\boldsymbol {z} _ {\neq k} \mid \boldsymbol {x})\right) d \boldsymbol {x}, \qquad (17) \\ \geq \frac {K - 1}{K} \sum_ {k = 1} ^ {K} \left[ H (\boldsymbol {x}) + \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} _ {k} \mid \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} \mid \boldsymbol {z} _ {k}) \right] \right] - \underbrace {\frac {1}{K} \sum_ {k = 1} ^ {K} \mathbb {E} _ {p _ {D} (\boldsymbol {x})} \left[ D _ {K L} \left(q _ {\theta} (\boldsymbol {z} \mid \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} \mid \boldsymbol {x})\right) \right]} _ {\text {CEB}}. \qquad (18) \\ \end{array}
299
+ $$
300
+
301
+ Maximizing Eq. 18 not only maximizes the original objective $TC(\pmb{z}, \pmb{x})$ but also minimizes the gap introduced by the inequality in equation 18, leading to: $r_p(\pmb{z}_k | \pmb{x}) \approx q_\theta(\pmb{z}_k | \pmb{x})$ , $q_\theta(\pmb{z}_{\neq k}) \approx r_p(\pmb{z}_{\neq k} | \pmb{x})$ and $q_\theta(\pmb{x} | \pmb{z}_k) \approx p_\phi(\pmb{x} | \pmb{z}_k)$ . Moreover, since $H(\pmb{x})$ does not depend on $\theta$ , we can drop it from Eq. 18. Finally, to avoid heavy notation, we will denote the CEB term as $D_{KL}(q_\theta(\pmb{z} | \pmb{x}) \| r_p(\pmb{z} | \pmb{x}))$ , leading to the second TC bound, which introduces the CEB term:
302
+
303
+ $$
304
+ T C _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) \geq \frac {K - 1}{K} \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})} [ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) ] - \underbrace {D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} | \boldsymbol {x})\right)} _ {\text {C E B}}. \tag {19}
305
+ $$
306
+
307
+ # A.3 FINAL TC BOUND
308
+
309
+ In order to obtain the final expression of the derived TC bound, we compute a convex combination of the two bounds defined in equations 12 and 19:
310
+
311
+ $$
312
+ \begin{array}{l} T C (\boldsymbol {z}, \boldsymbol {x}) = (1 - \alpha) \left(\sum_ {k = 1} ^ {K} I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - I _ {\theta} (\boldsymbol {z}, \boldsymbol {x})\right) \qquad (20) \\ \quad + \alpha \left(\sum_ {k = 1} ^ {K} \left[ \frac {K - 1}{K} I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) + \frac {1}{K} I _ {\theta} \left(\boldsymbol {z} _ {k}, \boldsymbol {x}\right) - \frac {1}{K} I _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) \right]\right), \qquad (21) \\ = \frac {K (1 - \alpha) + \alpha (K - 1)}{K} \sum_ {k = 1} ^ {K} I _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x}) - \frac {\alpha}{K} \sum_ {k = 1} ^ {K} \left(I _ {\theta} (\boldsymbol {z}, \boldsymbol {x}) - I _ {\theta} (\boldsymbol {z} _ {k}, \boldsymbol {x})\right) - (1 - \alpha) I _ {\theta} (\boldsymbol {z}, \boldsymbol {x}), \\ \geq \frac {K - \alpha}{K} \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) \right] - \alpha D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} | \boldsymbol {x})\right) - (1 - \alpha) D _ {K L} \left(q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z})\right), \\ = \mathbb {E} _ {q _ {\theta} (\boldsymbol {z} | \boldsymbol {x})} \left[ \log p _ {\phi} (\boldsymbol {x} | \boldsymbol {z}) \right] - \underbrace {\frac {K \alpha}{K - \alpha} D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r _ {p} (\boldsymbol {z} | \boldsymbol {x}))} _ {\text {CEB}} - \underbrace {\frac {1 - \alpha}{1 - \frac {\alpha}{K}} D _ {K L} (q _ {\theta} (\boldsymbol {z} | \boldsymbol {x}) \| r (\boldsymbol {z}))} _ {\text {VIB}}. \\ \end{array}
313
+ $$
314
+
315
+ where $\alpha$ is a hyperparameter that balances the effects of the VIB and CEB terms. Table 1 reports the lower bounds defined for $\beta$ -VAE (Higgins et al., 2016), FactorVAE (Kim & Mnih, 2018), HFS (Roth et al., 2023) and $\beta$ -TCVAE (Chen et al., 2018), comparing them to the derived TC bound. We can see that the bounds share a similar structure, consisting of a marginal log-likelihood term and either one or two KL regularizers that impose some kind of information bottleneck.
316
+
317
+ <table><tr><td>Model</td><td>Lower Bound</td></tr><tr><td>β-VAE</td><td>Eqθ(z|x) [log pφ(x|z)] - βKL(qθ(z|x) ||p(z))</td></tr><tr><td>FactorVAE</td><td>Eqθ(z|x) [log pφ(x|z)] - βKL(qθ(z|x) ||p(z)) - γKL(qθ(z)|| ∏k=1K qφ(zk))</td></tr><tr><td>β-TCVAE</td><td>Eq(z|n)p(n) [log p(n|z) - αIq(z;n) - βKL(q(z)|| ∏j q(zj)) - γ ∑j DKL(q(zj)||p(zj))</td></tr><tr><td>HFS</td><td>Eqθ(z|x) [log pφ(x|z)] - γ[∑i=1K-1 ∑j=i+1 K maxz∈Z::,1×Z::,2×...×Z::,K minz&#x27;∈Z::,(i,j)d(z,z&#x27;)</td></tr><tr><td>α-TCVAE</td><td>Eqθ(z|x) [log pφ(x|z)] - Kα/K-αDKL(qθ(z|x)||rp(z|x)) - (1-α)/(1-α/K)DKL(qθ(z|x)||r(z))</td></tr></table>
318
+
319
+ Table 1: This table compares the lower-bound objective functions of $\beta$-VAE, $\beta$-TCVAE, FactorVAE, HFS-VAE and $\alpha$-TCVAE. The lower bound of $\beta$-VAE is composed of the expected log-likelihood of the data given the latent variables and the KL divergence between the approximate posterior and the prior of the latent variables (i.e., the VIB term). FactorVAE further adds a KL divergence term between the aggregate posterior and its factorized counterpart, which approximates the total correlation of the latent variables, and HFS-VAE adds a Monte-Carlo approximation of the Hausdorff distance. $\alpha$-TCVAE, on the other hand, uses a convex combination of the VIB term and the KL divergence between the approximate posterior and the conditional prior $r_p(\pmb{z}|\pmb{x})$ (i.e., the CEB term). $K$ denotes the dimensionality of the latent variables, while $\beta$, $\gamma$ and $\alpha$ are hyperparameters of the models.
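To make the role of $\alpha$ concrete, below is a minimal sketch of how the bound in Eq. 20 could be assembled from a reconstruction term and the two KL regularizers, assuming diagonal-Gaussian posteriors and that the parameters of the conditional prior $r_p(\pmb{z}|\pmb{x})$ are available; the function and argument names are illustrative rather than taken from the authors' implementation.

```python
import torch

def gaussian_kl(mu_q, logvar_q, mu_p, logvar_p):
    """KL(N(mu_q, var_q) || N(mu_p, var_p)) for diagonal Gaussians, summed over latent dims."""
    var_q, var_p = logvar_q.exp(), logvar_p.exp()
    kl = 0.5 * (logvar_p - logvar_q + (var_q + (mu_q - mu_p) ** 2) / var_p - 1.0)
    return kl.sum(dim=-1)

def alpha_tcvae_objective(log_px_z, mu_q, logvar_q, mu_r, logvar_r, alpha, K):
    """Lower bound of Eq. 20: reconstruction minus weighted CEB and VIB terms (sketch).

    log_px_z           : reconstruction log-likelihood log p_phi(x|z), shape (batch,)
    (mu_q, logvar_q)   : parameters of q_theta(z|x)
    (mu_r, logvar_r)   : parameters of the conditional prior r_p(z|x) used by the CEB term
    alpha              : trade-off in [0, 1]; alpha = 0 recovers a VIB-only (beta-VAE-style) bound
    K                  : latent dimensionality
    """
    # CEB term: KL between the posterior and the conditional prior r_p(z|x)
    kl_ceb = gaussian_kl(mu_q, logvar_q, mu_r, logvar_r)
    # VIB term: KL between the posterior and the standard Normal prior r(z)
    kl_vib = gaussian_kl(mu_q, logvar_q, torch.zeros_like(mu_q), torch.zeros_like(logvar_q))
    ceb_weight = K * alpha / (K - alpha)
    vib_weight = (1.0 - alpha) / (1.0 - alpha / K)
    bound = log_px_z - ceb_weight * kl_ceb - vib_weight * kl_vib
    return bound.mean()  # maximise this (or minimise its negative)
```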
320
+
321
+ # B ARCHITECTURE AND HYPERPARAMETER DETAILS
322
+
323
+ The hyperparameters used for the different experiments are shown in Table 2.
324
+
325
+ Table 2: Comparison of the different hyperparameters used across the Datasets
326
+
327
+ <table><tr><td>Dataset</td><td>β</td><td>γ</td><td>α</td><td>latent dim K</td><td>Training Epochs</td></tr><tr><td>Teapots</td><td>2</td><td>10</td><td>0.25</td><td>10</td><td>50</td></tr><tr><td>3DShapes</td><td>3</td><td>10</td><td>0.25</td><td>10</td><td>50</td></tr><tr><td>Cars3D</td><td>4</td><td>10</td><td>0.25</td><td>10</td><td>50</td></tr><tr><td>MPI3D-Real</td><td>5</td><td>10</td><td>0.25</td><td>10</td><td>50</td></tr><tr><td>Celeba</td><td>5</td><td>10</td><td>0.25</td><td>48</td><td>50</td></tr></table>
328
+
329
+ All encoder, decoder and discriminator architectures are taken from Roth et al. (2023).
330
+
331
+ # C FURTHER DETAILS ON DATASETS AND METRICS
332
+
333
+ # C.1 DATASETS
334
+
335
+ We test on five datasets. Teapots (Moreno et al., 2016) contains 200,000 images of size $64 \times 64$. Each image features a rendered, camera-centered teapot with 5 uniformly distributed generative factors of variation: azimuth and elevation (sampled between 0 and $2\pi$), along with three RGB colour channels (each sampled between 0 and 1). 3DShapes (Burgess & Kim, 2018) consists of 480,000 images of size $64 \times 64$. Every image displays a rendered, camera-centered object with 6 uniformly distributed generative factors of variation: shape (sampled from [cylinder, tube, sphere, cube]), object colour, object hue, floor colour, wall colour, and horizontal orientation, all determined using linearly spaced values. MPI3D-Real (Gondal et al., 2019) comprises 103,680 images of size $64 \times 64$. Each image captures objects at a robot arm's end, characterized by 6 factors: object colour, size, shape, camera height, azimuth, and robot arm altitude. Cars3D (Reed et al., 2015) is made up of 16,185 images of size $64 \times 64$. Each image portrays a rendered, camera-centered car, categorized by 3 factors: car-type, elevation, and azimuth. CelebA (Liu et al., 2015) encompasses over 200,000 images of size $64 \times 64$. Every image presents a celebrity, highlighted by a broad range of poses, facial expressions, and lighting conditions, which sum up to 40 different factors. Every model is trained using a subset containing $80\%$ of the selected dataset's images in a fully unsupervised way. The models are evaluated on the remaining images using the following downstream scores. While
336
+
337
+ CelebA is the most complex dataset, MPI3D-Real is the most realistic among the ones usually used in the disentanglement community.
338
+
339
+ # C.2 METRICS
340
+
341
+ To assess image quality we use the FID score, which compares the distribution of generated images to that of the real images. Specifically, FID (Heusel et al., 2017) measures the distance between two distributions of images, here the generated and the real ones. A lower distance is better, indicating that the generated images belong to the distribution of ground-truth images.
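For reference, the Fréchet distance underlying FID can be computed from the empirical means and covariances of two feature sets; the sketch below assumes the feature matrices (e.g. Inception or CLIP activations) have already been extracted.

```python
import numpy as np
from scipy import linalg

def frechet_distance(feats_real, feats_gen):
    """Frechet distance between Gaussians fitted to two feature sets (rows = samples)."""
    mu1, mu2 = feats_real.mean(axis=0), feats_gen.mean(axis=0)
    sigma1 = np.cov(feats_real, rowvar=False)
    sigma2 = np.cov(feats_gen, rowvar=False)
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    covmean = covmean.real  # discard small imaginary parts from numerical error
    diff = mu1 - mu2
    return diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean)
```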
342
+
343
+ The Vendi score (Friedman & Dieng, 2022), which we use to measure the diversity of the generated images, is computed with respect to a similarity measure. Specifically, it is calculated as the exponential of the entropy of the eigenvalues of the similarity matrix, i.e. the matrix whose $(i,j)$ th entry is the similarity between the $i$ th and $j$ th data points. It can be interpreted as the effective number of distinct elements in the set.
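A minimal sketch of this computation, assuming a precomputed symmetric similarity matrix with unit self-similarity:

```python
import numpy as np

def vendi_score(similarity):
    """Vendi score: exponential of the Shannon entropy of the eigenvalues of K / n."""
    n = similarity.shape[0]
    eigvals = np.linalg.eigvalsh(similarity / n)
    eigvals = eigvals[eigvals > 1e-12]               # drop numerically zero eigenvalues
    entropy = -np.sum(eigvals * np.log(eigvals))
    return float(np.exp(entropy))

# e.g. with a cosine-similarity kernel over feature vectors X (rows = samples):
# norms = np.linalg.norm(X, axis=1, keepdims=True)
# K = (X @ X.T) / (norms @ norms.T)
# print(vendi_score(K))
```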
344
+
345
+ To assess the quality of encoded latent representations, we use DCI, SNC/NK (Mahon et al., 2023) and the unfairness measure of Locatello et al. (2019a).
346
+
347
+ DCI, the first disentanglement metric we compute, first trains a regressor to predict the generative factors from the latent representation, and from this regressor extracts a matrix of feature importances, where the $(i,j)$ th entry is the importance of the $i$ th latent dimension for predicting the $j$ th generative factor. It then takes (a normalized version of) the entropy of rows and columns to compute 'disentanglement' and 'completeness', respectively. The accuracy of the regressor is taken as the 'informativeness' score. The average of these three scores, across all factors and neurons, is the final DCI score.
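The entropy-based part of this computation can be sketched as follows, given the regressor's importance matrix; this follows the usual DCI recipe rather than any particular implementation, and the informativeness term (regressor accuracy) is computed separately.

```python
import numpy as np

def dci_from_importance(R, eps=1e-11):
    """R: importance matrix of shape (num_latents, num_factors), non-negative entries."""
    num_latents, num_factors = R.shape
    # Disentanglement: entropy of each latent's importance distribution over factors
    P_rows = R / (R.sum(axis=1, keepdims=True) + eps)
    H_rows = -np.sum(P_rows * np.log(P_rows + eps), axis=1) / np.log(num_factors)
    latent_weights = R.sum(axis=1) / (R.sum() + eps)
    disentanglement = np.sum(latent_weights * (1.0 - H_rows))
    # Completeness: entropy of each factor's importance distribution over latents
    P_cols = R / (R.sum(axis=0, keepdims=True) + eps)
    H_cols = -np.sum(P_cols * np.log(P_cols + eps), axis=0) / np.log(num_latents)
    completeness = np.mean(1.0 - H_cols)
    return disentanglement, completeness
```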
348
+
349
+ SNC/NK, the second disentanglement metric we compute, works by first aligning neurons to generative factors using the Kuhn-Munkres algorithm to enforce uniqueness. Then each aligned neuron is used as a classifier for the corresponding factor, by binning its values. A higher accuracy of this single-neuron classifier (SNC) is better, indicating that the factor is well-represented by a single unique neuron. Neuron knockout (NK) is calculated as the difference in accuracy between an MLP classifier that predicts the generative factor from all neurons and one that predicts using all neurons except the one that factor was aligned to. A high NK is also better, indicating that no neurons, other than the one it was aligned to, contain information about the given factor. SNC/NK measures a slightly different and stronger notion of disentanglement than DCI, as it explicitly assumes an inductive bias that enforces each factor to be represented by a single latent variable.
350
+
351
+ MIG is a disentanglement metric that quantifies the degree of separation between the latent variables and the generative factors in a dataset. It calculates the mutual information between each latent variable and each generative factor, identifying the variable that shares the most information with each factor. The gap, or difference, in mutual information between the top two variables for each generative factor is then computed. A larger gap indicates that one latent variable is significantly more informative about a generative factor than the others, signifying a higher degree of disentanglement. This metric is particularly useful in scenarios where a clear and distinct representation of generative factors is desired in the latent space. MIG thus complements DCI and SNC/NK by providing a measure of how well-separated the representations of different generative factors are within the model's latent space.
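A sketch of MIG on discretised latents is given below, assuming access to discrete ground-truth factor labels; binning choices and mutual-information estimators vary between implementations.

```python
import numpy as np
from sklearn.metrics import mutual_info_score

def mig(latents, factors, n_bins=20):
    """latents: (N, K) continuous codes; factors: (N, F) discrete ground-truth factors."""
    N, K = latents.shape
    F = factors.shape[1]
    # Discretise each latent dimension into equal-width bins
    binned = np.stack(
        [np.digitize(latents[:, k], np.histogram_bin_edges(latents[:, k], n_bins)[1:-1]) for k in range(K)],
        axis=1,
    )
    gaps = []
    for f in range(F):
        mi = np.array([mutual_info_score(factors[:, f], binned[:, k]) for k in range(K)])
        h_f = mutual_info_score(factors[:, f], factors[:, f])   # entropy of the factor
        top2 = np.sort(mi)[-2:]
        gaps.append((top2[1] - top2[0]) / max(h_f, 1e-12))
    return float(np.mean(gaps))
```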
352
+
353
+ # D EXTENDED RESULTS
354
+
355
+ Here, we present further results, in addition to those from Section 4. Figure 10 extends Fig. 7, reporting the correlations also with SNC, NK and the attribute classification accuracy as shown in Figure 8. Unsurprisingly, there are strong correlations between the three metrics designed to measure disentanglement: DCI, SNC and NK. This, to some extent, verifies the reliability of these different disentanglement metrics. SNC and NK also correlate strongly with Vendi, as DCI does. This further supports the finding in our paper of a relationship between disentanglement and diversity.
356
+
357
+ ![](images/a0642538da43f9c70b439b18d69e513c602c824a9534548103738f7059eab098.jpg)
358
+ (a)
359
+
360
+ ![](images/932658943b878db0c56f4cd676939e18a0635cea11dfbec43846dbef7670e0eb.jpg)
361
+ (b)
362
+
363
+ ![](images/9466d2834390e0aefbac922486ed6f95b4438bcba928af10d8865f9ef803cc3f.jpg)
364
+ (c)
365
+
366
+ ![](images/c5290557b4d6db958037aa0128f7794c637faa48da2f5ad5b5cd399a60b3aea6.jpg)
367
+ (d)
368
+
369
+ ![](images/bd3192537b08b4f97dbb73a54cfab66d75693ba942d70ea1c2ea39746cfba69d.jpg)
370
+ (e)
371
+ Figure 10: Correlations between all metrics we measure, both for the generated images and the representations.
372
+ Figure 11 shows the results for neuron knockout (NK), the second metric introduced by Mahon et al. (2023) alongside SNC, which is shown in Figure 5. Similar to SNC, the NK score for $\alpha$ -TCVAE
373
+
374
+ ![](images/57b407b4bb016e7bf35b46e033e370f5c659a653c949de299b747ae6dfb553b7.jpg)
375
+ Figure 11: Comparison of the neuron-knockout score of $\alpha$ -TCVAE with that of baseline models. As with other metrics presented in the main paper, the improvement of $\alpha$ -TCVAE is minor on 3DShapes and Teapots, but more substantial on Cars3D and MPI3D-Real.
376
+
377
+ ![](images/ba16185f758a0c5ed0febafa58020d387d4900319b5461770adac8344e74f2e9.jpg)
378
+ Figure 12: Comparison of the MIG score of $\alpha$ -TCVAE with that of baseline models. As with other metrics presented in the main paper, the improvement of $\alpha$ -TCVAE is minor on 3DShapes and Teapots, but more substantial on Cars3D and MPI3D-Real.
379
+
380
+ ![](images/d52d664b14d86cd8959048bad4584afa7a6be80bf95383564436cd3d474d023a.jpg)
381
+ Figure 13: Comparison of the DCI-C completeness score of $\alpha$ -TCVAE with that of baseline models. As with other metrics presented in the main paper, the performance of $\alpha$ -TCVAE is comparable on 3DShapes, CelebA and Teapots, and better on Cars3D and MPI3D-Real.
382
+
383
+ is higher than that for baseline VAE models and, while the error bars often overlap, the superiority of $\alpha$-TCVAE is consistent across all five datasets and is most substantial on MPI3D-Real. Figure 12 shows the results for mutual information gap (MIG), which follows the same trend as the NK, SNC and DCI scores. Figures 13, 14 and 15 present the results of Completeness, Disentanglement and
384
+
385
+ ![](images/a364c32999e05a5f2eccfad31c6a6728a37696c42c09e0bd14fef5a5d9c72bf5.jpg)
386
+ Figure 14: Comparison of the DCI-D disentanglement score of $\alpha$ -TCVAE with that of baseline models. As with other metrics presented in the main paper, the performance of $\alpha$ -TCVAE is comparable on 3DShapes, CelebA and Teapots, and better on Cars3D and MPI3D-Real.
387
+
388
+ ![](images/06cfb67800ae26a74ff794c5c989186fb4eb9a5eb6777b08b0ca7dbcd574a84f.jpg)
389
+ Figure 15: Comparison of the DCI-I informativeness score of $\alpha$ -TCVAE with that of baseline models. As with other metrics presented in the main paper, the performance of $\alpha$ -TCVAE is comparable on 3DShapes, CelebA and Teapots, and better on Cars3D and MPI3D-Real.
390
+
391
+ Informativeness metrics (DCI-C, DCI-D and DCI-I, respectively). The final DCI score shown in Fig. 4 is computed as the geometric mean of the three scores.
392
+
393
+ # E DISCOVERING NOVEL FACTORS OF VARIATION
394
+
395
+ Figure 16 presents $\alpha$-TCVAE traversals across the 3DShapes, Teapots and MPI3D-Real datasets. The red boxes indicate the discovered novel generative factors that are not present within the training dataset, namely object position and vertical camera perspective. While we do not have a comprehensive explanation of this intriguing phenomenon, we believe the intuition behind it can be explained by considering the effects of the VIB and CEB terms in the defined bound. Indeed, while VIB pushes individual latent variables to represent different generative factors, CEB pushes them to be informative. As a result, the otherwise noisy dimensions are pushed to be informative (i.e., CEB) and to represent a distinct generative factor (i.e., VIB), resulting in the discovery of novel generative factors.
396
+
397
+ ![](images/2ef4284a96584e8d41167952333cd2e8ab07a81eb0152467979fd17e10d03df3.jpg)
398
+ (a) 3DShapes Traversals
399
+
400
+ ![](images/25ee5adc770ea549278c538107487caaecaf27e7ff757a3fb97c8572a3f23c72.jpg)
401
+ (b) Teapots Traversals
402
+
403
+ ![](images/b5b6eec83a57522e4c524c50930843a541e44983176985451f93021367fd85e8.jpg)
404
+ (c) MPI3D-Real Traversals
405
+ Figure 16: Latent traversals generated by $\alpha$-TCVAE on the 3DShapes, Teapots and MPI3D-Real datasets. The generated latent traversals reveal that $\alpha$-TCVAE can learn and represent generative factors that are not present in the ground-truth dataset, namely vertical perspective and object position. The discovered generative factors are indicated with a red box.
406
+
407
+ # F RELATIONSHIP BETWEEN CEB AND DIVERSITY
408
+
409
+ # F.1 FISCHER'S DEFINITION OF CONDITIONAL ENTROPY BOTTLENECK
410
+
411
+ Fischer's approach to the Conditional Entropy Bottleneck (Fischer & Alemi, 2020) is an extension of the Information Bottleneck (IB) principle (Alemi et al., 2017), aimed at finding an optimally compressed representation $Z$ of a variable $X$ that remains highly informative about another variable $Y$, under the influence of a conditioning variable $C$. The CEB objective, according to Fischer, is formalized as a trade-off between two competing conditional mutual information terms:
412
+
413
+ $$
414
+ \min _ {p (z | x)} \left[ I (X; Z | C) - \beta I (Y; Z | C) \right]
415
+ $$
416
+
417
+ Here, $I(X;Z|C)$ quantifies the amount of information that the representation $Z$ shares with $X$ , conditioned on $C$ . Simultaneously, $I(Y;Z|C)$ measures how much information $Z$ retains about $Y$ , also under the condition of $C$ . The parameter $\beta$ serves as a crucial tuning parameter, balancing these two aspects.
418
+
419
+ # F.2 ADAPTING CEB TO VAES WITHOUT CONDITIONING VARIABLES
420
+
421
+ In the realm of Variational Autoencoders, where the training strategy is to reconstruct the input data $X$ using a latent representation $Z$ without any external conditioning C, the CEB framework
422
+
423
+ undergoes a significant simplification. Given that $X = Y$ in a typical VAE setup, the CEB objective reduces to a form where the focus shifts to optimizing the mutual information between $X$ and its latent representation $Z$ :
424
+
425
+ $$
426
+ \min _ {p (z | x)} \left[ (1 - \beta) I (X; Z) \right]
427
+ $$
428
+
429
+ This objective can be further broken down as $(1 - \beta)(H(X) - H(X|Z))$ , where $H(X)$ represents the entropy of the input data, and $H(X|Z)$ is the conditional entropy of the input given its latent representation. This formulation underscores the trade-off between compressing the input data in the latent space and retaining essential information for accurate reconstruction.
430
+
431
+ # F.3 INCORPORATING DIVERSITY INTO THE CEB OBJECTIVE
432
+
433
+ Following Friedman & Dieng (2022), Diversity can be quantitatively expressed as the exponential of the entropy of the latent space distribution $q(Z|X)$ :
434
+
435
+ $$
436
+ \text{Diversity} = \exp(H(Z | X))
437
+ $$
438
+
439
+ To understand how the CEB framework relates to this notion of diversity, we utilize the entropy chain rule $\mathrm{H}(Y|X) = \mathrm{H}(X,Y) - \mathrm{H}(X)$, which allows us to decompose $H(X|Z)$ in terms of the joint entropy $H(X,Z)$ and the marginal entropy $H(Z)$. Consequently, the CEB objective evolves into a more comprehensive form that explicitly accounts for the diversity of the latent space:
440
+
441
+ $$
442
+ \min _ {q (z \mid x)} [ (1 - \beta) (H (X) - H (X, Z) + H (Z)) ]
443
+ $$
444
+
445
+ $$
446
+ \min _ {q (z \mid x)} [ (1 - \beta) (- H (Z \mid X) + H (Z)) ]
447
+ $$
448
+
449
+ The latter makes the connection between the CEB term and diversity as defined in Friedman & Dieng (2022) clear: minimizing the CEB term maximizes the diversity term.
450
+
451
+ # G DISENTANGLEMENT AND VARIATIONAL INFORMATION BOTTLENECK
452
+
453
+ Disentanglement in VAEs, following Higgins' $\beta$ -VAE framework, seeks to learn representations where individual latent variables capture distinct, independent factors of variation in the data. This is achieved by modifying the traditional VAE objective to apply a stronger constraint on the latent space information bottleneck, controlled by a hyperparameter $\beta$ . The $\beta$ -VAE, introduced by Higgins et al. (2016), represents a seminal approach to disentanglement, promoting the learning of factorized and interpretable latent representations.
454
+
455
+ On a related front, the Variational Information Bottleneck (VIB) method, formulated by Alemi et al. (2017), extends the Information Bottleneck principle to deep learning. The VIB approach seeks to find an optimal trade-off between the compression of input data and the preservation of relevant information for prediction tasks. By employing a variational approximation, VIB efficiently learns compressed representations that are predictive of desired outcomes. Interestingly, Alemi et al. formulate a VIB objective that is equivalent to Higgins' $\beta$-VAE objective. This result makes evident how imposing a stronger information bottleneck leads to higher disentanglement.
456
+
457
+ # H SENSITIVITY ANALYSIS OF $\alpha$
458
+
459
+ In this section we present a sensitivity analysis of how $\alpha$ affects Vendi and FID results across the considered datasets. For consistency, and to analyze how $\alpha$ influences disentanglement scores, we also report a sensitivity analysis of the DCI metric and a correlation study showing how $\alpha$ is statistically correlated with the FID, Vendi, and DCI metrics.
460
+
461
+ # H.1 DIVERSITY AND VISUAL FIDELITY SENSITIVITY ANALYSIS WITH RESPECT TO $\alpha$
462
+
463
+ To analyse how $\alpha$ influences the presented results, we performed an evaluation of FID, Vendi and DCI using $\alpha \in \{0.00, 0.25, 0.50, 0.75, 1.00\}$, where for $\alpha = 0.00$ we obtain the $\beta$-VAE model, while for $\alpha = 0.25$ we get the results presented in the main paper. Figures 17 and 18 show that, when $\alpha \in [0.25, 0.50]$, $\alpha$-TCVAE presents the highest diversity scores, while keeping a FID score comparable to $\beta$-VAE.
464
+
465
+ ![](images/b841dd5f76ce225afb313d261af680133a2e0ce90cc8fa039a14452d60dbdd9e.jpg)
466
+ Figure 17: Sensitivity analysis of the diversity of generated images with respect to $\alpha$. Only one sampling strategy is considered: sampling from traversals. The green dashed line represents the ground-truth dataset diversity. It can be seen that the higher $\alpha$, the higher the Vendi score.
467
+
468
+ ![](images/312b18337e636249a0d9dc6e5f10a279c345c92c7d001d98899a2c462b6383a5.jpg)
469
+ Figure 18: Sensitivity Analysis of the Faithfulness of generated images to the data distribution, as measured by FID score, with respect to $\alpha$ . Only one sampling strategy is considered: sampled from traversals. It can be seen that for $\alpha \in [0.25, 0.50]$ the model presents higher visual fidelity.
470
+
471
+ Interestingly, the two sensitivity analyses show two main trends:
472
+
473
+ - Diversity increases when using higher values of $\alpha$.
474
+ - The FID score improves when using smaller values of $\alpha$.
475
+
476
+ Indeed, when using higher values of $\alpha$, we increase the contribution of the CEB term in Eq. 6, which enhances diversity at the cost of visual fidelity. As a result, the higher the value of $\alpha$, the more diverse the generated batch of images, and the lower the generation quality. However, it can be noticed that when using values of $\alpha$ between 0.25 and 0.50, we get a set of generated images that are more diverse and still have a better or comparable visual fidelity than $\beta$-VAE (i.e., $\alpha = 0$).
477
+
478
+ # H.2 DISENTANGLEMENT SENSITIVITY ANALYSIS WITH RESPECT TO $\alpha$
479
+
480
+ Here, we present a sensitivity analysis of the DCI metric. Figure 19 shows that the interval [0.25, 0.50] presents higher values of disentanglement, in line with the diversity and visual-fidelity analyses that show the best results in the same range. Such a trend can be explained by considering that $\alpha$ weights the contributions of the VIB and CEB terms. While the CEB term enhances diversity, the VIB term encourages disentanglement. As a result, we can see that DCI scores decrease when $\alpha$ gets closer to 1. Interestingly, when $\alpha$ is in [0.25, 0.50], the combination of the CEB and VIB terms produces a better bound for the Total Correlation objective than using either term alone, which results in higher DCI scores.
481
+
482
+ ![](images/45356d9798c35ee1127a82d40b108deb0dee5355ddc3bd82e4a7c930ab6e2683.jpg)
483
+ Figure 19: DCI score sensitivity analysis with respect to $\alpha$. On average, when $\alpha \in [0.25, 0.50]$, $\alpha$-TCVAE presents the best DCI scores.
484
+
485
+ # H.3 CORRELATION STUDY: HOW IS $\alpha$ CORRELATED WITH VENDI, FID, AND DCI METRICS?
486
+
487
+ Here, we present correlation matrices for all the considered datasets. We computed them using the models trained for the $\alpha$ sensitivity analyses. The correlation matrices in Fig. 20 confirm the trends observed in the other sensitivity analyses (i.e., Vendi, FID, and DCI). Indeed, $\alpha$ has a strong positive correlation with both FID and Vendi, showing that when $\alpha$ increases, diversity increases and FID deteriorates. On the other hand, $\alpha$ has a strong negative correlation with DCI for all datasets except for the Cars3D dataset, showing that, on average, the higher the value of $\alpha$, the lower the disentanglement.
488
+
489
+ ![](images/05651ef02a81d99cab8537567961add153e44bdc8e71d449a5e02382034b5839.jpg)
490
+
491
+ ![](images/663e0289dbb4ac3a7949f90b41a0559a63ffb96b6657d9c9179dce0f7b3a2800.jpg)
492
+
493
+ ![](images/b3c57d649cbe9606759b7641f3dd9ced353955de23a018ae6f1069a88ceb8f4f.jpg)
494
+
495
+ ![](images/e5cd8236088792179327c616c343010653476d6787193b0519efff30f095c418.jpg)
496
+
497
+ ![](images/c0abf2b7da6c766ef18732c5e4597622196cc9639799ab4c40993f7366cb0a5d.jpg)
498
+ Figure 20: Correlations between $\alpha$ , diversity (Vendi score), generation faithfulness (FID score), and disentanglement (DCI). Correlations are computed using the results from all models across 5 different seeds.
2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14192a4483c6b0255013d3ea11b114cca5476bbc0504c404f2f376964bb016ca
3
+ size 1475238
2024/$_alpha$TC-VAE_ On the relationship between Disentanglement and Diversity/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/f6938fce-0e4f-4932-bd85-a1090dddb4f2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:606970e7d4bc9651a4a8569d1311ac48d6cee48eaa87530b421697a09b81f82f
3
+ size 16273020
2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/full.md ADDED
@@ -0,0 +1,490 @@
1
+ # $\infty$ -DIFF: INFINITE RESOLUTION DIFFUSION WITH SUBSAMPLED MOLLIFIED STATES
2
+
3
+ Sam Bond-Taylor, Chris G. Willcocks
4
+
5
+ Department of Computer Science
6
+
7
+ Durham University
8
+
9
+ {samuel.e.bond-taylor, christopher.g.willcocks}@durham.ac.uk
10
+
11
+ # ABSTRACT
12
+
13
+ This paper introduces $\infty$ -Diff, a generative diffusion model defined in an infinite-dimensional Hilbert space, which can model infinite resolution data. By training on randomly sampled subsets of coordinates and denoising content only at those locations, we learn a continuous function for arbitrary resolution sampling. Unlike prior neural field-based infinite-dimensional models, which use point-wise functions requiring latent compression, our method employs non-local integral operators to map between Hilbert spaces, allowing spatial context aggregation. This is achieved with an efficient multi-scale function-space architecture that operates directly on raw sparse coordinates, coupled with a mollified diffusion process that smooths out irregularities. Through experiments on high-resolution datasets, we found that even at an $8\times$ subsampling rate, our model retains high-quality diffusion. This leads to significant run-time and memory savings, delivers samples with lower FID scores, and scales beyond the training resolution while retaining detail.
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Denoising diffusion models (Song and Ermon, 2019; Ho et al., 2020) have become a dominant choice for data generation, offering stable training and the ability to generate diverse and high quality samples. These methods function by defining a forward diffusion process which gradually destroys information by adding Gaussian noise, with a neural network then trained to denoise the data, in turn approximating the data distribution. Scaling diffusion models to higher resolutions has been the topic of various recent research, with approaches including iteratively upsampling lower resolution images (Ho et al., 2022a) and operating in a compressed latent space (Rombach et al., 2022).
18
+
19
+ Deep neural networks typically assume that data can be represented with a fixed uniform grid; however, the underlying signal is often continuous. As such, these approaches scale poorly with resolution. Neural fields (Xie et al., 2022; Sitzmann et al., 2020; Mildenhall et al., 2021) address this problem by directly representing data as a mapping from coordinates to intensities (such as pixel values), making the parameterisation and memory/run-time independent of resolution, thereby allowing training on data that would not usually fit in memory. Neural field-based generative models (Dupont et al., 2022a;b; Bond-Taylor and Willcocks, 2021; Du et al., 2021) have been developed to take advantage of these properties. Being inherently independent between coordinates, these
20
+
21
+ ![](images/f6ac2faa1f1776c2d16f27c5809d7abc830f130f5b8dfc6f90cd9b31f63615ee.jpg)
22
+ Figure 1: We define a diffusion process in an infinite dimensional image space by randomly sampling coordinates and training a model parameterised by neural operators to denoise at those coordinates.
23
+
24
+ ![](images/376c6c637522e4a6f54784551b981fd3aa850e230f493e4782c9621a1fa91e47.jpg)
25
+ Figure 2: Modelling data as functions allows sampling at arbitrary resolutions using the same model with different sized noise. Left to right: $64 \times 64$ , $128 \times 128$ , $256 \times 256$ (original), $512 \times 512$ , $1024 \times 1024$ .
26
+
27
+ approaches condition networks on compressed latent vectors to provide global information. However, the sample quality of these methods is significantly lower than finite-dimensional generative models.
28
+
29
+ In this work we develop an approach that substantially improves upon the quality and scaling of existing infinite dimensional generative models, reducing the gap to finite-dimensional methods, while retaining the benefits of infinite dimensional models: subsampling coordinates to decouple memory/run-time from resolution, making scaling more computationally feasible, while also allowing training and sampling at arbitrary resolutions. We achieve this by designing a Gaussian diffusion model in an infinite dimensional state space<sup>1</sup>. We argue that compressed latent-based neural fields cannot effectively be used to parameterise such diffusion models due to the reliance on compression, going against standard diffusion architecture design, with it also being impractical to compress states to latents at every step. Instead, we propose using non-local integral operators to model the denoising function, aggregating both global and local information in order to effectively denoise the data.
30
+
31
+ Specifically, we propose $\infty$ -Diff, addressing these issues:
32
+
33
+ - We introduce a Gaussian diffusion model defined in an infinite-dimensional state space that allows complex arbitrary resolution data to be generated (see Fig. 2)<sup>1</sup>.
34
+ - We design a powerful and scalable, function-space architecture that operates directly on raw sparsely subsampled coordinates, enabling improvements in run-time and memory usage.
35
+ - We achieve state-of-the-art FID scores on multiple high-res image datasets, trained with up to $8 \times$ subsampling, substantially outperforming prior infinite resolution generative models.
36
+
37
+ # 2 BACKGROUND
38
+
39
+ Here we review Gaussian diffusion models (Section 2.1) and generative neural fields (Section 2.2).
40
+
41
+ # 2.1 DIFFUSION MODELS
42
+
43
+ Gaussian diffusion models (Sohl-Dickstein et al., 2015; Ho et al., 2020) are formed by defining a forward process $q(\pmb{x}_{1:T}|\pmb{x}_0)$ that gradually adds noise to the data, $\pmb{x}_0 \sim q(\pmb{x}_0)$, over $T$ steps, resulting in a sequence of latent variables $\pmb{x}_1, \dots, \pmb{x}_T$ such that $q(\pmb{x}_T) \approx \mathcal{N}(\pmb{x}_T; \pmb{0}, \pmb{I})$. The reverse of this process can also be expressed as a Markov chain $p(\pmb{x}_{0:T})$. With Gaussian transition densities chosen to ensure these properties hold, the densities may be expressed as
44
+
45
+ $$
46
+ q \left(\boldsymbol {x} _ {1: T} \mid \boldsymbol {x} _ {0}\right) = \prod_ {t = 1} ^ {T} q \left(\boldsymbol {x} _ {t} \mid \boldsymbol {x} _ {t - 1}\right), \quad q \left(\boldsymbol {x} _ {t} \mid \boldsymbol {x} _ {t - 1}\right) = \mathcal {N} \left(\boldsymbol {x} _ {t}; \sqrt {1 - \beta_ {t}} \boldsymbol {x} _ {t - 1}, \beta_ {t} \boldsymbol {I}\right), \tag {1}
47
+ $$
48
+
49
+ $$
50
+ p \left(\boldsymbol {x} _ {0: T}\right) = p \left(\boldsymbol {x} _ {T}\right) \prod_ {t = 1} ^ {T} p \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}\right), \quad p \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}\right) = \mathcal {N} \left(\boldsymbol {x} _ {t - 1}; \boldsymbol {\mu} _ {\theta} \left(\boldsymbol {x} _ {t}, t\right), \boldsymbol {\Sigma} _ {\theta} \left(\boldsymbol {x} _ {t}, t\right)\right), \tag {2}
51
+ $$
52
+
53
+ where $0 < \beta_{1},\ldots ,\beta_{T} < 1$ is a pre-defined variance schedule and the covariance is typically of the form $\pmb{\Sigma}_{\theta}(\pmb{x}_t,t) = \sigma_t^2\pmb{I}$ where often $\sigma_t^2 = \beta_t$ . Aiding training efficiency, $q(\pmb{x}_t|\pmb{x}_0)$ can be expressed in closed form as $q(\pmb{x}_t|\pmb{x}_0) = \mathcal{N}(\pmb{x}_t;\sqrt{\bar{\alpha}_t}\pmb{x}_0,(1 - \bar{\alpha}_t)\pmb{I})$ where $\bar{\alpha}_{t} = \prod_{s = 1}^{t}\alpha_{s}$ for $\alpha_{s} = 1 - \beta_{s}$ . Training is possible by optimising the evidence lower bound on the negative log-likelihood, expressed as the KL-divergence between the forward process posteriors and backward transitions at each step
54
+
55
+ $$
56
+ \mathcal {L} = \sum_ {t \geq 1} \mathbb {E} _ {q} \left[ D _ {\mathrm {K L}} \left(q \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}, \boldsymbol {x} _ {0}\right) \| p \left(\boldsymbol {x} _ {t - 1} \mid \boldsymbol {x} _ {t}\right)\right) \right] = \sum_ {t \geq 1} \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \| \tilde {\boldsymbol {\mu}} _ {t} \left(\boldsymbol {x} _ {t}, \boldsymbol {x} _ {0}\right) - \boldsymbol {\mu} _ {\theta} \left(\boldsymbol {x} _ {t}, t\right) \| _ {2} ^ {2} \right], \tag {3}
57
+ $$
58
+
59
+ for $q(\pmb{x}_{t-1}|\pmb{x}_t, \pmb{x}_0) = \mathcal{N}(\pmb{x}_{t-1}; \tilde{\pmb{\mu}}_t(\pmb{x}_t, \pmb{x}_0), \tilde{\beta}_t\pmb{I})$, where $\tilde{\pmb{\mu}}_t$ and $\tilde{\beta}_t$ can be derived in closed form. It is typical to reparameterise $\pmb{\mu}_{\theta}$ to simplify the variational bound; one example, found to improve visual quality, is $\pmb{\mu}_{\theta}(\pmb{x}_t, t) = \frac{1}{\sqrt{\alpha_t}} (\pmb{x}_t - \frac{\beta_t}{\sqrt{1 - \bar{\alpha}_t}}\pmb{\epsilon}_{\theta}(\pmb{x}_t, t))$, where the denoising network instead predicts the added noise. Diffusion models are closely connected with score-matching models; this can be seen by making the approximation (De Bortoli et al., 2021),
60
+
61
+ $$
62
+ \begin{aligned} p\left(\boldsymbol{x}_{t-1} \mid \boldsymbol{x}_{t}\right) &= p\left(\boldsymbol{x}_{t} \mid \boldsymbol{x}_{t-1}\right) \exp\left(\log p\left(\boldsymbol{x}_{t-1}\right) - \log p\left(\boldsymbol{x}_{t}\right)\right) \qquad (4) \\ &\approx \mathcal{N}\left(\boldsymbol{x}_{t-1}; \sqrt{1-\beta_{t}}\,\boldsymbol{x}_{t} + \beta_{t} \nabla_{\boldsymbol{x}_{t}} \log p(\boldsymbol{x}_{t}), \beta_{t}\boldsymbol{I}\right), \qquad (5) \end{aligned}
63
+ $$
64
+
65
+ which holds for small values of $\beta_{t}$ . While $\nabla_{\boldsymbol{x}_t}\log p(\boldsymbol{x}_t)$ is not available, it can be approximated using denoising score matching methods (Hyvarinen, 2005; Vincent, 2011). Given that $\nabla_{\boldsymbol{x}_t}\log p(\boldsymbol{x}_t) = \mathbb{E}_{p(\boldsymbol{x}_0|\boldsymbol{x}_t)}[\nabla_{\boldsymbol{x}_t}\log p(\boldsymbol{x}_t|\boldsymbol{x}_0)]$ we can learn an approximation to the score with a neural network parameterised by $\theta$ , $s_{\theta}(\boldsymbol{x}_t,t)\approx \nabla \log p(\boldsymbol{x}_t)$ (Song and Ermon, 2019), by minimising a reweighted variant of the ELBO (Eq. 3). One problem with diffusion models is the slow sequential sampling; to speed this up, denoising diffusion implicit models (DDIMs) transform diffusion models into deterministic models allowing fewer steps to yield the same quality, replacing the sampling steps with
66
+
67
+ $$
68
+ \boldsymbol {x} _ {t - 1} = \sqrt {\alpha_ {t - 1}} \left(\frac {\boldsymbol {x} _ {t} - \sqrt {1 - \alpha_ {t}} \boldsymbol {\epsilon} _ {\theta} (\boldsymbol {x} _ {t} , t)}{\sqrt {\alpha_ {t}}}\right) + \sqrt {1 - \alpha_ {t - 1}} \cdot \boldsymbol {\epsilon} _ {\theta} (\boldsymbol {x} _ {t}, t). \tag {6}
69
+ $$
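For reference, a single deterministic DDIM update following Eq. 6 can be sketched as below, where `eps_model` stands in for $\pmb{\epsilon}_{\theta}$ and `alphas_cumprod` holds the cumulative products $\bar{\alpha}_t$; this is illustrative rather than the exact sampler used later.

```python
import torch

@torch.no_grad()
def ddim_step(x_t, t, t_prev, eps_model, alphas_cumprod):
    """One deterministic DDIM update x_t -> x_{t_prev} (Eq. 6), assuming t_prev < t."""
    a_t, a_prev = alphas_cumprod[t], alphas_cumprod[t_prev]
    eps = eps_model(x_t, t)
    x0_pred = (x_t - (1.0 - a_t).sqrt() * eps) / a_t.sqrt()   # predicted clean sample
    return a_prev.sqrt() * x0_pred + (1.0 - a_prev).sqrt() * eps
```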
70
+
71
+ # 2.2 GENERATIVE NEURAL FIELDS
72
+
73
+ Neural fields (Xie et al., 2022; Mildenhall et al., 2021; Sitzmann et al., 2020) are an approach for continuous data representation that map from coordinates $c$ to values $v$ (such as RGB intensities), $f_{\theta}(c) = v$. This decouples the memory needed to represent data from its resolution. The mapping function, $f_{\theta}$, typically an MLP network, is optimised by minimising a reconstruction loss with ground truth values at each coordinate. The local nature of $f_{\theta}$ allows the loss to be Monte-Carlo approximated by evaluating $f_{\theta}$ on subsets of coordinates, allowing higher resolution data than would fit in memory to be trained on. Since $f_{\theta}$ operates independently per coordinate and cannot aggregate over multiple points as convolutions/transformers do, approaches generally also condition on compressed latent vectors $z$ describing single data points, $f_{\theta}(c,z)$, in order to represent spaces of functions. Dupont et al. (2022a) first uses meta-learning to compress the dataset into latent conditional neural fields, then approximates the distribution of latents with a DDPM (Ho et al., 2020) or Normalizing Flow (Rezende and Mohamed, 2015). Bond-Taylor and Willcocks (2021) form a VAE-like model with a single gradient step to obtain latents. Zhuang et al. (2023) design a diffusion model with a small subset of coordinates used to provide context. Finally, some approaches use hypernetworks to output the weights of neural fields: Dupont et al. (2022b) define the hypernetwork as a generator in an adversarial framework, and Du et al. (2021) use manifold learning to represent the latent space.
74
+
75
+ # 3 INFINITE DIMENSIONAL DIFFUSION MODELS
76
+
77
+ In this section we extend diffusion models to infinite dimensions in order to allow higher-resolution data to be trained on by subsampling coordinates during training, and to permit training/sampling at arbitrary resolutions. We argue that the application of conditional neural fields to diffusion models is problematic due to the need to compress to a latent vector, adding complexity and opportunity for error; instead, the denoising function should be a non-local integral operator with no compression. A number of parallel works also developed diffusion models in infinite dimensions, including Kerrigan et al. (2023); Lim et al. (2023); and Franzese et al. (2023); we recommend also reading these works, which go further in the theoretical treatment, while ours focuses more on design and practical scaling.
78
+
79
+ To achieve this, we restrict the diffusion state space to a Hilbert space $\mathcal{H}$ , elements of which, $x\in \mathcal{H}$ are functions, e.g. $x\colon \mathbb{R}^n\to \mathbb{R}^d$ . Hilbert spaces are equipped with an inner product $\langle \cdot ,\cdot \rangle$ and corresponding norm $\| \cdot \|_{\mathcal{H}}$ . For simplicity we consider the case where $\mathcal{H}$ is the space of $L^2$ functions from $[0,1]^n$ to $\mathbb{R}^d$ although the following sections can be applied to other spaces. As such, a point in $\mathcal{H}$ could represent an image, audio signal, video, 3D model, etc. A Gaussian measure $\mu$ can be defined in $\mathcal{H}$ in terms of its characteristic function $\hat{\mu}$ (Da Prato and Zabczyk, 2014),
80
+
81
+ $$
82
+ \hat{\mu}(x) = \exp\left(i\langle x, m\rangle - \frac{1}{2}\langle Cx, x\rangle\right), \tag{7}
83
+ $$
84
+
85
+ where the mean $m$ lies in $\mathcal{H}$ , $m\in \mathcal{H}$ and the covariance operator $(C:\mathcal{H}\to \mathcal{H})$ is self-adjoint (denoted $C = C^{*}$ ), non-negative (i.e. $C\geq 0$ ), and trace-class $(\int_{\mathcal{H}}\| x\|_{\mathcal{H}}d\mu (x) = \operatorname {tr}(C) < \infty)$
86
+
87
+ ![](images/caff19f6a57c03269d2bbc9e609d14e7a36e30f3099aa8474df643182a206416.jpg)
88
+ White Noise Diffusion
89
+ Mollified Diffusion
90
+ Figure 3: Example diffusion processes. Mollified diffusion smooths diffusion states allowing the space to be more effectively modelled with continuous operators.
91
+
92
+ (Kukush, 2020). For a Gaussian random element $x$ with distribution $\mu$, $x \sim \mathcal{N}(m, C)$. The Radon-Nikodym theorem states the existence of a density for a measure $\nu$ absolutely continuous with respect to a base measure $\mu$: for example, the density between two Gaussians is given in Minh (2021); see Lim et al. (2023); Kerrigan et al. (2023) for more detail in the context of functional diffusion models.
93
+
94
+ # 3.1 MOLLIFICATION
95
+
96
+ When defining diffusion in infinite dimensions, it may seem natural to use white noise in the forwards process, where each coordinate is an independent and identically distributed Gaussian random variable; that is, $\mathcal{N}(0,C_I)$ where $C_I(z(s),z(s')) = \delta (s - s')$, using the Dirac delta function $\delta$. However, this noise does not lie in $\mathcal{H}$ (Da Prato and Zabczyk, 2014), as it does not satisfy the trace-class requirement. Instead, we obtain Gaussian noise in $\mathcal{H}$ by convolving white noise with a mollifier kernel $k(s) > 0$ corresponding to a linear operator $T$, giving $\mathcal{N}(0,TT^{*})$, smoothing the white noise to lie in $\mathcal{H}$ (Higdon, 2002). To ensure one-to-one correspondence between kernel and noise, $k$ must satisfy $\int_{\mathbb{R}^d}k(s)ds < \infty$ and $\int_{\mathbb{R}^d}k^2 (s)ds < \infty$, making $TT^{*}$ self-adjoint and non-negative. Considering $k$ to be a Gaussian kernel with smoothing parameter $l > 0$, $h = Tx$ is given by
97
+
98
+ $$
99
+ h(c) = \int_{\mathbb{R}^{n}} K(c - y, l)\, x(y)\, \mathrm{d}y, \quad \text{where} \quad K(y, l) = \frac{1}{(4\pi l)^{\frac{n}{2}}} e^{-\frac{|y|^{2}}{4l}}. \tag{8}
100
+ $$
101
+
102
+ # 3.2 INFINITE DIMENSIONAL MOLLIFIED DIFFUSION
103
+
104
+ To formulate a diffusion model in $\mathcal{H}$ , we must specify the transition distributions. However, irregularity in data points $x$ can impact stability, leading to the model being unable to generalise across different subsampling rates/resolutions. This can be mitigated by careful hyperparameter tuning or, in our case, by also mollifying $x$ (as with the previous noise mollification). While the necessity of this depends on the nature of $x$ , we have included it for completeness. First, we define the marginals
105
+
106
+ $$
107
+ q \left(x _ {t} \mid x _ {0}\right) = \mathcal {N} \left(x _ {t}; \sqrt {\bar {\alpha} _ {t}} T x _ {0}, \left(1 - \bar {\alpha} _ {t}\right) T T ^ {*}\right), \tag {9}
108
+ $$
109
+
110
+ where coefficients are the same as in Section 2.1. From this we are able to derive a closed form representation of the posterior (proof in Appendix B.1),
111
+
112
+ $$
113
+ q \left(x _ {t - 1} \mid x _ {t}, x _ {0}\right) = \mathcal {N} \left(x _ {t - 1}; \tilde {\mu} _ {t} \left(x _ {t}, x _ {0}\right), \tilde {\beta} _ {t} T T ^ {*}\right),
114
+ $$
115
+
116
+ $$
117
+ \text{where} \quad \tilde{\mu}_{t}\left(x_{t}, x_{0}\right) = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_{t}}{1-\bar{\alpha}_{t}}\, T x_{0} + \frac{\sqrt{\alpha_{t}}\left(1-\bar{\alpha}_{t-1}\right)}{1-\bar{\alpha}_{t}}\, x_{t} \quad \text{and} \quad \tilde{\beta}_{t} = \frac{1-\bar{\alpha}_{t-1}}{1-\bar{\alpha}_{t}}\,\beta_{t}. \tag{10}
118
+ $$
119
+
120
+ Defining the reverse transitions as $p_{\theta}(x_{t - 1}|x_t) = \mathcal{N}(x_{t - 1};\mu_\theta (x_t,t),\sigma_t^2 TT^*)$, we can parameterise $\mu_{\theta}:\mathcal{H}\times \mathbb{R}\to \mathcal{H}$ to directly predict $x_0$. The loss in Eq. (3) can be extended to infinite dimensions (Pinski et al., 2015). However, since Ho et al. (2020) find that predicting the noise yields higher image quality, we parameterise $\mu_{\theta}$ to predict $\xi \sim \mathcal{N}(0,TT^{*})$, motivated by rewriting the loss as
121
+
122
+ $$
123
+ \mathcal {L} _ {t - 1} = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| T ^ {- 1} \left(\frac {1}{\sqrt {\alpha_ {t}}} \left(x _ {t} \left(x _ {0}, \xi\right) - \frac {\beta_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} \xi\right) - \mu_ {\theta} \left(x _ {t}, t\right)\right) \right\| _ {\mathcal {H}} ^ {2} \right], \tag {11}
124
+ $$
125
+
126
+ $$
127
+ \mu_ {\theta} \left(x _ {t}, t\right) = \frac {1}{\sqrt {\alpha_ {t}}} \left[ x _ {t} - \frac {\beta_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} f _ {\theta} \left(x _ {t}, t\right) \right], \tag {12}
128
+ $$
129
+
130
+ where $x_{t}(x_{0},\xi) = \sqrt{\bar{\alpha}_{t}} x_{0} + \sqrt{1 - \bar{\alpha}_{t}}\xi$ . Since $T^{-1}$ does not affect the minima, we follow Ho et al. (2020) and use a simplified loss, $\mathcal{L}_{t-1}^{\mathrm{simple}} = \mathbb{E}_q[\|f_{\theta}(x_t,t)-\xi\|_{\mathcal{H}}^2]$ . The concurrent work by Kerrigan
131
+
132
+ et al. (2023) showed that in the infinite-dimensional limit, the loss will be finite only for specific choices of $\tilde{\beta}_t$ , while Lim et al. (2023) found similar only for specific parameterisations of $\mu_{\theta}$ ; however, since the loss is Monte-Carlo approximated, this is not problematic in practice.
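Putting Eqs. 9-12 together, a single training step of the simplified objective can be sketched as follows: white noise is mollified by $T$, mixed into the mollified data, and the network is trained to recover the mollified noise at a random subset of coordinates. The `mollify` operator, the model interface, and the coordinate indexing convention are placeholders for whichever parameterisation is used.

```python
import torch

def mollified_diffusion_loss(model, mollify, x0, coords, alphas_cumprod):
    """L^simple: predict the mollified noise xi from x_t at a sparse set of coordinates.

    model   : f_theta(x_t, t, coords) -> prediction of xi at those coordinates
    mollify : linear operator T (e.g. a Gaussian blur), applied per data point
    x0      : batch of (discretised) data functions, spatial locations flattened into the last axis
    coords  : randomly subsampled coordinate indices used for this step
    """
    b = x0.shape[0]
    t = torch.randint(0, len(alphas_cumprod), (b,), device=x0.device)
    a_bar = alphas_cumprod[t].view(b, *([1] * (x0.dim() - 1)))
    xi = mollify(torch.randn_like(x0))                 # xi ~ N(0, T T*)
    x_t = a_bar.sqrt() * mollify(x0) + (1.0 - a_bar).sqrt() * xi
    pred = model(x_t[..., coords], t, coords)          # denoise only at the sampled coordinates
    return ((pred - xi[..., coords]) ** 2).mean()
```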
133
+
134
+ Data Mollification By mollifying the training data $x_0$ to ensure regularity, the resulting samples are similarly regular; directly predicting $x_0$ would give an estimate of the original data, but by predicting $\xi$ we are only able to sample $Tx_0$. However, in the case of the Gaussian mollifier kernel with adequate boundary conditions, the existence of the inverse $T^{-1}$ is clear: considering the Fourier transform of $x(c)$, denoted $\hat{x}(\omega)$, the Gaussian convolution can be defined by $\hat{h}(\omega) = e^{-\omega^2 t} \hat{x}(\omega)$. And so $Tx$ is one-to-one on any class of Fourier transformable functions, with $Tx$ being bounded ensuring uniqueness and therefore invertibility (John, 1955). Explicitly, the inverse is given by $\hat{x}(\omega) = e^{\omega^2 t} \hat{h}(\omega)$ (Hummel et al., 1987). However, inverting is ill-conditioned, with arbitrarily small changes (for instance by floating point error) destroying smoothness (Hummel et al., 1987). In this case, the Wiener filter can for instance be used as an approximate inverse, defined as $\tilde{x}(\omega) = \frac{e^{-\omega^2 t}}{e^{-2(\omega^2 t)} + \epsilon^2} \hat{h}(\omega)$, where $\epsilon$ is an estimate of the inverse SNR (Biemond et al., 1990).
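As an illustration, the Gaussian mollification and its Wiener-filtered approximate inverse can be sketched in the discrete Fourier domain as below; the smoothing parameter `l` and noise estimate `eps` are hypothetical, and discretisation/boundary effects are ignored.

```python
import numpy as np

def gaussian_mollify(x, l):
    """Apply T: convolve a 2D array x with a Gaussian kernel via the Fourier domain."""
    H, W = x.shape
    fy = np.fft.fftfreq(H)[:, None]
    fx = np.fft.fftfreq(W)[None, :]
    transfer = np.exp(-4.0 * np.pi ** 2 * (fx ** 2 + fy ** 2) * l)   # Gaussian transfer function
    return np.fft.ifft2(np.fft.fft2(x) * transfer).real

def wiener_demollify(h, l, eps=1e-2):
    """Approximate T^{-1} with a Wiener filter; eps estimates the inverse SNR."""
    H, W = h.shape
    fy = np.fft.fftfreq(H)[:, None]
    fx = np.fft.fftfreq(W)[None, :]
    transfer = np.exp(-4.0 * np.pi ** 2 * (fx ** 2 + fy ** 2) * l)
    return np.fft.ifft2(np.fft.fft2(h) * transfer / (transfer ** 2 + eps ** 2)).real
```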
135
+
136
+ # 4 PARAMETERISING THE DIFFUSION PROCESS
137
+
138
+ In order to model the denoising function in Hilbert space, there are certain properties that are essential for the class of learnable functions to satisfy so as to allow training on infinite resolution data:
139
+
140
+ 1. Can take as input points positioned at arbitrary coordinates.
141
+ 2. Generalises to different numbers of input points than trained on, sampled on a regular grid.
142
+ 3. Able to capture both global and local information.
143
+ 4. Scales to very large numbers of input points, i.e. efficient in terms of runtime and memory.
144
+
145
+ Recent diffusion models often use a U-Net (Ronneberger et al., 2015) consisting of a convolutional encoder and decoder with skip-connections between resolutions, allowing both global and local information to be efficiently captured. Unfortunately, U-Nets function on a fixed grid, making them unsuitable. However, we can take inspiration from them to build an architecture satisfying the desired properties.
146
+
147
+ # 4.1 NEURAL OPERATORS
148
+
149
+ Neural Operators (Li et al., 2020; Kovachki et al., 2021) are a framework designed for efficiently solving partial differential equations by learning to directly map the PDE parameters to the solution in a single step. However, more generally they are able to learn a map between two infinite dimensional function spaces making them suitable for parameterising an infinite dimensional diffusion model.
150
+
151
+ Let $\mathcal{X}$ and $S$ be separable Banach spaces representing the spaces of noisy and denoised data respectively; a neural operator is a map $\mathcal{F}_{\theta} \colon \mathcal{X} \to S$. Since $x \in \mathcal{X}$ and $s \in S$ are both functions, we only have access to pointwise evaluations. Let $\mathbf{c} \in \binom{D}{m}$ be an $m$-point discretisation of the domain $D = [0,1]^n$ (i.e. $\mathbf{c}$ is $m$ coordinates), and assume we have observations $x(\mathbf{c}) \in \mathbb{R}^{m \times d}$. To be discretisation invariant, the neural operator may be evaluated at any $c \in D$, potentially $c \notin \mathbf{c}$, thereby allowing a transfer of solutions between discretisations. Each layer is built using a non-local integral kernel operator, $\mathcal{K}(x;\phi)$, parameterised by a neural network $\kappa_{\phi}$, aggregating information spatially,
152
+
153
+ $$
154
+ \left(\mathcal {K} (x; \phi) v _ {l}\right) (c) = \int_ {D} \kappa_ {\phi} (c, b, x (c), x (b)) v _ {l} (b) \mathrm {d} b, \quad \forall c \in D. \tag {13}
155
+ $$
156
+
157
+ Deep networks can be built in a similar manner to conventional methods, by stacking layers of linear operators with non-linear activation functions, $v_{0} \mapsto v_{1} \mapsto \dots \mapsto v_{L}$ where $v_{l} \mapsto v_{l + 1}$ is defined as
158
+
159
+ $$
160
+ v _ {l + 1} (c) = \sigma \left(W v _ {l} (c) + \left(\mathcal {K} (x; \phi) v _ {l}\right) (c)\right), \quad \forall c \in D, \tag {14}
161
+ $$
162
+
163
+ for input $v_{0} = x$ , activations $v_{l}$ , output $v_{L} = s$ , pointwise linear transformation $W$ : $\mathbb{R}^d \to \mathbb{R}^d$ , and activation function $\sigma$ : $\mathbb{R} \to \mathbb{R}$ . One example is the Fourier Neural Operator (FNO) (Li et al., 2021),
164
+
165
+ $$
166
+ \left(\mathcal{K}(x; \phi) v_{l}\right)(c) = \mathcal{G}^{-1}\left(R_{\phi} \cdot \left(\mathcal{G} v_{l}\right)\right)(c), \quad \forall c \in D, \tag{15}
167
+ $$
168
+
169
+ where $\mathcal{G}$ is the Fourier transform, and $R_{\phi}$ is a learned transformation in Fourier space. When coordinates lie on a regular grid, the fast Fourier transform can be used, making FNOs fast and scalable.
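For illustration, a basic 2D spectral layer of this form, truncated to a fixed number of low-frequency modes, could be sketched as below; note this is a generic FNO-style layer rather than the architecture used in this work, which favours dense convolutions at the gridded levels.

```python
import torch
import torch.nn as nn

class SpectralConv2d(nn.Module):
    """Kernel integral operator applied in Fourier space (Eq. 15), keeping only low modes."""

    def __init__(self, channels, modes):
        super().__init__()
        # modes must not exceed min(H, W // 2 + 1) of the inputs passed to forward()
        self.modes = modes
        scale = 1.0 / (channels * channels)
        self.weight = nn.Parameter(scale * torch.randn(channels, channels, modes, modes, dtype=torch.cfloat))

    def forward(self, v):                      # v: (batch, channels, H, W)
        v_hat = torch.fft.rfft2(v)
        out = torch.zeros_like(v_hat)
        m = self.modes
        # R_phi . (G v): mix channels on the retained low-frequency modes
        out[:, :, :m, :m] = torch.einsum("bixy,ioxy->boxy", v_hat[:, :, :m, :m], self.weight)
        return torch.fft.irfft2(out, s=v.shape[-2:])
```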
170
+
171
+ ![](images/0d166108e8c55081824fb16f17c2c173ac056f13e9db2a184c60a9e0df294514.jpg)
172
+ Figure 4: $\infty$ -Diff uses a hierarchical architecture that operates on irregularly sampled functions at the top level to efficiently capture fine details, and on fixed grids at the other levels to capture global structure. This approach allows scaling to intricate high-resolution data.
173
+
174
+ # 4.2 MULTI-SCALE ARCHITECTURE
175
+
176
+ While neural operators which satisfy all required properties (1-4) exist, such as Galerkin attention (Cao, 2021) (softmax-free linear attention) and MLP-Mixers (Tolstikhin et al., 2021), scaling beyond small numbers of coordinates is still challenging due to the high memory costs. Instead we design a U-Net inspired multi-scale architecture (Fig. 4) that separately aggregates local/global information.
177
+
178
+ In a continuous setting, there are two main approaches to downsampling: (1) selecting a subset of coordinates (Wang and Golland, 2022) and (2) interpolating points to a regularly spaced grid (Rahman et al., 2022). We found that with repeated application of (1), approximating integral operators on non-uniformly spaced grids with very few points did not perform or generalise well, likely due to the high variance. On the other hand, while working with a regular grid removes some sparsity properties, issues with variance are much lower. As such, we use a hybrid approach: sparse operators are applied to local regions of the raw irregularly sampled data; after this, points are interpolated to a regular grid and a grid-based architecture is applied in order to aggregate global information; if the regular grid is of sufficiently high dimension, this combination should be sufficient. While an FNO (Li et al., 2021; Rahman et al., 2022) architecture could be used, we achieved better results with dense convolutions (Nichol and Dhariwal, 2021), with sparse operators used for resolution changes.
179
+
180
+ # 4.3 EFFICIENT SPARSE OPERATORS
181
+
182
+ At the sparse level we use convolution operators (Kovachki et al., 2021), finding them to be more performant than Galerkin attention, with global context provided by the multiscale architecture. This is defined using a translation invariant kernel restricted to the local region of each coordinate, $N(c)$ ,
183
+
184
+ $$
185
+ x (c) = \int_ {\mathrm {N} (c)} \kappa (c - y) v (y) \mathrm {d} y, \quad \forall c \in D. \tag {16}
186
+ $$
187
+
188
+ We restrict $\kappa$ to be a depthwise kernel due to the greater parameter efficiency for large kernels (particularly for continuously parameterised kernels), and because we find that depthwise kernels are more able to generalise when trained with fewer sampled coordinates; although the sparsity ratio is the same for regular and depthwise convolutions, because there are substantially more values in a regular kernel, there is more spatial correlation between values. When a very large number of sampled coordinates are used, fully continuous convolutions are extremely impractical in terms of memory usage and run-time. In practice, however, images are obtained and stored on a discrete grid. As such, by treating images as high dimensional, but discrete entities, we can take advantage of efficient sparse convolution libraries (Choy et al., 2019; Contributors, 2022), making memory usage and run-times much more reasonable. Specifically, we use TorchSparse (Tang et al., 2022), modified to allow depthwise convolutions. Wang and Golland (2022) proposed using low discrepancy coordinate sequences to approximate the integrals due to their better convergence rates. However, we found uniformly sampled points to be more effective, likely because high frequency details are able to be more easily captured.
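A naive Monte-Carlo version of the depthwise local operator in Eq. 16 over irregular coordinates is sketched below for illustration; in practice this is replaced by the discrete sparse depthwise convolutions described above, and the `kernel_fn` callable is hypothetical.

```python
import torch

def local_depthwise_operator(coords, values, kernel_fn, radius):
    """Approximate x(c) = int_{N(c)} kappa(c - y) v(y) dy with a sum over nearby sampled points.

    coords    : (N, n) coordinates of sampled points
    values    : (N, d) function values v at those coordinates
    kernel_fn : maps offsets (M, n) -> per-channel weights (M, d)   [depthwise kernel]
    radius    : neighbourhood size defining N(c)
    """
    N, n = coords.shape
    diffs = coords[:, None, :] - coords[None, :, :]              # (N, N, n) pairwise offsets
    mask = (diffs.norm(dim=-1) <= radius).float()                # restrict to the local region N(c)
    weights = kernel_fn(diffs.reshape(-1, n)).reshape(N, N, -1)  # depthwise kernel evaluations
    weights = weights * mask[..., None]
    # Monte-Carlo estimate: average kernel-weighted values over the neighbours of each point
    denom = mask.sum(dim=1, keepdim=True).clamp(min=1.0)
    return (weights * values[None, :, :]).sum(dim=1) / denom
```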
189
+
190
+ ![](images/8b02bce5ea1e92956d445de7d0a4e7ace5b311cdf72275bbfc0d6022e57ed96a.jpg)
191
+ Figure 5: Samples from $\infty$ -Diff models trained on sets of randomly subsampled coordinates.
192
+
193
+ <table><tr><td>Method</td><td>CelebAHQ-64</td><td>CelebAHQ-128</td><td>FFHQ-256</td><td>Church-256</td></tr><tr><td>Finite-Dimensional</td><td></td><td></td><td></td><td></td></tr><tr><td>CIPS (Anokhin et al., 2021)</td><td>-</td><td>-</td><td>5.29</td><td>10.80</td></tr><tr><td>StyleSwin (Zhang et al., 2022)</td><td>-</td><td>3.39</td><td>3.25</td><td>8.28</td></tr><tr><td>UT (Bond-Taylor et al., 2022)</td><td>-</td><td>-</td><td>3.05</td><td>5.52</td></tr><tr><td>StyleGAN2 (Karras et al., 2020)</td><td>-</td><td>2.20</td><td>2.35</td><td>6.21</td></tr><tr><td>Infinite-Dimensional</td><td></td><td></td><td></td><td></td></tr><tr><td>D2F (Dupont et al., 2022a)</td><td>40.4*</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DPF (Zhuang et al., 2023)</td><td>13.21*</td><td>-</td><td>-</td><td>-</td></tr><tr><td>GEM (Du et al., 2021)</td><td>14.65</td><td>23.73</td><td>35.62</td><td>87.57</td></tr><tr><td>GASP (Dupont et al., 2022b)</td><td>9.29</td><td>27.31</td><td>24.37</td><td>37.46</td></tr><tr><td>∞-Diff (Ours)</td><td>4.57</td><td>3.02</td><td>3.87</td><td>10.36</td></tr></table>
194
+
195
+ Table 1: $\mathrm{FID}_{\mathrm{CLIP}}$ (Kynkäänniemi et al., 2023) evaluation against finite-dimensional methods as well as other infinite-dimensional approaches which are trained on coordinate subsets. * = Inception FID.
196
+
197
+ # 5 EXPERIMENTS
198
+
199
+ In this section we demonstrate that the proposed mollified diffusion process, modelled with neural-operator-based networks and trained on coordinate subsets, is able to generate high quality, high resolution samples. We explore the properties of this approach, including discretisation invariance and the impact of the number of coordinates during training, and compare the sample quality of our approach with other infinite dimensional generative models. We train models on $256 \times 256$ datasets, FFHQ (Karras et al., 2019) and LSUN Church (Yu et al., 2015), as well as CelebA-HQ (Karras et al., 2018); unless otherwise specified, models are trained on $1/4$ of pixels (to fit in memory), randomly selected.
200
+
201
+ Very large batch sizes are necessary to train diffusion models due to the high variance (Hoogeboom et al., 2023), making training on high resolution data on a single GPU impractical. To address this, we use diffusion autoencoders (Preechakul et al., 2022) to reduce stochasticity: during training, sparsely sampled data are encoded to small latent vectors and used to condition our pixel-level infinite-dimensional diffusion model, allowing better estimation of the denoised data at high time steps. Subsequently, a small diffusion model is quickly trained to model these latents. Our encoder is the downsampling part of our architecture (left half of Fig. 4). When sampling, we use the deterministic DDIM interpretation with 100 steps. Additional details are in Appendix A. Source code is available at https://github.com/samb-t/infty-diff.
202
+
203
+ Sample Quality Samples from our approach, which are high quality, diverse, and capture fine details, can be found in Fig. 5. In Table 1 we quantitatively compare with other approaches that treat inputs as infinite dimensional data, as well as more traditional approaches that assume data lies on a fixed grid. As proposed by Kynkäänniemi et al. (2023), we calculate FID (Heusel et al., 2017) using CLIP features (Radford et al., 2021), which is better correlated with human perception of image quality. Our approach scales to high resolutions much more effectively than the other function-based approaches, as evidenced by the substantially lower scores. Visual comparison between samples from our approach and other function-based approaches can be found in Fig. 6, where samples from our approach can be seen to be higher quality and display more details without blurring or adversarial
204
+
205
+ ![](images/8f5c0bad593ea88b070b3b05f00fc9b71f674a86f677bae4e13437cc9bf0c9ea.jpg)
206
+ Figure 6: Qualitative comparison with other infinite dimensional approaches.
207
+
208
+ ![](images/bd9b98c4a3592c3789ac66c45c52d6e78200527bc34479e370d6c2435c09c82e.jpg)
209
+ Table 3: Impact of subsampling rate on quality for FFHQ 128. $\mathrm{FID}_{\mathrm{CLIP}}$ calculated with 10k samples.
210
+
211
+ ![](images/144166952d0148622fb71b6ace56e022edc7f7ca4ab24ab8cd595fa5334fc7ec.jpg)
212
+
213
+ ![](images/79c7ba1663f4625fa9bd680fa5efc6135a67bd16ede936e2b990c65f3bfa05d7.jpg)
214
+ Figure 7: $\mathrm{FID}_{\mathrm{CLIP}}$ at various steps & resolutions. Table 2: Architectural component ablations in terms of $\mathrm{FID}_{\mathrm{CLIP}}$.
215
+
216
+ <table><tr><td>Architecture</td><td>FID<sub>CLIP</sub></td></tr><tr><td>Sparse Downsample</td><td>85.99</td></tr><tr><td>Nonlinear Kernel</td><td>24.49</td></tr><tr><td>Quasi Monte Carlo</td><td>7.63</td></tr><tr><td>Regular Convs</td><td>5.63</td></tr><tr><td>∞-Diff (Ours)</td><td>4.75</td></tr></table>
217
+
218
+ <table><tr><td>Rate</td><td>FID<sub>CLIP</sub></td><td>Speedup</td></tr><tr><td>1×</td><td>3.15</td><td>1.0×</td></tr><tr><td>2×</td><td>4.12</td><td>1.0×</td></tr><tr><td>4×</td><td>4.75</td><td>1.3×</td></tr><tr><td>8×</td><td>6.48</td><td>1.6×</td></tr></table>
219
+
220
+ artefacts. All of these approaches are based on neural fields (Xie et al., 2022) where coordinates are treated independently; in contrast, our approach uses neural operators to transform functions using spatial context, thereby allowing more details to be captured. Both GASP (Dupont et al., 2022b) and GEM (Du et al., 2021) rely on compressed latent-conditional hypernetworks, which makes efficient scaling difficult. D2F (Dupont et al., 2022a) relies on a deterministic compression stage, which loses detail due to the finite vector size. DPF (Zhuang et al., 2023) uses small, fixed-size coordinate subsets as global context with other coordinates modelled implicitly, thereby causing blur.
221
+
222
+ Discretisation Invariance In Fig. 2 we demonstrate the discretisation invariance properties of our approach. After training on random coordinate subsets from $256 \times 256$ images, we can sample from this model at arbitrary resolutions, which we demonstrate at resolutions from $64 \times 64$ to $1024 \times 1024$ by initialising the diffusion with different-sized noise. We experimented with (alias-free) continuously parameterised kernels (Romero et al., 2022) but found bilinearly interpolated kernels to be more effective. At each resolution, even those exceeding the training data, samples are consistent and diverse. In Fig. 7 we analyse how the number of sampling steps affects quality at different sampling resolutions.
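+
+ As a small illustration of how the sampling resolution is chosen, the initial state can simply be white noise instantiated at the target size and mollified by the same Gaussian blur $T$ used during training; the kernel truncation below is our choice for the sketch.
+
+ ```python
+ import torch
+ from torchvision.transforms.functional import gaussian_blur
+
+ def initial_mollified_noise(resolution, channels=3, sigma=1.0):
+     """White noise drawn at the requested resolution and mollified by a Gaussian
+     blur; reverse diffusion then proceeds from this state."""
+     noise = torch.randn(1, channels, resolution, resolution)
+     return gaussian_blur(noise, kernel_size=9, sigma=sigma)
+
+ for res in (64, 256, 1024):
+     print(initial_mollified_noise(res).shape)
+ ```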
223
+
224
+ Coordinate Sparsity One factor influencing the quality of samples is the number of coordinates sampled during training; fewer coordinates mean fewer points from which to approximate each integral. We analyse the impact of this in Table 3, finding that, as expected, performance decreases with fewer coordinates; however, this effect is fairly minimal. Fewer coordinates also bring substantial speedups and memory savings; at $256 \times 256$ with $4 \times$ subsampling the speedup is $1.4 \times$.
225
+
226
+ Architecture Analysis In Table 2 we ablate the impact of various architecture choices against the architecture described in Section 4.2, matching the architecture as closely as possible. In particular, sparse downsampling (performed by randomly subsampling coordinates; we observed similar results with equidistant subsampling, Qi et al., 2017) fails to capture the distribution. Similarly, using a spatially nonlinear kernel (Eq. 13), implemented as conv, activation, conv, does not generalise as well as linear kernels (we observed similar behaviour for softmax transformers, Kovachki et al., 2021).
227
+
228
+ Super-resolution The discretisation invariance properties of the proposed approach make super-resolution a natural application. We evaluate this in a simple way, passing a low-resolution image through the encoder, then sampling at a higher resolution; see Fig. 8, where it is clear that more details have been added. A downside of this specific approach is that information is lost in the encoding process; however, this could potentially be improved by incorporating DDIM encodings (Song et al., 2021).
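+
+ Schematically, the procedure is encode-then-sample-larger; in the sketch below both the encoder and the sampler are placeholders standing in for trained model components, so it only illustrates the data flow.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Hypothetical stand-ins for the trained model: a semantic encoder and a
+ # latent-conditioned sampler that accepts an arbitrary output resolution.
+ encoder = nn.Sequential(nn.Conv2d(3, 8, 3, stride=2, padding=1), nn.SiLU(),
+                         nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 32))
+
+ def sample(z, resolution):
+     """Placeholder for the reverse diffusion process run at `resolution`
+     conditioned on latent `z` (here it just returns noise of the right size)."""
+     return torch.randn(z.shape[0], 3, resolution, resolution)
+
+ low_res = torch.rand(1, 3, 128, 128)    # image to be super-resolved
+ z = encoder(low_res)                    # encode the low-resolution input
+ high_res = sample(z, resolution=512)    # decode at a higher resolution
+ print(high_res.shape)                   # torch.Size([1, 3, 512, 512])
+ ```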
229
+
230
+ ![](images/9a7883a48ccf443e9abffa394f497cd2ad8dad771df2673190e2146e9a55d832.jpg)
231
+ Figure 8: Super-resolution
232
+
233
+ Inpainting Inpainting is possible with mollified diffusion (Fig. 9), using reconstruction guidance (Ho et al., 2022b), $x_{t-1} \gets x_{t-1} - \lambda \nabla_{x_t} \| m \odot (\tilde{\mu}_0(x_t, t) - T\bar{x}) \|_2^2$ for inpainting mask $m$ , learned estimate of $Tx_0$ , $\tilde{\mu}_0$ , and image to be inpainted $\bar{x}$ . The diffusion autoencoder framework gives an additional level of control when inpainting since the reverse diffusion process can be applied to encodings from a chosen time step $t_s$ , allowing control over how different the inpainted region is from the original image.
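+
+ A minimal PyTorch sketch of one such guidance update is shown below; `denoised_fn` stands in for the model's estimate $\tilde{\mu}_0(x_t, t)$, and the helper names and guidance weight are ours.
+
+ ```python
+ import torch
+
+ def guided_inpainting_step(x_t, x_prev, mask, target_Tx, denoised_fn, lam=1.0):
+     """One reconstruction-guidance update: nudge x_{t-1} so that the model's
+     current estimate of the clean (mollified) image agrees with the known
+     pixels, following the update rule quoted above."""
+     x_t = x_t.detach().requires_grad_(True)
+     recon_err = ((mask * (denoised_fn(x_t) - target_Tx)) ** 2).sum()
+     grad = torch.autograd.grad(recon_err, x_t)[0]
+     return x_prev - lam * grad
+
+ # Toy usage: keep the left half of the image, inpaint the right half.
+ x_t = torch.rand(1, 3, 64, 64)
+ x_prev = torch.rand(1, 3, 64, 64)
+ mask = torch.zeros(1, 1, 64, 64); mask[..., :32] = 1.0   # 1 = known region
+ target = torch.rand(1, 3, 64, 64)                        # mollified known image
+ denoised_fn = lambda x: 0.5 * x                          # placeholder for the model estimate
+ x_prev = guided_inpainting_step(x_t, x_prev, mask, target, denoised_fn)
+ print(x_prev.shape)
+ ```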
234
+
235
+ ![](images/dc3e7c261ab9444a426ea0a345fdd9d1da0081a8bd2ee01a56d1fce1ed259786.jpg)
236
+ Figure 9: Inpainting.
237
+
238
+ # 6 DISCUSSION
239
+
240
+ There are a number of interesting directions to improve our approach including more powerful/efficient neural operators, more efficient sparse methods, better integral approximations, and improved UNet design (Williams et al., 2023). Having demonstrated that diffusion models can be trained with $8 \times$ subsampling rates, we believe there is substantial room for further performance gains. Also of interest are recent works which speed up diffusion sampling by iteratively upsampling throughout the backwards process, requiring a separate model for each resolution (Jing et al., 2022; Zhang et al., 2023); the resolution invariance of our approach permits this with a single model.
241
+
242
+ Recent diffusion advances are also complementary to our approach; these include consistency models (Song et al., 2023), stochastic interpolants (Albergo et al., 2023), Schrödinger bridges (De Bortoli et al., 2021), critically-damped diffusion (Dockhorn et al., 2022), architecture improvements (Hoogeboom et al., 2023), and faster solvers (Lu et al., 2022). Similar to our mollified diffusion, blurring has been used to improve diffusion (Rissanen et al., 2023; Hoogeboom and Salimans, 2023). Similar to GASP (Dupont et al., 2022b), other neural field GAN approaches exist such as CIPS (Anokhin et al., 2021) and Poly-INR (Singh et al., 2023); however, these approaches use convolutional discriminators requiring all coordinates on a fixed grid, preventing scaling to infinite resolutions. Also of relevance are Neural Processes (Garnelo et al., 2018; Dutordoir et al., 2022) which learn distributions over functions similar to Gaussian Processes; however, these approaches address conditional inference, whereas we construct an unconditional generative model for substantially more complex data.
243
+
244
+ Concurrent with this work, other papers independently proposed diffusion models in infinite dimensions (Lim et al., 2023; Franzese et al., 2023; Hagemann et al., 2023; Zhuang et al., 2023; Kerrigan et al., 2023; Baldassari et al., 2023; Pidstrigach et al., 2023). These approaches are complementary to ours and distinct in a number of ways. While our work focuses on the practical development necessary to efficiently model complex high-dimensional data, these papers instead focus more on theoretical foundations, typically being only applied to simple data (e.g. Gaussian mixtures and MNIST). Of particular interest, Kerrigan et al. (2023) also develop diffusion models in Hilbert space, going further than our work in foundational theory, including more on the requirements to obtain well-posed models, as well as considering different function spaces; Lim et al. (2023) develop infinite-dimensional diffusion defined as SDEs; and Franzese et al. (2023) prove the existence of the backwards SDE. Unlike our work, these approaches make use of conditional neural fields or operate on uniform grids of coordinates, whereas our approach operates on raw sparse data, enabling better scaling. The closest to this work in terms of scaling is Diffusion Probabilistic Fields (Zhuang et al., 2023) which denoises coordinates independently using small coordinate subsets for context; this is much more restrictive than our approach and resolutions are much smaller than ours (up to $64 \times 64$ ).
245
+
246
+ # 7 CONCLUSION
247
+
248
+ In conclusion, we found that our infinite-dimensional Hilbert space diffusion model with transition densities represented by non-local integral operators is able to generate high-quality arbitrary resolution samples. Despite only observing small subsets of pixels during training, sample quality significantly surpasses prior infinite-dimensional generative models, and is competitive with state-of-the-art finite-dimensional models trained on all pixels at once. While prior infinite-dimensional approaches use latent conditional neural fields, our findings demonstrate that sparse neural operators which operate directly on raw data are a capable alternative, offering significant advantages by removing the point-wise constraint and thus the need for latent compression. Future work would benefit from improved neural operators that can effectively operate at greater levels of sparsity to improve the efficiency of our approach and enable even further scaling.
249
+
250
+ # REFERENCES
251
+
252
+ Michael S Albergo, Nicholas M Boffi, and Eric Vanden-Eijnden. Stochastic Interpolants: A Unifying Framework for Flows and Diffusions. arXiv preprint arXiv:2303.08797, 2023.
253
+ Ivan Anokhin, Kirill Demochkin, Taras Khakhulin, Gleb Sterkin, Victor Lempitsky, and Denis Korzhenkov. Image Generators with Conditionally-Independent Pixel Synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14278-14287, 2021.
254
+ Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer Normalization. arXiv preprint arXiv:1607.06450, 2016.
255
+ Lorenzo Baldassari, Ali Siahkoohi, Josselin Garnier, Knut Solna, and Maarten V de Hoop. Conditional Score-Based Diffusion Models for Bayesian Inference in Infinite Dimensions. arXiv preprint arXiv:2305.19147, 2023.
256
+ Jan Biemond, Reginald L Lagendijk, and Russell M Mersereau. Iterative Methods for Image Deblurring. Proceedings of the IEEE, 78(5):856-883, 1990.
257
+ Christopher M Bishop and Nasser M Nasrabadi. Pattern Recognition and Machine Learning, volume 4. Springer, 2006.
258
+ Sam Bond-Taylor and Chris G Willcocks. Gradient Origin Networks. In International Conference on Learning Representations, 2021.
259
+ Sam Bond-Taylor, Peter Hessey, Hiroshi Sasaki, Toby P Breckon, and Chris G Willcocks. Unleashing Transformers: Parallel Token Prediction with Discrete Absorbing Diffusion for Fast High-Resolution Image Generation from Vector-Quantized Codes. In European Conference on Computer Vision, pages 170–188. Springer, 2022.
260
+ Shuhao Cao. Choose a Transformer: Fourier or Galerkin. Advances in Neural Information Processing Systems, 34:24924-24940, 2021.
261
+ Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019.
262
+ Spconv Contributors. Spconv: Spatially Sparse Convolution Library. https://github.com/traveller59/spconv, 2022.
263
+ Giuseppe Da Prato and Jerzy Zabczyk. Stochastic Equations in Infinite Dimensions. Cambridge university press, 2014.
264
+ Valentin De Bortoli, James Thornton, Jeremy Heng, and Arnaud Doucet. Diffusion Schrödinger Bridge with Applications to Score-Based Generative Modeling. Advances in Neural Information Processing Systems, 34:17695-17709, 2021.
265
+ Tim Dockhorn, Arash Vahdat, and Karsten Kreis. Score-Based Generative Modeling with Critically-Damped Langevin Diffusion. In International Conference on Learning Representations, 2022.
266
+ Yilun Du, Katie Collins, Josh Tenenbaum, and Vincent Sitzmann. Learning Signal-Agnostic Manifolds of Neural Fields. Advances in Neural Information Processing Systems, 34:8320-8331, 2021.
267
+ Emilien Dupont, Hyunjik Kim, SM Ali Eslami, Danilo Jimenez Rezende, and Dan Rosenbaum. From data to functa: Your data point is a function and you can treat it like one. In International Conference on Machine Learning, pages 5694-5725. PMLR, 2022a.
268
+ Emilien Dupont, Yee Whye Teh, and Arnaud Doucet. Generative Models as Distributions of Functions. In International Conference on Artificial Intelligence and Statistics, pages 2989-3015. PMLR, 2022b.
269
+ Vincent Dutordoir, Alan Saul, Zoubin Ghahramani, and Fergus Simpson. Neural Diffusion Processes. arXiv preprint arXiv:2206.03992, 2022.
270
+
271
+ Giulio Franzese, Simone Rossi, Dario Rossi, Markus Heinonen, Maurizio Filippone, and Pietro Michiardi. Continuous-Time Functional Diffusion Processes. arXiv preprint arXiv:2303.00800, 2023.
272
+ Marta Garnelo, Jonathan Schwarz, Dan Rosenbaum, Fabio Viola, Danilo J Rezende, SM Eslami, and Yee Whye Teh. Neural processes. ICML Workshop, 2018.
273
+ Paul Hagemann, Lars Ruthotto, Gabriele Steidl, and Nicole Tianjiao Yang. Multilevel Diffusion: Infinite Dimensional Score-Based Diffusion Models for Image Generation. arXiv preprint arXiv:2303.04772, 2023.
274
+ Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium. Advances in neural information processing systems, 30, 2017.
275
+ Dave Higdon. Space and Space-Time Modeling using Process Convolutions. In Quantitative methods for current environmental issues, pages 37-56. Springer, 2002.
276
+ Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising Diffusion Probabilistic Models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020.
277
+ Jonathan Ho, Chitwan Saharia, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded Diffusion Models for High Fidelity Image Generation. J. Mach. Learn. Res., 23(47): 1-33, 2022a.
278
+ Jonathan Ho, Tim Salimans, Alexey A Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video Diffusion Models. In Advances in Neural Information Processing Systems, 2022b.
279
+ Emiel Hoogeboom and Tim Salimans. Blurring Diffusion Models. In International Conference on Learning Representations, 2023.
280
+ Emiel Hoogeboom, Jonathan Heek, and Tim Salimans. Simple Diffusion: End-to-End Diffusion for High Resolution Images. arXiv preprint arXiv:2301.11093, 2023.
281
+ Robert A Hummel, B Kimia, and Steven W Zucker. Deblurring Gaussian Blur. Computer Vision, Graphics, and Image Processing, 38(1):66-80, 1987.
282
+ Aapo Hyvarinen. Estimation of Non-Normalized Statistical Models by Score Matching. Journal of Machine Learning Research, 6(4), 2005.
283
+ Bowen Jing, Gabriele Corso, Renato Berlinghieri, and Tommi Jaakkola. Subspace Diffusion Generative Models. In European Conference on Computer Vision, pages 274-289. Springer, 2022.
284
+ Fritz John. Numerical Solution of the Equation of Heat Conduction for Preceding Times. Annali di Matematica pura ed Applicata, 40:129-142, 1955.
285
+ Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive Growing of GANs for Improved Quality, Stability, and Variation. In International Conference on Learning Representations, 2018.
286
+ Tero Karras, Samuli Laine, and Timo Aila. A Style-Based Generator Architecture for Generative Adversarial Networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4401-4410, 2019.
287
+ Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and Improving the Image Quality of StyleGAN. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020.
288
+ Gavin Kerrigan, Justin Ley, and Padhraic Smyth. Diffusion Generative Models in Infinite Dimensions. In International Conference on Artificial Intelligence and Statistics, pages 9538-9563. PMLR, 2023.
289
+ Diederik P Kingma and Jimmy Ba. Adam: A Method for Stochastic Optimization. In International Conference on Learning Representations, 2015.
290
+
291
+ Nikola Kovachki, Zongyi Li, Burigede Liu, Kamyar Azizzadenesheli, Kaushik Bhattacharya, Andrew Stuart, and Anima Anandkumar. Neural Operator: Learning Maps Between Function Spaces. arXiv preprint arXiv:2108.08481, 2021.
292
+ Alexander Kukush. Gaussian Measures in Hilbert Space: Construction and Properties. John Wiley & Sons, 2020.
293
+ Tuomas Kynkäänniemi, Tero Karras, Miika Aittala, Timo Aila, and Jaakko Lehtinen. The Role of ImageNet Classes in Fréchet Inception Distance. In International Conference on Learning Representations, 2023.
294
+ Zongyi Li, Nikola Kovachki, Kamyar Azizzadenesheli, Burigede Liu, Kaushik Bhattacharya, Andrew Stuart, and Anima Anandkumar. Neural Operator: Graph Kernel Network for Partial Differential Equations. arXiv preprint arXiv:2003.03485, 2020.
295
+ Zongyi Li, Nikola Borislavov Kovachki, Kamyar Azizzadenesheli, Kaushik Bhattacharya, Andrew Stuart, Anima Anandkumar, et al. Fourier Neural Operator for Parametric Partial Differential Equations. In International Conference on Learning Representations, 2021.
296
+ Jae Hyun Lim, Nikola B Kovachki, Ricardo Baptista, Christopher Beckham, Kamyar Azizzadenesheli, Jean Kossaifi, Vikram Voleti, Jiaming Song, Karsten Kreis, Jan Kautz, et al. Score-Based Diffusion Models in Function Space. arXiv preprint arXiv:2302.07400, 2023.
297
+ Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11976-11986, 2022.
298
+ Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu. Dpm-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps. In Advances in Neural Information Processing Systems, 2022.
299
+ Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. Communications of the ACM, 65(1):99-106, 2021.
300
+ Hà Quang Minh. Regularized Divergences Between Covariance Operators and Gaussian Measures on Hilbert Spaces. Journal of Theoretical Probability, 34:580-643, 2021.
301
+ Alexander Quinn Nichol and Prafulla Dhariwal. Improved Denoising Diffusion Probabilistic Models. In International Conference on Machine Learning, pages 8162-8171. PMLR, 2021.
302
+ Jakiw Pidstrigach, Youssef Marzouk, Sebastian Reich, and Sven Wang. Infinite-Dimensional Diffusion Models for Function Spaces. arXiv preprint arXiv:2302.10130, 2023.
303
+ Francis J Pinski, Gideon Simpson, Andrew M Stuart, and Hendrik Weber. Kullback-Leibler Approximation for Probability Measures on Infinite Dimensional Spaces. SIAM Journal on Mathematical Analysis, 47(6):4091-4122, 2015.
304
+ Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion Autoencoders: Toward a Meaningful and Decodable Representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10619-10629, 2022.
305
+ Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space. Advances in neural information processing systems, 30, 2017.
306
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning Transferable Visual Models from Natural Language Supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021.
307
+ Md Ashiqur Rahman, Zachary E Ross, and Kamyar Azizzadenesheli. U-NO: U-Shaped Neural Operators. arXiv preprint arXiv:2204.11127, 2022.
308
+
309
+ Danilo Rezende and Shakir Mohamed. Variational Inference with Normalizing Flows. In International Conference on Machine Learning, pages 1530-1538. PMLR, 2015.
310
+ Severi Rissanen, Markus Heinonen, and Arno Solin. Generative Modelling with Inverse Heat Dissipation. In International Conference on Learning Representations, 2023.
311
+ Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-Resolution Image Synthesis with Latent Diffusion Models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684–10695, 2022.
312
+ David W Romero, Robert-Jan Bruintjes, Jakub Mikolaj Tomczak, Erik J Bekkers, Mark Hoogendoorn, and Jan van Gemert. Flexconv: Continuous kernel convolutions with differentiable kernel sizes. In International Conference on Learning Representations, 2022.
313
+ Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional Networks for Biomedical Image Segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015.
314
+ Tim Salimans and Jonathan Ho. Progressive Distillation for Fast Sampling of Diffusion Models. In International Conference on Learning Representations, 2022.
315
+ Rajhans Singh, Ankita Shukla, and Pavan Turaga. Polynomial Implicit Neural Representations For Large Diverse Datasets. arXiv preprint arXiv:2301.11093, 2023.
316
+ Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. Advances in Neural Information Processing Systems, 33:7462-7473, 2020.
317
+ Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep Unsupervised Learning using Nonequilibrium Thermodynamics. In International Conference on Machine Learning, pages 2256-2265. PMLR, 2015.
318
+ Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising Diffusion Implicit Models. In International Conference on Learning Representations, 2021.
319
+ Yang Song and Stefano Ermon. Generative Modeling by Estimating Gradients of the Data Distribution. Advances in Neural Information Processing Systems, 32, 2019.
320
+ Yang Song, Prafulla Dhariwal, Mark Chen, and Ilya Sutskever. Consistency Models. arXiv preprint arXiv:2303.01469, 2023.
321
+ Haotian Tang, Zhijian Liu, Xiuyu Li, Yujun Lin, and Song Han. TorchSparse: Efficient Point Cloud Inference Engine. In Conference on Machine Learning and Systems (MLSys), 2022.
322
+ Ilya O Tolstikhin, Neil Houlsby, Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Thomas Unterthiner, Jessica Yung, Andreas Steiner, Daniel Keysers, Jakob Uszkoreit, et al. MLP-Mixer: An All-MLP Architecture for Vision. Advances in Neural Information Processing Systems, 34: 24261-24272, 2021.
323
+ Pascal Vincent. A Connection Between Score Matching and Denoising Autoencoders. Neural computation, 23(7):1661-1674, 2011.
324
+ Clinton Wang and Polina Golland. Discretization Invariant Learning on Neural Fields. In arXiv preprint arXiv:2206.01178, 2022.
325
+ Christopher Williams, Fabian Falck, George Deligiannidis, Chris Holmes, Arnaud Doucet, and Saifuddin Syed. A Unified Framework for U-Net Design and Analysis. arXiv preprint arXiv:2305.19638, 2023.
326
+ Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin, Vincent Sitzmann, and Srinath Sridhar. Neural Fields in Visual Computing and Beyond. In Computer Graphics Forum, volume 41, pages 641-676. Wiley Online Library, 2022.
327
+
328
+ Fisher Yu, Yinda Zhang, Shuran Song, Ari Seff, and Jianxiong Xiao. LSUN: Construction of a Large-scale Image Dataset using Deep Learning with Humans in the Loop. arXiv preprint arXiv:1506.03365, 2015.
329
+ Bowen Zhang, Shuyang Gu, Bo Zhang, Jianmin Bao, Dong Chen, Fang Wen, Yong Wang, and Baining Guo. StyleSwin: Transformer-Based GAN for High-Resolution Image Generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11304–11314, 2022.
330
+ Han Zhang, Ruili Feng, Zhantao Yang, Lianghua Huang, Yu Liu, Yifei Zhang, Yujun Shen, Deli Zhao, Jingren Zhou, and Fan Cheng. Dimensionality-Varying Diffusion Process. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14307-14316, 2023.
331
+ Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018.
332
+ Peiye Zhuang, Samira Abnar, Jiatao Gu, Alex Schwing, Joshua M Susskind, and Miguel Ángel Bautista. Diffusion Probabilistic Fields. In International Conference on Learning Representations, 2023.
333
+
334
+ # A IMPLEMENTATION DETAILS
335
+
336
+ All $256 \times 256$ models are trained on a single NVIDIA A100 80GB GPU using automatic mixed precision. Optimisation is performed using the Adam optimiser (Kingma and Ba, 2015) with a batch size of 32 and a learning rate of $5 \times 10^{-5}$; each model is trained so as to minimise validation loss. Each model is trained as a diffusion autoencoder to reduce training variance, allowing much smaller batch sizes and thereby permitting training on a single GPU. A latent size of 1024 is used, and the latent model architecture and diffusion hyperparameters are the same as those used by Preechakul et al. (2022). In image space, the diffusion model uses a cosine noise schedule (Nichol and Dhariwal, 2021) with 1000 steps. Mollification is performed with a Gaussian blur with a variance of 1.0.
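+
+ For concreteness, the sketch below shows one way to implement the Gaussian-blur mollifier and the cosine $\bar{\alpha}_t$ schedule, and to apply the forward (mollified) noising step of Eq. (18); the blur kernel truncation is our choice.
+
+ ```python
+ import math
+ import torch
+ from torchvision.transforms.functional import gaussian_blur
+
+ def mollify(x, sigma=1.0, kernel_size=9):
+     """Gaussian-blur mollifier T with variance 1.0 as described above; the
+     kernel size is a truncation chosen for this sketch."""
+     return gaussian_blur(x, kernel_size=kernel_size, sigma=sigma)
+
+ def cosine_alpha_bar(num_steps=1000, s=0.008):
+     """Cosine noise schedule (Nichol and Dhariwal, 2021): returns alpha_bar for t = 1..T."""
+     t = torch.linspace(0, 1, num_steps + 1)
+     f = torch.cos((t + s) / (1 + s) * math.pi / 2) ** 2
+     return (f / f[0])[1:]
+
+ alpha_bar = cosine_alpha_bar()
+ x0 = torch.rand(1, 3, 64, 64)
+ xi = torch.randn_like(x0)
+ t = 500
+ # Forward noising: x_t = sqrt(alpha_bar_t) T x0 + sqrt(1 - alpha_bar_t) T xi
+ x_t = alpha_bar[t].sqrt() * mollify(x0) + (1 - alpha_bar[t]).sqrt() * mollify(xi)
+ print(x_t.shape)
+ ```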
337
+
338
+ For the image-space architecture, 3 sparse residual convolution operator blocks are used on the sparse data. Each of these consists of a single depthwise sparse convolution layer with kernel size 7 and 64 channels with the output normalised by the total number of coordinates in each local region, followed by a three layer MLP; modulated layer normalisation (Ba et al., 2016; Nichol and Dhariwal, 2021) is used to normalise and condition on the diffusion time step. These blocks use larger convolution kernels than typically used in diffusion model architectures to increase the number of coordinates present in the kernel when a small number of coordinates are sampled. Using large kernel sizes paired with MLPs has found success in recent classification models such as ConvNeXt (Liu et al., 2022).
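+
+ The block structure can be sketched as follows, using a regular dense depthwise convolution as a stand-in for the sparse convolution (so the per-region coordinate-count normalisation is omitted); dimensions and names are illustrative.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ResidualOperatorBlock(nn.Module):
+     """Dense stand-in for the sparse residual convolution operator block: a
+     depthwise 7x7 convolution, followed by a three-layer MLP over channels,
+     with time-modulated layer normalisation."""
+
+     def __init__(self, channels=64, time_dim=128):
+         super().__init__()
+         self.depthwise = nn.Conv2d(channels, channels, kernel_size=7,
+                                    padding=3, groups=channels)
+         self.norm = nn.LayerNorm(channels)
+         self.time_mod = nn.Linear(time_dim, 2 * channels)   # scale and shift from t
+         self.mlp = nn.Sequential(nn.Linear(channels, 4 * channels), nn.GELU(),
+                                  nn.Linear(4 * channels, 4 * channels), nn.GELU(),
+                                  nn.Linear(4 * channels, channels))
+
+     def forward(self, x, t_emb):
+         h = self.depthwise(x)                               # (B, C, H, W)
+         h = h.permute(0, 2, 3, 1)                           # channels-last for LayerNorm/MLP
+         scale, shift = self.time_mod(t_emb).chunk(2, dim=-1)
+         h = self.norm(h) * (1 + scale[:, None, None, :]) + shift[:, None, None, :]
+         h = self.mlp(h).permute(0, 3, 1, 2)
+         return x + h                                        # residual connection
+
+ block = ResidualOperatorBlock()
+ print(block(torch.rand(2, 64, 32, 32), torch.rand(2, 128)).shape)  # torch.Size([2, 64, 32, 32])
+ ```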
339
+
340
+ As mentioned in Section 4.2, for the grid-based component of our architecture we experimented with a variety of U-Net-shaped Fourier neural operator (Li et al., 2021; Rahman et al., 2022) architectures; although these more naturally allow resolution changes at those levels, we found this came with a substantial drop in performance. This was even the case when operating at different resolutions. As such, we use the architecture used by Nichol and Dhariwal (2021), fixed at a resolution of $128 \times 128$; the highest resolution uses 128 channels, which is doubled at each successive resolution up to a maximum factor of 8; attention is applied at resolutions 16 and 8, as is dropout, as suggested by Hoogeboom et al. (2023). Although this places more emphasis on the sparse operators for changes in sampling resolution, we found this approach to yield better sample quality across the board.
341
+
342
+ # A.1 TABLE 1 EXPERIMENT DETAILS
343
+
344
+ In Table 1 we compare against a number of other approaches in terms of $\mathrm{FID}_{\mathrm{CLIP}}$ . In each case scores are calculated by comparing samples against full datasets, with 30k samples used for CelebA-HQ and 50k samples used for FFHQ and LSUN Church. For the finite-dimensional approaches we use the official code and released models in all cases to generate samples for calculating $\mathrm{FID}_{\mathrm{CLIP}}$ scores. To provide additional context for CelebA-HQ $128\times 128$ we provide scores for StyleGAN2 and StyleSwin where samples are downsampled from $256\times 256$ .
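+
+ For reference, given pre-extracted CLIP image features for the two sets, $\mathrm{FID}_{\mathrm{CLIP}}$ reduces to the usual Fréchet distance between fitted Gaussians; a minimal version is sketched below, with feature extraction (e.g. by a CLIP image encoder) assumed to happen upstream.
+
+ ```python
+ import numpy as np
+ from scipy import linalg
+
+ def frechet_distance(feats_a, feats_b):
+     """Fréchet distance between Gaussians fitted to two feature sets; applied
+     to CLIP image features this gives FID_CLIP."""
+     mu_a, mu_b = feats_a.mean(0), feats_b.mean(0)
+     cov_a = np.cov(feats_a, rowvar=False)
+     cov_b = np.cov(feats_b, rowvar=False)
+     covmean = linalg.sqrtm(cov_a @ cov_b)
+     if np.iscomplexobj(covmean):
+         covmean = covmean.real
+     diff = mu_a - mu_b
+     return float(diff @ diff + np.trace(cov_a + cov_b - 2 * covmean))
+
+ # Toy check with random "features"; real use compares CLIP features of samples
+ # against CLIP features of the reference dataset.
+ print(frechet_distance(np.random.randn(1000, 64), np.random.randn(1000, 64)))
+ ```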
345
+
346
+ For GASP (Dupont et al., 2022b) we use the official code and pretrained models for CelebA-HQ at resolutions $64 \times 64$ and $128 \times 128$ ; for FFHQ and LSUN Church we train models using the same generator hyperparameters as used for all experiments in that work and for the discriminator we scale the model used for CelebA-HQ $128 \times 128$ by adding an additional block to account for the resolution change. These models were trained by randomly sampling $1/4$ of coordinates at each training step to match what was used to train our models.
347
+
348
+ Similarly, for GEM (Du et al., 2021) we use the official code and train models on CelebA-HQ $128 \times 128$ , FFHQ and LSUN Church, using the same hyperparameters as used for all experiments in that work (which are independent of modality/resolution); we implement sampling as described in Section 4.4 of that paper by randomly sampling a latent corresponding to a data point, linearly interpolating to a neighbouring latent, adding a small amount of Gaussian noise (with standard deviation chosen between 0 and 1 to optimise sample quality/diversity), and then projecting onto the local manifold. Because GEM is based on the autodecoder framework where latents are allocated per datapoint and jointly optimised with the model parameters, training times are proportional to the number of points in the dataset. As such, the FID scores for LSUN Church in particular are especially poor due to the large scale of the dataset.
349
+
350
+ # B MOLLIFIED DIFFUSION DERIVATION
351
+
352
+ In this section we derive the mollified diffusion process proposed in Section 3.2.
353
+
354
+ # B.1 FORWARD PROCESS
355
+
356
+ We start by specifically choosing $q(x_t|x_0)$ as defined in Section 2.1, where states are mollified by $T$,
357
+
358
+ $$
359
+ q \left(x _ {t - 1} \mid x _ {0}\right) = \mathcal {N} \left(x _ {t - 1}; \sqrt {\bar {\alpha} _ {t - 1}} T x _ {0}, (1 - \bar {\alpha} _ {t - 1}) T T ^ {*}\right), \tag {17}
360
+ $$
361
+
362
+ $$
363
+ q \left(x _ {t} \mid x _ {0}\right) = \mathcal {N} \left(x _ {t}; \sqrt {\bar {\alpha} _ {t}} T x _ {0}, \left(1 - \bar {\alpha} _ {t}\right) T T ^ {*}\right), \tag {18}
364
+ $$
365
+
366
+ from which we wish to find the corresponding representation of $q(x_{t-1}|x_t, x_0)$ . The solution to this is given by Bishop and Nasrabadi (2006, Equations 2.113 to 2.117), where we write
367
+
368
+ $$
369
+ q \left(x _ {t} \mid x _ {0}\right) = \mathcal {N} \left(x _ {t}; \mu , \Lambda^ {- 1}\right), \tag {19}
370
+ $$
371
+
372
+ $$
373
+ q \left(x _ {t - 1} \mid x _ {0}\right) = \mathcal {N} \left(x _ {t - 1}; A \mu + b, L ^ {- 1} + A \Lambda^ {- 1} A ^ {*}\right), \tag {20}
374
+ $$
375
+
376
+ $$
377
+ q \left(x _ {t - 1} \mid x _ {t}, x _ {0}\right) = \mathcal {N} \left(x _ {t - 1}; A x _ {t} + b, L ^ {- 1}\right). \tag {21}
378
+ $$
379
+
380
+ From this we can immediately see that $\mu = \sqrt{\bar{\alpha}_t} Tx_0$ and $\Lambda^{-1} = (1 - \bar{\alpha}_t)TT^*$. Additionally, $A\mu + b = A\sqrt{\bar{\alpha}_t} Tx_0 + b = \sqrt{\bar{\alpha}_{t-1}} Tx_0$; therefore, we can modify the approach of Ho et al. (2020), including $T$ where relevant, and set $A$, $b$, and $L^{-1}$ as
381
+
382
+ $$
383
+ A = \frac {\sqrt {\alpha_ {t}} (1 - \bar {\alpha} _ {t - 1})}{1 - \bar {\alpha} _ {t}} I, \quad b = \frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} T x _ {0}, \quad L ^ {- 1} = \frac {1 - \bar {\alpha} _ {t - 1}}{1 - \bar {\alpha} _ {t}} \beta_ {t} T T ^ {*} \tag {22}
384
+ $$
385
+
386
+ This can be shown to be correct by substituting $A$, $b$, and $L^{-1}$ into Eqs. (19) to (21); first:
387
+
388
+ $$
389
+ \begin{array}{l} A \mu + b = \frac {\sqrt {\bar {\alpha} _ {t}} \sqrt {\alpha_ {t}} \left(1 - \bar {\alpha} _ {t - 1}\right)}{1 - \bar {\alpha} _ {t}} T x _ {0} + b (23) \\ = \frac {\sqrt {\bar {\alpha} _ {t}} \sqrt {\alpha_ {t}} \left(1 - \bar {\alpha} _ {t - 1}\right) + \sqrt {\bar {\alpha} _ {t - 1}} \left(1 - \alpha_ {t}\right)}{1 - \bar {\alpha} _ {t}} T x _ {0} (24) \\ = \frac {\sqrt {\bar {\alpha} _ {t - 1}} \left(\alpha_ {t} - \bar {\alpha} _ {t} + 1 - \alpha_ {t}\right)}{1 - \bar {\alpha} _ {t}} T x _ {0} = \sqrt {\bar {\alpha} _ {t - 1}} T x _ {0}, (25) \\ \end{array}
390
+ $$
391
+
392
+ and secondly,
393
+
394
+ $$
395
+ \begin{array}{l} L ^ {- 1} + A \Lambda^ {- 1} A ^ {*} = \frac {\left(1 - \bar {\alpha} _ {t - 1}\right) \left(1 - \alpha_ {t}\right)}{1 - \bar {\alpha} _ {t}} T T ^ {*} + \frac {\alpha_ {t} \left(1 - \bar {\alpha} _ {t - 1}\right) ^ {2} \left(1 - \bar {\alpha} _ {t}\right)}{\left(1 - \bar {\alpha} _ {t}\right) ^ {2}} T T ^ {*} (26) \\ = \frac {1 - \bar {\alpha} _ {t - 1} - \alpha_ {t} + \bar {\alpha} _ {t} + \alpha_ {t} - 2 \bar {\alpha} _ {t} + \alpha_ {t} \bar {\alpha} _ {t - 1} ^ {2}}{1 - \bar {\alpha} _ {t}} T T ^ {*} (27) \\ = \frac {\left(1 - \bar {\alpha} _ {t}\right) \left(1 - \bar {\alpha} _ {t - 1}\right)}{1 - \bar {\alpha} _ {t}} T T ^ {*} = \left(1 - \bar {\alpha} _ {t - 1}\right) T T ^ {*}, (28) \\ \end{array}
396
+ $$
397
+
398
+ which together form $q(x_{t - 1}|x_0)$ .
399
+
400
+ # B.2 REVERSE PROCESS
401
+
402
+ In this section we derive the loss for optimising the proposed mollified diffusion process and discuss the options for parameterising the model. As stated in Section 3.2, we define the reverse transition densities as
403
+
404
+ $$
405
+ p _ {\theta} \left(x _ {t - 1} \mid x _ {t}\right) = \mathcal {N} \left(x _ {t - 1}; \mu_ {\theta} \left(x _ {t}, t\right), \sigma_ {t} ^ {2} T T ^ {*}\right), \tag {29}
406
+ $$
407
+
408
+ where $\sigma_t^2 = \beta_t$ or $\sigma_t^2 = \tilde{\beta}_t$. From this we can calculate the loss at time $t - 1$ in the same manner as the finite-dimensional case (Eq. 3) by calculating the Kullback-Leibler divergence in infinite dimensions (Pinski et al., 2015); for conciseness, we ignore additive constants throughout since they do not affect optimisation,
409
+
410
+ $$
411
+ \mathcal {L} _ {t - 1} = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| T ^ {- 1} \left(\tilde {\mu} _ {t} \left(x _ {t}, x _ {0}\right) - \mu_ {\theta} \left(x _ {t}, t\right)\right) \right\| _ {\mathcal {H}} ^ {2} \right]. \tag {30}
412
+ $$
413
+
414
+ To find a good representation for $\mu_{\theta}$ we expand out $\tilde{\mu}_t$ as defined in Eq. (10) in the above loss giving
415
+
416
+ $$
417
+ \mathcal {L} _ {t - 1} = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| T ^ {- 1} \left(\frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} T x _ {0} + \frac {\sqrt {\alpha_ {t}} (1 - \bar {\alpha} _ {t - 1})}{1 - \bar {\alpha} _ {t}} x _ {t} - \mu_ {\theta} \left(x _ {t}, t\right)\right) \right\| _ {\mathcal {H}} ^ {2} \right]. \tag {31}
418
+ $$
419
+
420
+ From this we can see that one possible parameterisation is to directly predict $x_0$ , that is,
421
+
422
+ $$
423
+ \mu_ {\theta} \left(x _ {t}, t\right) = \frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} T f _ {\theta} \left(x _ {t}, t\right) + \frac {\sqrt {\alpha_ {t}} \left(1 - \bar {\alpha} _ {t - 1}\right)}{1 - \bar {\alpha} _ {t}} x _ {t}. \tag {32}
424
+ $$
425
+
426
+ This parameterisation is interesting because when sampling, we can use the output of $f_{\theta}$ to directly obtain an estimate of the unmollified data. Additionally, when calculating the loss $\mathcal{L}_{t-1}$ , all $T$ and $T^{-1}$ terms cancel out meaning there are no concerns with reversing the mollification during training, which can be numerically unstable. To see this, we can further expand out Eq. (31) using the parameterisation of $\mu_{\theta}$ defined in Eq. (32),
427
+
428
+ $$
429
+ \begin{array}{l} \mathcal {L} _ {t - 1} = \mathbb {E} _ {q} \Bigg [ \frac {1}{2 \sigma_ {t} ^ {2}} \Bigg \| T ^ {- 1} \Big (\frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} T x _ {0} + \frac {\sqrt {\alpha_ {t}} (1 - \bar {\alpha} _ {t - 1})}{1 - \bar {\alpha} _ {t}} x _ {t} (33) \\ \quad - \frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} T f _ {\theta} (x _ {t}, t) - \frac {\sqrt {\alpha_ {t}} (1 - \bar {\alpha} _ {t - 1})}{1 - \bar {\alpha} _ {t}} x _ {t}\Big ) \Bigg \| _ {\mathcal {H}} ^ {2} \Bigg ] \\ = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| \frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} x _ {0} - \frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} f _ {\theta} (x _ {t}, t) \right\| _ {\mathcal {H}} ^ {2} \right] (34) \\ = \mathbb {E} _ {q} \left[ \frac {\bar {\alpha} _ {t - 1} \beta_ {t} ^ {2}}{2 \sigma_ {t} ^ {2} (1 - \bar {\alpha} _ {t}) ^ {2}} \| x _ {0} - f _ {\theta} (x _ {t}, t) \| _ {\mathcal {H}} ^ {2} \right]. (35) \\ \end{array}
430
+ $$
431
+
432
+ Using this parameterisation, we can sample from $p_{\theta}(x_{t - 1}|x_t)$ as
433
+
434
+ $$
435
+ x _ {t - 1} = \frac {\sqrt {\alpha_ {t}} (1 - \bar {\alpha} _ {t - 1})}{1 - \bar {\alpha} _ {t}} x _ {t} + T \left(\frac {\sqrt {\bar {\alpha} _ {t - 1}} \beta_ {t}}{1 - \bar {\alpha} _ {t}} f _ {\theta} \left(x _ {t}, t\right) + \sigma_ {t} \xi\right) \quad \text {where} \quad \xi \sim \mathcal {N} (0, C _ {I}). \tag {36}
436
+ $$
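+
+ A direct transcription of this sampling step into PyTorch might look as follows, with a Gaussian blur standing in for $T$, a placeholder $x_0$-predictor, and an illustrative (linear) schedule; all helper names are ours.
+
+ ```python
+ import torch
+ from torchvision.transforms.functional import gaussian_blur
+
+ def reverse_step(x_t, f_theta, t, alpha, alpha_bar, beta, sigma):
+     """One ancestral sampling step of Eq. (36) under the x0-prediction
+     parameterisation, where f_theta(x_t, t) estimates x_0."""
+     T = lambda u: gaussian_blur(u, kernel_size=9, sigma=1.0)   # mollifier stand-in
+     coef_xt = alpha[t].sqrt() * (1 - alpha_bar[t - 1]) / (1 - alpha_bar[t])
+     coef_x0 = alpha_bar[t - 1].sqrt() * beta[t] / (1 - alpha_bar[t])
+     xi = torch.randn_like(x_t)                                 # xi ~ N(0, C_I)
+     return coef_xt * x_t + T(coef_x0 * f_theta(x_t, t) + sigma[t] * xi)
+
+ betas = torch.linspace(1e-4, 0.02, 1000)
+ alphas = 1 - betas
+ alpha_bars = torch.cumprod(alphas, dim=0)
+ f_theta = lambda x, t: torch.zeros_like(x)                     # placeholder x0 predictor
+ x = torch.randn(1, 3, 64, 64)
+ x = reverse_step(x, f_theta, t=999, alpha=alphas, alpha_bar=alpha_bars,
+                  beta=betas, sigma=betas.sqrt())
+ print(x.shape)
+ ```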
437
+
438
+ Alternatively, we can parameterise $\mu_{\theta}$ to predict the noise $\xi$ rather than $x_0$, which was found by Ho et al. (2020) to yield higher sample quality. To see this, we can write Eq. (9) as $x_{t}(x_{0},\xi) = \sqrt{\bar{\alpha}_{t}} Tx_{0} + \sqrt{1 - \bar{\alpha}_{t}} T\xi$. Expanding out Eq. (30) with this gives the following loss,
439
+
440
+ $$
441
+ \begin{array}{l} \mathcal {L} _ {t - 1} = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| T ^ {- 1} \left(\tilde {\mu} \left(x _ {t}, x _ {0}\right) - \mu_ {\theta} \left(x _ {t}, t\right)\right) \right\| _ {\mathcal {H}} ^ {2} \right] (37) \\ = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| T ^ {- 1} \left(\tilde {\mu} \left(x _ {t} \left(x _ {0}, \xi\right), \frac {1}{\sqrt {\bar {\alpha} _ {t}}} T ^ {- 1} \left(x _ {t} \left(x _ {0}, \xi\right) - \sqrt {1 - \bar {\alpha} _ {t}} T \xi\right)\right) - \mu_ {\theta} \left(x _ {t}, t\right)\right) \right\| _ {\mathcal {H}} ^ {2} \right] (38) \\ = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| T ^ {- 1} \left(\frac {1}{\sqrt {\alpha_ {t}}} \left(x _ {t} \left(x _ {0}, \xi\right) - \frac {\beta_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} T \xi\right) - \mu_ {\theta} \left(x _ {t}, t\right)\right) \right\| _ {\mathcal {H}} ^ {2} \right]. (39) \\ \end{array}
442
+ $$
443
+
444
+ Since directly predicting $\xi$ would require predicting a non-continuous function, we instead propose predicting $T\xi$ which is a continuous function, giving the following parameterisation and loss,
445
+
446
+ $$
447
+ \mu_ {\theta} \left(x _ {t}, t\right) = \frac {1}{\sqrt {\alpha_ {t}}} \left[ x _ {t} - \frac {\beta_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} f _ {\theta} \left(x _ {t}, t\right) \right], \tag {40}
448
+ $$
449
+
450
+ $$
451
+ \mathcal {L} _ {t - 1} = \mathbb {E} _ {q} \left[ \frac {1}{2 \sigma_ {t} ^ {2}} \left\| \frac {1}{\sqrt {\alpha_ {t}}} T ^ {- 1} \left(\frac {\beta_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} f _ {\theta} \left(x _ {t}, t\right) - \frac {\beta_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} T \xi\right) \right\| _ {\mathcal {H}} ^ {2} \right]. \tag {41}
452
+ $$
453
+
454
+ In this case $T^{-1}$ is a linear transformation that does not affect the minima. In addition to this, we can remove the weights as suggested by Ho et al. (2020), giving the following proxy loss,
455
+
456
+ $$
457
+ \mathcal {L} _ {t - 1} ^ {\text {s i m p l e}} = \mathbb {E} _ {q} \left[ \| f _ {\theta} \left(x _ {t}, t\right) - T \xi \| _ {\mathcal {H}} ^ {2} \right]. \tag {42}
458
+ $$
459
+
460
+ An alternative parameterisation which can train more reliably is $v$ -prediction (Salimans and Ho, 2022; Hoogeboom et al., 2023); we experimented with this parameterisation but found mollified noise prediction to yield higher quality samples.
461
+
462
+ # C ADDITIONAL RESULTS
463
+
464
+ In this section, we provide additional details on the training speedup and memory reductions made possible by our multi-scale architecture, both from sparse coordinate sampling at different rates and with different fixed resolutions of the inner U-Net. In Table 4 we calculate the training speedup for a fixed memory budget of 10GB, while in Table 5 we calculate the memory reduction for a fixed batch size of 16. Additionally, we include an extra quantitative result to assess sample quality when sampling at a resolution of $1024 \times 1024$ from the FFHQ model trained only on $256 \times 256$ images: calculating $\mathrm{FID}_{\mathrm{CLIP}}$ on 5000 samples using 50 sampling steps, the model achieves a score of 15.84. We also provide additional samples from our models to visually assess quality. Detecting overfitting is crucial when training generative models. Scores such as FID are unable to detect overfitting, which makes identifying overfitting difficult in approaches such as GANs. Because diffusion models are trained to optimise a bound on the likelihood, training can be stopped so as to minimise validation loss. As further evidence, we provide the nearest-neighbour images in the training data to samples from our model, measured using LPIPS (Zhang et al., 2018).
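+
+ The nearest-neighbour check can be sketched with the `lpips` package as follows; batching, devices, and data loading are omitted, and the helper name is ours.
+
+ ```python
+ import torch
+ import lpips  # pip install lpips
+
+ def nearest_neighbours(sample, train_images, k=5):
+     """Return the k training images closest to `sample` under LPIPS.
+     Images are expected in [-1, 1] with shape (N, 3, H, W)."""
+     loss_fn = lpips.LPIPS(net="alex")
+     with torch.no_grad():
+         dists = torch.cat([loss_fn(sample, img.unsqueeze(0)).flatten()
+                            for img in train_images])
+     return torch.topk(dists, k, largest=False)
+
+ sample = torch.rand(1, 3, 64, 64) * 2 - 1        # toy stand-ins for real images
+ train = torch.rand(8, 3, 64, 64) * 2 - 1
+ distances, indices = nearest_neighbours(sample, train, k=3)
+ print(indices)
+ ```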
465
+
466
+ <table><tr><td></td><td>Data Resolution</td><td colspan="2">128</td><td colspan="2">256</td><td colspan="2">512</td></tr><tr><td></td><td>Inner Resolution</td><td>32</td><td>64</td><td>64</td><td>128</td><td>128</td><td>256</td></tr><tr><td rowspan="4">Rate</td><td>2×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>1.00×</td><td>∞×</td></tr><tr><td>4×</td><td>1.64×</td><td>1.53×</td><td>1.97×</td><td>2.08×</td><td>3.61×</td><td>∞×</td></tr><tr><td>8×</td><td>2.65×</td><td>2.17×</td><td>3.51×</td><td>3.18×</td><td>7.98×</td><td>∞×</td></tr><tr><td>16×</td><td>2.89×</td><td>2.49×</td><td>4.57×</td><td>3.83×</td><td>11.78×</td><td>∞×</td></tr></table>
467
+
468
+ Table 4: Training speedup for a fixed memory budget. Larger is better. Calculated for different subsampling rates, training data resolutions, and resolutions of the grid within the multi-scale architecture.
469
+
470
+ <table><tr><td rowspan="2"></td><td>Data Resolution</td><td colspan="2">128</td><td colspan="2">256</td><td colspan="2">512</td></tr><tr><td>Inner Resolution</td><td>32</td><td>64</td><td>64</td><td>128</td><td>128</td><td>256</td></tr><tr><td rowspan="4">Rate</td><td>2×</td><td>1.56×</td><td>1.44×</td><td>1.42×</td><td>1.52×</td><td>1.43×</td><td>1.32×</td></tr><tr><td>4×</td><td>2.44×</td><td>2.02×</td><td>1.99×</td><td>1.66×</td><td>2.00×</td><td>1.67×</td></tr><tr><td>8×</td><td>3.27×</td><td>2.46×</td><td>2.44×</td><td>1.88×</td><td>2.44×</td><td>1.90×</td></tr><tr><td>16×</td><td>3.90×</td><td>2.75×</td><td>2.73×</td><td>2.01×</td><td>2.73×</td><td>2.03×</td></tr></table>
471
+
472
+ Table 5: Memory reduction during training for a fixed batch size. Larger is better. Calculated for different subsampling rates, training data resolutions, and resolutions of the grid within the multi-scale architecture.
473
+
474
+ ![](images/dc7b62f7e83ba94de8a8c97e3a4cb998c3aed81ef05c8c4328f9e59fc737751a.jpg)
475
+ Figure 10: Non-cherry picked, CelebA-HQ $256 \times 256$ samples.
476
+
477
+ ![](images/f99dbe5af29aa902c767943c08a0c65cc8302b4c8f2de30d19791f440cb615bb.jpg)
478
+ Figure 11: Non-cherry picked, LSUN Church $256 \times 256$ samples.
479
+
480
+ ![](images/547282e6d87a68485b8329963824e7c05ece42565fa75194c681145235f87ba3.jpg)
481
+ Figure 12: Non-cherry picked, FFHQ $256 \times 256$ samples.
482
+
483
+ ![](images/8c2e968ba623ca72808389d86017966904fee1da2999d2ea0d8a8e0c0b23ef35.jpg)
484
+ Figure 13: Nearest neighbours for a model trained on CelebA-HQ based on LPIPS distance. The left column contains samples from our model and the right column contains the nearest neighbours in the training set (increasing in distance from left to right)
485
+
486
+ ![](images/849fd0fee0bf9841ace966c8f994bcd0c1f05cd68a4067119c456189144e3da8.jpg)
487
+ Figure 14: Nearest neighbours for a model trained on LSUN Church based on LPIPS distance. The left column contains samples from our model and the right column contains the nearest neighbours in the training set (increasing in distance from left to right)
488
+
489
+ ![](images/8fe8a0f2a270ec8ee6bfac05d787981131564a895b85fdd70bbcf1fdd91a59fc.jpg)
490
+ Figure 15: Nearest neighbours for a model trained on FFHQ based on LPIPS distance. The left column contains samples from our model and the right column contains the nearest neighbours in the training set (increasing in distance from left to right)
2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:809757a5dc55446314a2852a90d26eebf85d202f4446d4fa741cd41286f7e620
3
+ size 2214785
2024/$_infty$-Diff_ Infinite Resolution Diffusion with Subsampled Mollified States/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/2c3324bd-ac05-4b1f-babe-e2a0781cbbf4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7bd1d5139b86b14ce1ad5750a71eddd9fccaae809b3efb89877c8cc5bb47c496
3
+ size 27556143
2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/full.md ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # $\mathbb{D}^2$ PRUNING:MESSAGE PASSING FOR BALANCING DIVERSITY & DIFFICULTY IN DATA PRUNING
2
+
3
+ Adyasha Maharana, Prateek Yadav & Mohit Bansal
4
+
5
+ Department of Computer Science
6
+
7
+ University of North Carolina at Chapel Hill
8
+
9
+ {adyasha,praty,mbansal}@cs.unc.edu
10
+
11
+ # ABSTRACT
12
+
13
+ In recent years, data quality has emerged as an important factor for training massive models. Analytical theories suggest that higher-quality data can lead to lower test errors in models trained on a fixed data budget. Moreover, a model can be trained on a lower compute budget without compromising performance if a dataset can be stripped of its redundancies. Coreset selection (or data pruning) seeks to select a subset of the training data so as to maximize the performance of models trained on this subset, also referred to as coreset. There are two dominant approaches: (1) geometry-based data selection for maximizing data diversity in the coreset, and (2) functions that assign difficulty scores to samples based on training dynamics. Optimizing for data diversity leads to a coreset that is biased towards easier samples, whereas, selection by difficulty ranking omits easy samples that are necessary for the training of deep learning models. This demonstrates that data diversity and importance scores are two complementary factors that need to be jointly considered during coreset selection. In this work, we represent a dataset as an undirected graph and propose a novel pruning algorithm, $\mathbb{D}^2$ PRUNING, that uses message passing over this dataset graph for coreset selection. $\mathbb{D}^2$ PRUNING updates the difficulty scores of each example by incorporating the difficulty of its neighboring examples in the dataset graph. Then, these updated difficulty scores direct a graph-based sampling method to select a coreset that encapsulates both diverse and difficult regions of the dataset space. We evaluate supervised and self-supervised versions of our method on various vision and NLP datasets. Results show that $\mathbb{D}^2$ PRUNING improves coreset selection over previous state-of-the-art methods at low-to-medium pruning rates. Additionally, we find that using $\mathbb{D}^2$ PRUNING for filtering large multimodal datasets leads to increased diversity in the dataset and improved generalization of pretrained models. Our work shows that $\mathbb{D}^2$ PRUNING is a versatile framework for understanding and processing datasets. $^1$
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Deep learning models are evolving into massive architectures with trillions of learnable parameters requiring enormous training datasets for optimal performance. Empirical experiments demonstrate that the test error in such models falls off as a power law with model size as well as training dataset size (Kaplan et al., 2020). Recently, Sorscher et al. (2022) developed an analytical theory that shows that the power law association of test error with data size can be demoted to exponential scaling if one has access to a high-quality data pruning metric for careful data selection. This has the implication that for a fixed data budget, high-quality training data can yield lower test loss in deep learning models. Coreset selection $^{2}$ (Mirzasoleiman et al., 2020; Guo et al., 2022) is a similar line of work that aims to select a subset (coreset) of the most informative samples $\mathcal{S}$ from a large training dataset $\mathcal{T}$ without significantly compromising the performance of the model. Existing coreset selection methods (Toneva et al., 2018; Killamsetty et al., 2021a;b; Yang et al., 2022; Sorscher et al., 2022) demonstrate promising performance on many vision datasets for one-shot coreset selection. However, significant
18
+
19
+ progress remains to be made on the selection of better coresets, especially using self-supervised approaches. Moreover, there is a lack of systematic evaluation of these methods on NLP datasets.
20
+
21
+ Real-world data distributions comprise high-density as well as low-density regions. Yu et al. (2020); Chan et al. (2022) claim that maximizing the variance of intra-class features results in robust representations. To this end, geometry-based coreset selection methods (Sener & Savarese, 2018; Chen et al., 2010) operate under the assumption that samples located close to each other provide redundant information, and try to remove those data points by selecting the samples most distant from $k$ -means cluster centers (Sorscher et al., 2022) or at a median distance from the class center (Xia et al., 2023), in order to maximize diversity in the coreset. On the other hand, uncertainty-based methods (Coleman et al., 2019) and error or loss-based methods (Toneva et al., 2018; Paul et al., 2021) propose a score-based function to estimate the difficulty of each sample in the training dataset from the model's training dynamics and retain the most difficult samples. However, the distribution of difficulty scores for the original data is highly skewed and contains far more low-difficulty (or easy) samples (Swayamdipta et al., 2020), as we show in Figure 2(a). As low-difficulty samples predominantly arise in densely populated regions (Sorscher et al., 2022), incorporating some of these well-connected, low-difficulty samples into the coreset guarantees adequate representation of these dense areas within the coreset (Zheng et al., 2022). At the same time, selecting high-difficulty samples with higher connectivity increases the information content of the coreset (Kim & Shin, 2022). Evidently, example difficulty and data diversity are two crucial factors for selecting effective coresets; yet, there has been little work towards combining them into a unifying framework for coreset selection.
22
+
23
+ To unify these two factors, we propose the $\mathbb{D}^2$ PRUNING method, where we represent the dataset $S$ as an undirected graph $\mathcal{G}$ and design a message-passing algorithm that unifies the difficulty scores and the underlying spatial distribution of the dataset to select a coreset with balanced difficulty and diversity. $\mathbb{D}^2$ PRUNING consists of three simple steps: (1) Graph Initialization: First, we create a graph, $\mathcal{G}$ , where each node is an example from the dataset $S$ and is connected to its $k$ -closest neighbors based on a notion of distance in the embedding space (see Fig. 1(A)). Each node has a feature value that represents the difficulty score of the example. This graph can be used to understand the connectivity of each sample with respect to the rest of the dataset (Ebert et al., 2012). (2) Forward Message Passing: Next, we perform message passing (Gasteiger et al., 2020; Yadav et al., 2019) over the dataset graph to update the difficulty scores of all examples by taking into account the distance and difficulty of its neighboring examples in the graph (see Fig. 1(B)). Specifically, each node collects a message from all of its neighbors (where the message is their difficulty scores scaled by their distance) and uses these messages to update its own difficulty score. (3) Coreset Selection & Reverse Message Passing: Finally, we use these updated scores to iteratively select a balanced subset of samples from high-density low-difficulty regions and low-density high-difficulty regions. At each step of selection, the neighbors of the selected sample are down-weighted via reverse message-passing to promote diversity in the coreset (see Fig. 1(C)). Our design ensures that highly connected nodes of low difficulty are on equal footing with sparsely connected nodes of high difficulty during selection.
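+
+ As a rough illustration of these three steps, the NumPy sketch below runs them on a toy dense kNN graph; the edge-weighting and down-weighting choices here are ours and are not the exact formulation used by $\mathbb{D}^2$ PRUNING.
+
+ ```python
+ import numpy as np
+
+ def d2_style_selection(feats, scores, budget, k=5, gamma=1.0):
+     """Sketch of graph construction, forward message passing over difficulty
+     scores, and iterative selection with reverse message passing."""
+     n = len(feats)
+     # (1) Graph initialisation: k nearest neighbours with distance-based edge weights.
+     d = np.linalg.norm(feats[:, None] - feats[None, :], axis=-1)
+     nbrs = np.argsort(d, axis=1)[:, 1:k + 1]
+     w = np.exp(-d ** 2)                                  # illustrative RBF edge weights
+     # (2) Forward message passing: each node adds its neighbours' weighted scores.
+     updated = scores.astype(float)
+     for i in range(n):
+         updated[i] += (w[i, nbrs[i]] * scores[nbrs[i]]).sum()
+     # (3) Selection with reverse message passing: after picking a node, down-weight
+     # its neighbours so subsequent picks come from other regions.
+     selected = []
+     for _ in range(budget):
+         i = int(np.argmax(updated))
+         selected.append(i)
+         updated[i] = -np.inf
+         updated[nbrs[i]] -= gamma * w[i, nbrs[i]] * scores[i]
+     return selected
+
+ feats = np.random.randn(100, 16)    # toy embeddings
+ scores = np.random.rand(100)        # toy difficulty scores
+ print(d2_style_selection(feats, scores, budget=10))
+ ```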
24
+
25
+ We refer to this diversity-difficulty $(\mathbb{D}^2)$ approach of coreset selection using message-passing as $\mathbb{D}^2$ PRUNING and evaluate this pruning method on multiple image classification and natural language processing (NLP) datasets. We find that $\mathbb{D}^2$ PRUNING outperforms state-of-art methods for coreset selection at low-to-medium pruning rates. Our analysis shows that $\mathbb{D}^2$ PRUNING selects a coreset with a higher distribution of difficult samples for low pruning rates and with equitable distribution over easy and difficult samples for medium-to-high pruning rates. Further, we adapt $\mathbb{D}^2$ PRUNING for self-supervised and unsupervised data selection approaches and show improvements over existing methods for self-supervised coreset selection and data filtering respectively. Importantly, the message-passing framework for coreset selection opens up possibilities for exploring different message schemes, possibly incorporating factors other than data diversity and difficulty, in an easy plug-and-play framework. In summary, our contributions are:
26
+
27
+ - We propose $\mathbb{D}^2$ PRUNING, a one-shot coreset selection algorithm that represents datasets as undirected graphs and uses message-passing to combine the influence of two important factors, example difficulty and data diversity, for data selection.
28
+ - We evaluate our method on several image classification and NLP benchmarks and show state-of-the-art results at low-to-medium pruning rates for supervised and self-supervised approaches.
29
+ - We show that $\mathbb{D}^2$ PRUNING selects diverse data pools when filtering massive multimodal datasets, which improves the generalization of pretrained multimodal models.
30
+
31
+ ![](images/de7936912987efe484d06a93fb32841a908737ae7e695eb76328f682f74b0f3b.jpg)
32
+ A. Graph Initialization
33
+
34
+ ![](images/6320f71abffaa2086a7964a2c2e9277bf375471caa0d485ab137a3710840d516.jpg)
35
+ B. Forward Message Passing
36
+
37
+ ![](images/e309ebe517ca8ca4fd226f42bd1566d108a03dacd2b198b82b87c8ffba2e1bc0.jpg)
38
+ Figure 1: Overview of $\mathbb{D}^2$ PRUNING. (left) Our proposed algorithm contains three steps: (a) Initialization of graph $\mathcal{G}$ using difficulty scores and edge weights based on feature distance, (b) message passing between connected nodes to propagate difficulty scores of neighboring samples, and (c) data selection and reverse message passing to avoid sampling from the same neighborhood. (right) $\mathbb{D}^2$ PRUNING selects a balanced subset of samples (red) from sparse and dense regions.
39
+
40
+ # 2 PRELIMINARIES
41
+
42
+ In this section, we describe one-shot coreset selection and discuss the motivation behind our work.
43
+
44
+ # 2.1 ONE-SHOT CORESET SELECTION
45
+
46
+ Consider a training dataset $S$ containing $N$ examples $\{(x_i, y_i)\}_{i=1}^N$ drawn i.i.d. from an underlying distribution $P$ . One-shot coreset selection refers to the selection of a subset $S'$ of the data at a given pruning rate $\alpha$ such that the loss of the model $\theta$ trained on $S'$ using loss function $L$ is minimized on an evaluation set drawn from $P$ . This results in the following optimization problem:
47
+
48
+ $$
49
+ \min_{S' \subset S: \frac{|S'|}{|S|} \leq (1 - \alpha)} E_{x, y \sim P}\left[ L\left(x, y; \theta^{*}(S')\right) \right] \tag{1}
50
+ $$
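As a concrete illustration of the constraint in Eq. 1, the short sketch below computes the maximum coreset size for a given pruning rate and spells out the train-then-evaluate objective as comments; the numeric values are illustrative examples, not experimental settings from this paper.

```python
# Illustrative only: alpha and n are example values.
alpha = 0.7                      # pruning rate
n = 50_000                       # |S|, e.g., the CIFAR-10 training set size
budget = int((1 - alpha) * n)    # |S'| <= (1 - alpha) * |S|  ->  15,000 examples
print(budget)

# One-shot selection then proceeds as:
#   1. pick S' of size <= budget (this is what coreset selection methods do),
#   2. train theta*(S') on S' with loss L,
#   3. report the expected loss of theta*(S') on held-out data drawn from P.
```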
51
+
52
+ # 2.2 DESIDERATA OF CORESET
53
+
54
+ Coresets are representative subsets of larger datasets and aim to preserve the performance achieved by training on the full dataset. Prior works on understanding training dynamics point towards two important factors for achieving this: example difficulty and data diversity.
55
+
56
+ Example difficulty. Multiple works have sought to define example difficulty in order to understand how deep neural networks process data. Statistical metrics like consistency score (Jiang et al., 2021) measure the probability of predicting the correct label of an instance when it is left out of the training dataset. Sorscher et al. (2022) provide theoretical justification for retaining the hardest examples when pruning large datasets for a perceptron learning setting. Swayamdipta et al. (2020) show that examples that have a high degree of variance in the model's predictions during training have the largest impact on the model's overall performance. Accordingly, coreset selection methods based on difficulty score functions prioritize the selection of difficult examples for coresets (Guo et al., 2022). However, it has been shown that deep learning models learn easy data and simple functions earlier in training (Jiang et al., 2021; Toneva et al., 2018; Baldock et al., 2021) and easy examples ease the optimization of deep learning networks in the high-dimensional data manifold. Moreover, Zheng et al. (2022) demonstrate that it is necessary to include easy examples to ensure coverage in high-density areas of the data distribution, which leads to the next factor of consideration i.e. data diversity.
57
+
58
+ Data diversity. Representation structure has been explored in several works as the key to the generalization of deep learning models; variance in representations for each class should be as large as possible while also being uncorrelated with other classes (Xia et al., 2023). The diversity of a dataset can be captured in many ways, such as coding rate (Yu et al., 2020; Chan et al., 2022), max dispersion or convex hull volume (Yu et al., 2022), and coverage (Sener & Savarese, 2018; Zheng et al., 2022). A set $S'$ is an $r$-cover of another set $S$ when a set of $r$-radius balls centered at each element in
59
+
60
+ ![](images/37f85701eca3a2761e8cf87325961eec9d66d03e484a39a8a7f4dd667b511540.jpg)
61
+ Figure 2: Sampling Methods. Density heat map of data distribution (left) and histogram of importance scores (right) in (A) a single class in the CIFAR10 dataset, and coresets selected under a $90\%$ pruning rate via (B) random sampling, the diversity-only submodular approaches (C) facility location and (D) graphcut (Iyer et al., 2021), (E) Moderate selection (Xia et al., 2023), and (F) our method, $\mathbb{D}^2$ PRUNING, designed to balance data diversity (pretrained ResNet18 features) and difficulty (Toneva et al., 2018).
62
+
63
+ $S^{\prime}$ covers the entire $S$. The radius $r$ can be used as a metric to measure the coverage of $S^{\prime}$ on $S$ (Sener & Savarese, 2018). Zheng et al. (2022) introduce the metric $\mathrm{AUC}_{pr}$ (area under coverage), which is computed against the test set $D_{test}$, i.e., $\mathrm{AUC}_{pr}(S) = E_{x \in D_{test}}[\min_{x' \in S} d(x', x)]$, and theoretically show that it is important to minimize the $\mathrm{AUC}_{pr}$ for better generalization. Difficult samples tend to be rarer samples found in the low-density areas of the data distribution, whereas easy samples tend to lie in high-density areas. An effective coreset should contain sufficient samples from both areas to ensure maximum coverage. However, optimizing for diversity only leads to coresets with a skewed distribution over example difficulty. As we show in Fig. 2(c), $k$-center selection minimizes the distance of samples in $S$ from $S^{\prime}$ and has high coverage of the underlying data distribution. However, the selected coreset contains a disproportionate number of easy samples, rendering it ineffective.
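To make the coverage metric concrete, the following is a minimal NumPy sketch of $\mathrm{AUC}_{pr}$ as defined above; the function and variable names are illustrative and not taken from any released code.

```python
import numpy as np

def auc_pr(coreset_emb: np.ndarray, test_emb: np.ndarray) -> float:
    """Area under coverage (Zheng et al., 2022): average distance from each
    test point to its nearest coreset point. Lower values mean better coverage."""
    # Pairwise Euclidean distances: shape (num_test, num_coreset).
    dists = np.linalg.norm(test_emb[:, None, :] - coreset_emb[None, :, :], axis=-1)
    return float(dists.min(axis=1).mean())

# Toy usage with random embeddings; a well-spread coreset yields a smaller value.
rng = np.random.default_rng(0)
test_emb = rng.normal(size=(200, 16))
coreset_emb = rng.normal(size=(30, 16))
print(auc_pr(coreset_emb, test_emb))
```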
64
+
65
+ Example difficulty and diversity are two complementary factors that make an effective coreset. Hence, coreset selection methods need to unify the influence of these factors in a constructive manner. To this end, we represent the dataset $S$ as a graph and introduce a novel message-passing algorithm (Vashishth et al., 2019a;b), $\mathbb{D}^2$ PRUNING, that accounts for both factors when selecting samples for the coreset.
66
+
67
+ # 3 $\mathbb{D}^2$ PRUNING: MESSAGE PASSING FOR CORESET SELECTION
68
+
69
+ Consider a dataset $S$ , where each sample $s$ is represented in an embedding space, i.e., $s \in \mathbf{R}^d$ . We seek to select a coreset $S'$ consisting of a subset of the samples in $S$ as outlined in Sec. 2.1. Moreover, our goal is to combine the influence of embedding distance and difficulty scores when selecting samples for coreset (see Sec. 2.2). This setting naturally lends itself to a representation using undirected graph $\mathcal{G}$ , where each sample is represented as a node with node-feature $x_i$ , and edge weights $e_{ij}$ to indicate its connectivity with other samples in the embedding space (see Fig. 1(a)). We use message-passing to 'inform' a sample about (a) its proximity to adjacent samples in an embedding space, and (b) the difficulty scores of its neighbors. First, we briefly discuss message passing for graphs, and then we discuss our proposed algorithm, $\mathbb{D}^2$ PRUNING.
70
+
71
+ # 3.1 MESSAGE PASSING
72
+
73
+ Message passing (Hamilton et al., 2017) is a widely-used operation performed on graphs to propagate information from a node's neighbors to itself and update the state of the node based on the newly acquired information. For instance, Gilmer et al. (2017); Gasteiger et al. (2020) use message-passing to encode molecular structures for chemical prediction. The message-passing phase is defined in terms of a message function $M$ and a node update function $U$ . In the message passing phase, a given node $i$ receives messages from each of its neighbors and aggregates them to update its feature value:
74
+
75
+ $$
76
+ m_{i} = \sum_{j \in \mathcal{N}(i)} m_{ij}; \quad \text{where } m_{ij} = M\left(x_{j}, e_{i,j}\right) \tag{2}
77
+ $$
78
+
79
+ $$
80
+ x_{i} = U\left(x_{i}, m_{i}\right) \tag{3}
81
+ $$
82
+
83
+ where $\mathcal{N}(i)$ denotes the neighbors of node $i$ in graph $\mathcal{G}$ . $U$ is an aggregation function that accounts for the messages received from all neighbors, as well as the node's own feature.
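The sketch below illustrates Eqs. 2-3 for a toy weighted graph; the message function $M$ and update function $U$ are passed in as arguments, and Sec. 3.2 describes the specific choices used by $\mathbb{D}^2$ PRUNING. The code and names here are illustrative, not the paper's implementation.

```python
import numpy as np

def message_passing_step(x, edges, M, U):
    """One round of message passing (Eqs. 2-3).
    x: array of node features; edges: dict mapping (i, j) -> edge weight e_ij,
    listing j as a neighbor of i; M: message function; U: update function."""
    m = np.zeros_like(x)
    for (i, j), e_ij in edges.items():
        m[i] += M(x[j], e_ij)                    # m_i = sum_{j in N(i)} M(x_j, e_ij)
    return np.array([U(xi, mi) for xi, mi in zip(x, m)])

# Toy usage with the weighted-sum message and additive update adopted in Sec. 3.2.
x = np.array([1.0, 2.0, 0.5])
edges = {(0, 1): 0.8, (1, 0): 0.8, (1, 2): 0.3, (2, 1): 0.3}
print(message_passing_step(x, edges, M=lambda xj, e: e * xj, U=lambda xi, mi: xi + mi))
```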
84
+
85
+ # 3.2 $\mathbb{D}^2$ PRUNING
86
+
87
+ $\mathbb{D}^2$ PRUNING consists of three stages: (a) graph initialization, (b) forward message passing, and (c) data selection via reverse message passing.
88
+
89
+ Graph initialization. We create a single, sparse graph for the dataset $S$ where each sample in $S$ is represented by a node $i$ in the graph. In order to account for example difficulty during coreset selection, we initialize the node feature as the difficulty score of the sample based on the training dynamics of the model $\theta$ trained on $S$, i.e., $x_{i} = f_{\theta}(s_{i})$, where $f(\cdot)$ is the scoring function. In practice, the scoring function can be one of the many metrics used to measure difficulty, such as forgetting (Toneva et al., 2018), consistency score (Jiang et al., 2021), or self-supervised metrics like prototypicality (Sorscher et al., 2022). Next, we collect the $k$ nearest neighboring samples for every sample in the dataset. Within the graph, the connecting edges between each node $i$ and its $k$ nearest neighbors are initialized with a non-zero edge weight $e_{i,j}$, where node $j$ is one of the $k$ nearest neighbors (see Fig. 1(a)). All other edge weights are set to zero, leading to a sparse graphical representation of the entire dataset $S$. The edge weight $e_{i,j}$ represents the proximity of the two nodes $i,j$ using the RBF kernel of the distance $d(i,j)$. We use the Euclidean distance as the distance function, i.e., $d(i,j) = ||v_i - v_j||$, where $v_{i}$ is the embedding vector for sample $i$.
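A minimal sketch of this initialization step is shown below, assuming NumPy arrays of sample embeddings and precomputed difficulty scores; the brute-force neighbor search and the function name are illustrative choices, not the paper's released implementation.

```python
import numpy as np

def build_knn_graph(embeddings, difficulty, k=5, gamma_f=1.0):
    """Initialize the dataset graph: node features are difficulty scores,
    and each node keeps RBF-weighted edges to its k nearest neighbors."""
    # Brute-force pairwise Euclidean distances (use an ANN index at scale).
    dists = np.linalg.norm(embeddings[:, None, :] - embeddings[None, :, :], axis=-1)
    np.fill_diagonal(dists, np.inf)                        # no self-loops
    neighbors = np.argsort(dists, axis=1)[:, :k]           # k nearest neighbors per node
    nn_dists = np.take_along_axis(dists, neighbors, axis=1)
    edge_w = np.exp(-gamma_f * nn_dists ** 2)              # e_ij = exp(-gamma_f * d(i,j)^2)
    node_feat = np.asarray(difficulty, dtype=float).copy() # x_i = f_theta(s_i)
    return node_feat, neighbors, edge_w
```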
90
+
91
+ Forward message passing. In this step, each node $i$ in the graph receives information about its neighborhood via a single step of message propagation. Every connected node $j$ sends a message $M$ to node $i$ about its importance score, scaled by the edge weight, as
92
+
93
+ $$
94
+ M\left(x_{j}, e_{i,j}\right) = e_{i,j} \cdot x_{j}; \quad \text{where } e_{i,j} = \exp\left(-\gamma_{f} \cdot d(i, j)^{2}\right) \tag{4}
95
+ $$
96
+
97
+ The intuition behind this definition is that samples that are farther away from the node but are of higher difficulty should be weighted similarly to samples that are closer to the node and have lower difficulty. This promotes diversity in the coreset by ensuring representation from all regions of the data distribution. Finally, the receiving node $i$ aggregates all of the messages received from its neighboring nodes and updates its own feature value as,
98
+
99
+ $$
100
+ U_{f}\left(x_{i}, m_{i}\right) = x_{i} + \sum_{j \in \mathcal{N}(i)} M\left(x_{j}, e_{i,j}\right) \tag{5}
101
+ $$
102
+
103
+ This reinforces the importance of dense regions comprising easy samples or sparse regions comprising difficult samples. Existing methods (Ash et al., 2019; Das et al., 2023) do not make a distinction between easy-to-learn and hard-to-learn areas in the data representation space, whereas this step in $\mathbb{D}^2$ PRUNING increases the importance of a sample by an amount that is proportional to the importance scores of the samples surrounding it, thus ranking an easy sample in a hard-to-learn area higher than one in an easy-to-learn area. In this way, we start with a graph $\mathcal{G}$ whose connectivity is based on the distance between two samples in the feature space and convert it, via message passing, into a graph that reflects both distance and difficulty scores.
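In the same illustrative style (self-contained and not the authors' code), a vectorized version of Eqs. 4-5 over a kNN graph can be written as:

```python
import numpy as np

def forward_message_passing(node_feat, neighbors, edge_w):
    """Eqs. 4-5: every node adds the difficulty scores of its k neighbors,
    each scaled by the RBF edge weight e_ij = exp(-gamma_f * d(i, j)^2).
    node_feat: (n,) difficulty scores; neighbors: (n, k) indices; edge_w: (n, k)."""
    messages = (edge_w * node_feat[neighbors]).sum(axis=1)   # m_i = sum_j e_ij * x_j
    return node_feat + messages                              # U_f(x_i, m_i) = x_i + m_i

# Toy usage: node 0 sits close to a difficult neighbor, so its score rises the most.
x = np.array([0.2, 0.9, 0.1])
nbrs = np.array([[1], [0], [1]])
w = np.array([[0.8], [0.8], [0.1]])
print(forward_message_passing(x, nbrs, w))
```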
104
+
105
+ Data selection via reverse message passing. In the final step, samples in $S$ are ranked according to their corresponding updated node feature values in $\mathcal{G}$. Iteratively, the highest-ranking sample $k = \arg \max_{i \in S} x_{i}$ is selected (Ebert et al., 2012), and its neighboring nodes are down-weighted to maximize the diversity of the coreset. However, since the distance between two nodes is a representation of their semantic similarity, neighboring nodes that are farther away from the selected node must be down-weighted relatively less than those that are closer. We implement this via reverse message passing, where the neighboring nodes receive a weighted message from the selected node and use it to update their feature value as,
106
+
107
+ $$
108
+ x_{j} = x_{j} - e_{k,j} \cdot x_{k}, \;\; \forall j \in \mathcal{N}(k); \quad \text{where } e_{k,j} = \exp\left(-\gamma_{r} \cdot d(k, j)^{2}\right), \tag{6}
109
+ $$
110
+
111
+ where a lower value of $\gamma_{r}$ causes larger updates in connected nodes, and vice versa. With these steps, $\mathbb{D}^2$ PRUNING selects a coreset whose samples come from all regions of the data distribution and are more uniformly distributed over the range of difficulty scores (see Fig. 2(f)).
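The greedy selection loop with reverse message passing (Eq. 6) can be sketched as follows; as with the earlier snippets, this is an illustrative, unoptimized version with made-up names, not the paper's $\mathcal{O}(n)$ implementation.

```python
import numpy as np

def select_coreset(scores, neighbors, embeddings, budget, gamma_r=0.1):
    """Repeatedly pick the highest-scoring node, then down-weight its neighbors
    by exp(-gamma_r * d^2) * x_k so that later picks come from other regions."""
    scores = np.asarray(scores, dtype=float).copy()
    available = np.ones(len(scores), dtype=bool)
    selected = []
    for _ in range(budget):
        k = int(np.argmax(np.where(available, scores, -np.inf)))   # k = argmax_i x_i
        selected.append(k)
        available[k] = False
        for j in neighbors[k]:                                     # reverse messages (Eq. 6)
            d = np.linalg.norm(embeddings[k] - embeddings[j])
            scores[j] -= np.exp(-gamma_r * d ** 2) * scores[k]
    return selected

# Usage (with arrays from the earlier sketches):
# coreset_indices = select_coreset(updated_scores, neighbors, embeddings, budget=1000)
```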
112
+
113
+ # 4 EXPERIMENTAL SETUP
114
+
115
+ Tasks, Models & Datasets. We evaluate $\mathbb{D}^2$ PRUNING on three vision datasets i.e., CIFAR10, CIFAR100 (Krizhevsky et al., 2009) and Imagenet-1K (Deng et al., 2009), and two NLP datasets
116
+
117
+ i.e., a subset (2k train examples) of ImDB reviews for sentiment analysis, and the Adversarial NLI (ANLI) dataset (Nie et al., 2020) for natural language inference. To the best of our knowledge, we are the first to perform a systematic evaluation of coreset selection methods on NLP datasets. We evaluate unsupervised $\mathbb{D}^2$ PRUNING on the DataComp (small) dataset (Gadre et al., 2023). We use ResNet-18 for CIFAR10 and CIFAR100, ResNet-34 for ImageNet-1K and RoBERTa for NLP datasets.
118
+
119
+ Baselines. (Supervised) We compare $\mathbb{D}^2$ PRUNING with several score-based and geometry-based coreset selection methods derived from the training dynamics of a model trained on the full dataset: A) Random selection. B) Entropy (Coleman et al., 2019) of the prediction vector. C) Forgetting (Toneva et al., 2018) score. D) EL2N (Paul et al., 2021), i.e., the L2 norm of error vectors. E) Area under the margin (Pleiss et al., 2020) score. F) Moderate (Xia et al., 2023) coreset consisting of samples at median distance from the class center. G) CCS (Zheng et al., 2022), which divides a range of difficulty scores into equal-sized bins and randomly samples from each bin. H) $\mathrm{CCS + k}$-Center, where k-center samples are selected within each CCS bin. I) BADGE (Ash et al., 2019), which selects samples using k-means++ in the gradient vector space. J) GLISTER (Killamsetty et al., 2021b), which uses bi-level optimization to select robust coresets. K) CAL-SDS2 (Das et al., 2023), which combines a facility location submodular function (Iyer et al., 2021) with forgetting scores. L) INGENIOUS (Renduchintala et al., 2023), a diversity-only approach using the facility location function for NLP tasks. (Unsupervised) We use self-supervised embeddings to compare $\mathbb{D}^2$ PRUNING with A) Prototypicality (Sorscher et al., 2022), which computes k-means clusters using embeddings and selects samples farthest from the cluster center, B) CCS over prototypicality scores, and C) Moderate selection (Xia et al., 2023).
120
+
121
+ Implementation. In the supervised approach of $\mathbb{D}^2$ PRUNING, graph nodes are initialized with supervised difficulty score values and feature embeddings extracted from the model trained on the entire dataset. We use the forgetting score for CIFAR10 and CIFAR100, and the AUM score for ImageNet-1K (Zheng et al., 2022). For NLP datasets, we substitute the forgetting score with variance (Swayamdipta et al., 2020), since these models are trained for fewer epochs, and use the [CLS] token representation from RoBERTa models for feature embeddings. Self-supervised $\mathbb{D}^2$ PRUNING is initialized with feature embeddings from SwAV (Caron et al., 2020) for ImageNet-1K and uniform difficulty scores over the dataset.
122
+
123
+ Computational Complexity of $\mathbb{D}^2$ PRUNING. Graph initialization involves getting the $k$-nearest neighbors, which are computed on an A100 GPU using PyTorch, taking $< 2$ minutes for the CIFAR10, CIFAR100, Adversarial NLI and ImDB datasets, and approx. 12 minutes for ImageNet-1K at quadratic time complexity $\mathcal{O}(vn^2)$, where $v$ is the vector dimension. We use faiss indexing (CPU) to get the approximate nearest neighbors for the $12.8\mathrm{M}$ samples in the DataComp dataset, taking nearly 55 minutes (8 workers) at $\mathcal{O}(d\log(d))$ time complexity, where $d = 256\mathrm{K}$ is the number of documents in the faiss index (Johnson et al., 2019). Forward message passing is a parallelizable step of linear time complexity that scales with $k$ as $\mathcal{O}(nk)$. The iterative selection step in $\mathbb{D}^2$ PRUNING takes $\mathcal{O}(n)$ time in our optimized implementation, completing in $< 5$ minutes for DataComp (Sec. B, Appendix).
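For readers unfamiliar with approximate nearest-neighbor indexing, the sketch below shows one way to obtain the $k$-nearest-neighbor lists with faiss; the index type, nlist value, and array shapes are illustrative assumptions, not the exact configuration used for DataComp.

```python
import faiss
import numpy as np

d, k, nlist = 512, 10, 256                          # embedding dim, neighbors, coarse clusters
xb = np.random.rand(100_000, d).astype("float32")   # placeholder embeddings

quantizer = faiss.IndexFlatL2(d)                    # coarse quantizer for the IVF index
index = faiss.IndexIVFFlat(quantizer, d, nlist)
index.train(xb)                                     # learn the coarse clustering
index.add(xb)                                       # add all sample embeddings

# Query the database against itself; request k + 1 because the first hit is
# typically the query point itself.
dists, neighbors = index.search(xb, k + 1)
neighbors = neighbors[:, 1:]                        # drop the self-match in column 0
```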
124
+
125
+ Algorithm Hyperparameters. We use the best reported hyperparameters for baseline methods. For $\mathbb{D}^2$ PRUNING, we set the forward message passing weight $\gamma_{f}$ to 1.0 and perform a sweep over $k = \{1,5,10,15\}$ and $\gamma_r = \{0,0.1,0.2\dots 1.0\}$ for CIFAR10, CIFAR100 datasets. Insights from these runs are used to select three configurations for each run on ImageNet-1K (see Sec. 5.2).
126
+
127
+ # 5 RESULTS & DISCUSSION
128
+
129
+ # 5.1 COMPARISON TO SUPERVISED CORESET SELECTION METHODS
130
+
131
+ We evaluate $\mathbb{D}^2$ PRUNING and other coreset selection methods outlined in Sec. 4 on three vision datasets and present results in Tab. 1. $\mathbb{D}^2$ PRUNING demonstrates consistent gains over the previous state-of-the-art for all datasets at low and medium pruning rates. $\mathbb{D}^2$ PRUNING yields significant gains $(p < 0.05)$, i.e., $1.0\%$ and $1.4\%$, over the previous best for $50\%$ and $80\%$ pruning rates on ImageNet-1K, showing the efficacy of graphs and message passing for coreset selection. Notably, random pruning works surprisingly well for ImageNet-1K, especially at low pruning rates, and is hard to beat. CCS (Zheng et al., 2022) remains a strong baseline at the $90\%$ pruning rate and only benefits a little from additional diversity-based selection within the CCS bins (see $\mathrm{CCS} + \mathrm{k}$-Center in Tab. 1). CCS enforces a uniform distribution of sample difficulty scores in the coreset, which is beneficial at
132
+
133
+ Table 1: Results on Vision Datasets. Performance (acc.) of $\mathbb{D}^2$ PRUNING and baselines on CIFAR10, CIFAR100 using ResNet18, and ImageNet-1k using ResNet34 models. Higher is better.
134
+
135
+ <table><tr><td rowspan="2">Dataset (→)Pruning Rate (→)</td><td colspan="6">CIFAR10</td><td colspan="6">CIFAR100</td><td colspan="6">ImageNet-1K</td></tr><tr><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>Random</td><td>95.5</td><td>94.3</td><td>93.4</td><td>90.9</td><td>88.0</td><td>79.0</td><td>78.7</td><td>74.6</td><td>71.1</td><td>65.3</td><td>57.4</td><td>44.8</td><td>73.1</td><td>72.2</td><td>70.3</td><td>66.7</td><td>62.5</td><td>52.3</td></tr><tr><td>Entropy (Coleman et al., 2019)</td><td>-</td><td>94.8</td><td>92.9</td><td>90.1</td><td>84.1</td><td>72.1</td><td>-</td><td>74.7</td><td>68.9</td><td>60.3</td><td>49.6</td><td>35.0</td><td>-</td><td>72.3</td><td>70.8</td><td>64.0</td><td>55.8</td><td>39.0</td></tr><tr><td>Forgetting (Toneva et al., 2018)</td><td>-</td><td>95.7</td><td>94.9</td><td>88.1</td><td>73.8</td><td>46.3</td><td>-</td><td>76.0</td><td>68.1</td><td>49.3</td><td>30.3</td><td>20.6</td><td>-</td><td>72.6</td><td>70.9</td><td>66.5</td><td>62.9</td><td>52.3</td></tr><tr><td>EL2N (Paul et al., 2021)</td><td>-</td><td>95.4</td><td>94.8</td><td>89.2</td><td>78.6</td><td>30.3</td><td>-</td><td>75.6</td><td>68.1</td><td>47.2</td><td>24.8</td><td>11.8</td><td>-</td><td>72.2</td><td>67.2</td><td>48.8</td><td>31.2</td><td>12.9</td></tr><tr><td>AUM (Pleiss et al., 2020)</td><td>-</td><td>95.6</td><td>95.1</td><td>87.9</td><td>68.0</td><td>40.0</td><td>-</td><td>75.0</td><td>67.9</td><td>40.1</td><td>26.4</td><td>13.1</td><td>-</td><td>72.5</td><td>66.6</td><td>40.4</td><td>21.1</td><td>9.9</td></tr><tr><td>GLISTER (Killamsetty et al., 2021b)</td><td>-</td><td>95.1</td><td>94.5</td><td>90.9</td><td>85.8</td><td>69.3</td><td>-</td><td>78.1</td><td>74.1</td><td>68.2</td><td>58.1</td><td>52.4</td><td>-</td><td>68.7</td><td>65.6</td><td>61.4</td><td>60.3</td><td>52.0</td></tr><tr><td>CAL-SDS2 (Das et al., 2023)</td><td>-</td><td>95.7</td><td>94.4</td><td>92.1</td><td>88.9</td><td>84.6</td><td>-</td><td>77.6</td><td>74.5</td><td>69.1</td><td>64.7</td><td>56.2</td><td>-</td><td>71.8</td><td>70.5</td><td>68.0</td><td>64.2</td><td>56.3</td></tr><tr><td>Moderate (Xia et al., 2023)</td><td>-</td><td>93.9</td><td>92.6</td><td>90.6</td><td>87.3</td><td>81.0</td><td>-</td><td>74.6</td><td>71.1</td><td>65.3</td><td>58.5</td><td>45.5</td><td>-</td><td>72.0</td><td>70.3</td><td>65.9</td><td>61.3</td><td>52.1</td></tr><tr><td>CCS (Zheng et al., 2022)</td><td>-</td><td>95.4</td><td>95.0</td><td>93.0</td><td>91.0</td><td>86.9</td><td>-</td><td>77.1</td><td>74.4</td><td>68.9</td><td>64.0</td><td>57.3</td><td>-</td><td>72.3</td><td>70.5</td><td>67.8</td><td>64.5</td><td>57.3</td></tr><tr><td>CCS + k-Center</td><td>-</td><td>95.4</td><td>95.1</td><td>92.9</td><td>91.1</td><td>86.8</td><td>-</td><td>77.2</td><td>74.6</td><td>69.3</td><td>64.5</td><td>57.1</td><td>-</td><td>72.5</td><td>70.6</td><td>68.0</td><td>64.5</td><td>57.2</td></tr><tr><td>BADGE (Ash et al., 2019)</td><td>-</td><td>94.0</td><td>92.1</td><td>90.7</td><td>88.1</td><td>82.5</td><td>-</td><td>74.7</td><td>71.8</td><td>65.2</td><td>58.9</td><td>47.8</td><td>-</td><td>71.7</td><td>70.4</td><td>65.8</td><td>61.7</td><td>53.4</td></tr><tr><td>D² 
PRUNING</td><td>-</td><td>95.7</td><td>94.9</td><td>93.3</td><td>91.4</td><td>87.1</td><td>-</td><td>78.2</td><td>75.9</td><td>70.5</td><td>65.2</td><td>56.9</td><td>-</td><td>72.9</td><td>71.8</td><td>68.1</td><td>65.9</td><td>55.6</td></tr></table>
136
+
137
+ Table 2: Results on NLP Datasets. Comparison of performance (acc.) of $\mathbb{D}^2$ PRUNING with existing coreset selection methods on ANLI, ImDB reviews using pretrained RoBERTaLarge. Higher is better.
138
+
139
+ <table><tr><td rowspan="2">Dataset (→)Pruning Rate (→)</td><td colspan="6">Adversarial NLI (ANLI)</td><td colspan="6">ImDB Reviews (2k)</td></tr><tr><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>Random</td><td>48.8</td><td>46.3</td><td>45.2</td><td>43.6</td><td>42.8</td><td>40.3</td><td>91.8</td><td>91.2</td><td>91.12</td><td>90.4</td><td>84.6</td><td>81.3</td></tr><tr><td>Entropy (Coleman et al., 2019)</td><td>-</td><td>48.9</td><td>45.8</td><td>43.6</td><td>42.4</td><td>34.0</td><td>-</td><td>90.6</td><td>90.4</td><td>52.8</td><td>60.1</td><td>51.3</td></tr><tr><td>Variance (Swayamdipta et al., 2020)</td><td>-</td><td>48.3</td><td>45.4</td><td>41.7</td><td>40.1</td><td>38.7</td><td>-</td><td>91.4</td><td>91.0</td><td>90.2</td><td>51.5</td><td>50.7</td></tr><tr><td>EL2N (Paul et al., 2021)</td><td>-</td><td>47.7</td><td>46.3</td><td>43.9</td><td>41.1</td><td>40.3</td><td>-</td><td>91.6</td><td>91.4</td><td>51.0</td><td>50.6</td><td>50.3</td></tr><tr><td>AUM (Pleiss et al., 2020)</td><td>-</td><td>47.9</td><td>46.2</td><td>42.7</td><td>41.0</td><td>39.6</td><td>-</td><td>91.6</td><td>91.6</td><td>53.4</td><td>50.3</td><td>50.3</td></tr><tr><td>GLISTER (Killamsetty et al., 2021b)</td><td>-</td><td>48.6</td><td>46.2</td><td>43.8</td><td>43.1</td><td>39.9</td><td>-</td><td>90.9</td><td>91.2</td><td>90.1</td><td>89.1</td><td>87.4</td></tr><tr><td>CAL-SDS2 (Das et al., 2023)</td><td>-</td><td>48.7</td><td>46.8</td><td>44.1</td><td>43.1</td><td>40.2</td><td>-</td><td>90.7</td><td>90.5</td><td>85.4</td><td>86.2</td><td>88.3</td></tr><tr><td>Moderate (Xia et al., 2023)</td><td>-</td><td>46.1</td><td>44.5</td><td>43.2</td><td>42.8</td><td>40.3</td><td>-</td><td>91.4</td><td>91.2</td><td>90.9</td><td>89.8</td><td>85.4</td></tr><tr><td>CCS (Zheng et al., 2022)</td><td>-</td><td>48.5</td><td>46.2</td><td>44.5</td><td>43.2</td><td>40.4</td><td>-</td><td>91.6</td><td>90.8</td><td>90.2</td><td>89.6</td><td>87.5</td></tr><tr><td>CCS + k-Center</td><td>-</td><td>48.4</td><td>46.3</td><td>44.1</td><td>43.2</td><td>40.2</td><td>-</td><td>91.4</td><td>91.0</td><td>90.6</td><td>90.2</td><td>88.2</td></tr><tr><td>BADGE (Ash et al., 2019)</td><td>-</td><td>47.3</td><td>45.8</td><td>44.0</td><td>43.1</td><td>39.5</td><td>-</td><td>91.3</td><td>90.9</td><td>90.0</td><td>90.1</td><td>89.5</td></tr><tr><td>INGENIOUS (Renduchintala et al., 2023)</td><td>-</td><td>44.3</td><td>46.1</td><td>43.8</td><td>41.1</td><td>40.3</td><td>-</td><td>91.1</td><td>87.6</td><td>89.5</td><td>87.8</td><td>82.4</td></tr><tr><td>D2PRUNING</td><td>-</td><td>48.9</td><td>46.7</td><td>45.3</td><td>44.5</td><td>40.3</td><td>-</td><td>91.7</td><td>91.6</td><td>91.2</td><td>90.9</td><td>90.3</td></tr></table>
140
+
141
+ high pruning rates for providing even coverage over easy and difficult samples. However, at lower pruning rates (or with increasing data budget), difficult training samples yield a lower test loss from deep learning models (Sorscher et al., 2022). The hyperparameters $k$ and $\gamma_{r}$ in $\mathbb{D}^2$ PRUNING (see Sec. 3) allow flexibility in the distribution of easy/difficult samples in coresets. We find that higher values of $\gamma_{r}$ and lower values of $k$ in $\mathbb{D}^2$ PRUNING lead to a coreset that is skewed towards more difficult samples and benefits performance at lower pruning rates. Conversely, low $\gamma_{r}$ and high $k$ lead to an equitable distribution over easy/difficult samples and are more useful for higher pruning rates. CAL-SDS2 (Das et al., 2023) also introduces a tunable hyperparameter for balancing difficulty and diversity; however, its use of facility location (Iyer et al., 2021) for measuring diversity yields smaller gains than the graph-based local neighborhoods in $\mathbb{D}^2$ PRUNING. See the discussion on hyperparameters in Sec. 5.2 and qualitative analysis of coresets in Sec. D, Appendix.
142
+
143
+ Results from the evaluation of various coreset selection methods, including $\mathbb{D}^2$ PRUNING, on NLP datasets are presented in Tab. 2. First, we find that when pretrained language models (PLMs) are finetuned on task-specific datasets, the models do not suffer from a catastrophic decline in performance at high pruning rates, in contrast to models trained from scratch on vision datasets. For ImDB reviews, the performance of finetuned RoBERTa goes from $91.8\%$ at $0\%$ pruning to $81.3\%$ at $90\%$ pruning using random sampling. The performance improves to $87.5\%$ using CCS coreset selection and further improves to $90.3\%$ ( $p < 0.05$ ) using $\mathbb{D}^2$ PRUNING. The ANLI dataset has been carefully crafted with an iterative, adversarial human-and-model-in-the-loop process, and hence, is significantly less redundant than conventional NLP datasets. The performance for ANLI falls from $48.8\%$ to $42.8\%$ at $80\%$ pruning using random sampling. In this case, CCS coreset selection does not lead to a significant improvement in performance ( $43.2\%$ ), whereas $\mathbb{D}^2$ PRUNING improves the performance by $1.7\%$ to obtain $44.5\%$ ( $p < 0.05$ ). Score-based selection methods largely fail to yield results better than random pruning at high pruning rates. Additionally, the use of the facility location function for representing diversity in CAL-SDS2 (Das et al., 2023) and INGENIOUS (Renduchintala et al., 2023) yields smaller gains than our graph-based approach in $\mathbb{D}^2$ PRUNING.
144
+
145
+ # 5.2 ANALYSIS OF $\mathbb{D}^2$ PRUNING
146
+
147
+ $\mathbb{D}^2$ PRUNING contains two hyperparameters, $k$ nearest neighbors and reverse message passing weight $\gamma_r$ (see Sec. 3) that allow various distributions of importance scores in the selected coreset. At low
148
+
149
+ ![](images/e895de08bcc9b169029e60b08c32e1288b396b5b244e9841395f8a929d4911c7.jpg)
150
+
151
+ ![](images/db2936a5f8a1142ee87af0003047113906fcfc02544556667cba8677029f2437.jpg)
152
+ Figure 3: Effect of $k$ , $\gamma_r$ . (A) Accuracy at 30%, 90% pruning of CIFAR100 for nearest neighbors ( $k$ ) and message passing weight $\gamma_r$ values; Distribution of difficulty scores in the best coresets selected via $\mathbb{D}^2$ PRUNING for 30% (center) and 70% (right) pruning of (B) CIFAR100, (C) ImageNet-1K.
153
+
154
+ ![](images/b86f60df3e065c79b4faf51e8ae6f0af89a97206798287b53a82cbf532befe5a.jpg)
155
+
156
+ ![](images/ad0b8eb7ce8a05fb4c17ec3111d79549a8cad2769a2086d7382169091990ac8c.jpg)
157
+
158
+ ![](images/15a10300dcf869093721348d52c4c787876cd36c986a6b8ad776f9fa53eb4f9e.jpg)
159
+
160
+ ![](images/b5916a2ecef5e17be7f2f976844c87e3512bc7ddd17b784238bb6183d901bde9.jpg)
161
+
162
+ ![](images/229a9678b108ff3ef14ffb074c60a09314490a45ad323a15640e36bf6d08d38a.jpg)
163
+
164
+ ![](images/901d2792b5b95e714e07c4524d58df12f704f0d1f589cbb9c79c0f7c50611f64.jpg)
165
+
166
+ pruning rates (see top, Fig. 3(a)), higher $k$ has a small effect on performance when the updates during reverse message passing are weak ( $\gamma_r = 1.0$ ). However, the coresets selected at high $k$ and low $\gamma_r$ include a majority of the difficult samples from the full dataset, which works best for low pruning rates on CIFAR100, as demonstrated by the distribution of importance scores in best-performing coreset at $30\%$ pruning rate (see Fig. 3(B), center). We use this insight to pick a similar configuration of $\mathbb{D}^2$ PRUNING for ImageNet-1K and find that it transfers well. The distribution of difficulty scores in the best-performing coreset of ImageNet-1K at $30\%$ pruning rate is presented in Fig. 3(C).
167
+
168
+ Higher $k$ improves performance when large updates $(\gamma_r = 0.0)$ are being made to the nodes connected to the selected node at high pruning rates (see bottom, Fig. 3(a)). This is because low $\gamma_r$ value leads to aggressive downweighting of semantically similar samples when a sample is selected and promotes diversity under a fixed data budget. The selected samples also form an equitable distribution over a small range of difficulty scores. Consequently, such coresets work best for medium-to-high pruning rates, as evidenced by the distribution of difficulty scores in the best performing coresets at 70% pruning rate for CIFAR100 and ImageNet-1K (see Fig. 3(B,C), right).
169
+
170
+ # 5.3 SELF-SUPERVISED AND UNSUPERVISED APPROACHES USING $\mathbb{D}^2$ PRUNING
171
+
172
+ Existing methods for obtaining sample difficulty scores generally rely on a model trained on the full dataset, which undermines their utility for scalably curating new datasets. Hence, we adapt $\mathbb{D}^2$ PRUNING for self-supervised and unsupervised data selection approaches.
173
+
174
+ Self-supervised coreset selection. Sorscher et al. (2022) use embeddings from SwAV, a model trained on ImageNet-1k in a self-supervised manner, and use the spatial distribution of the samples in the embedding space to assign difficulty scores (prototypicality). This method suffers drastically at pruning rates above $30\%$ (see Fig. 4). When combined with CCS, it yields a $10\%$ gain at the $90\%$ pruning rate and smaller gains at the $70\%$ and $80\%$ pruning rates. We adapt $\mathbb{D}^2$ PRUNING for a similar self-supervised approach by using SwAV embeddings to compute sample distances and initialize node features with a unit value. In the absence of difficulty scores, $\mathbb{D}^2$ PRUNING ranks the samples solely by the density of their neighborhood in the embedding space. $\mathbb{D}^2$ PRUNING improves performance by $3\%$ at the $80\%$ pruning rate and provides similar gains over prototypicality at lower pruning rates.
175
+
176
+ ![](images/d3d28b5193f2e231f65166b838f1c48ebb854378237bdef1349a5503a490d2fa.jpg)
177
+ Figure 4: Self-supervised pruning of ImageNet-1K. $\mathbb{D}^2$ PRUNING performs as well as supervised pruning at $30\%$ and significantly improves over existing methods.
178
+
179
+ Unsupervised data filtering. Gadre et al. (2023) show that a simple strategy of retaining the samples with a high CLIP score is a strong baseline filtering method (see Tab. 3) on DataComp, a massive unfiltered corpus of images and texts to train CLIP-style models (Radford et al., 2021). However, a
180
+
181
+ Table 3: Results on DataComp. Comparison of performance (acc.) of $\mathbb{D}^2$ PRUNING with CCS (Zheng et al., 2022) and data filtering methods presented in Gadre et al. (2023). Higher is better.
182
+
183
+ <table><tr><td>Filtering Strategy</td><td>Dataset Size</td><td>ImageNet</td><td>ImageNet Dist. Shift</td><td>VTAB</td><td>Retrieval</td><td>Average</td></tr><tr><td>No filtering (Gadre et al., 2023)</td><td>12.8M</td><td>2.5</td><td>3.3</td><td>14.5</td><td>11.4</td><td>13.2</td></tr><tr><td>Text-based filtering (Gadre et al., 2023)</td><td>3.2M</td><td>4.6</td><td>5.2</td><td>16.9</td><td>12.5</td><td>15.7</td></tr><tr><td>Image-based filtering (Gadre et al., 2023)</td><td>3.2M</td><td>4.3</td><td>4.7</td><td>17.8</td><td>12.1</td><td>15.9</td></tr><tr><td>CLIP score (L/14 30%) (Gadre et al., 2023)</td><td>3.8M</td><td>5.1</td><td>5.5</td><td>19.0</td><td>11.7</td><td>17.3</td></tr><tr><td>CLIP score (L/14 30%, reproduced)</td><td>3.8M</td><td>5.1</td><td>5.6</td><td>17.0</td><td>11.9</td><td>16.0</td></tr><tr><td>CCS (Zheng et al., 2022)</td><td>3.8M</td><td>2.6</td><td>3.7</td><td>14.3</td><td>14.2</td><td>13.8</td></tr><tr><td>D² PRUNING (image + text)</td><td>3.8M</td><td>5.1</td><td>5.6</td><td>18.2</td><td>11.7</td><td>17.0</td></tr><tr><td>D² PRUNING (image only)</td><td>3.8M</td><td>4.4</td><td>5.1</td><td>16.9</td><td>12.1</td><td>15.9</td></tr><tr><td>D² PRUNING (text only)</td><td>3.8M</td><td>4.9</td><td>5.5</td><td>17.0</td><td>12.3</td><td>16.6</td></tr><tr><td>T-MARS (Maini et al., 2023)</td><td>2.5M</td><td>6.3</td><td>6.6</td><td>17.9</td><td>12.8</td><td>17.7</td></tr><tr><td>T-MARS + D² PRUNING (image + text)</td><td>2.5M</td><td>6.5</td><td>6.7</td><td>19.1</td><td>12.8</td><td>18.8</td></tr></table>
184
+
185
+ strategy based only on individual sample scores ignores potential redundancies in the dataset and may allot unnecessary data budget to an easy but dense region of the sample space. Hence, we adapt $\mathbb{D}^2$ PRUNING for filtering DataComp by treating the CLIP score as the difficulty score and using CLIP embeddings for computing sample distances. The data selected by $\mathbb{D}^2$ PRUNING using both CLIP text and image embeddings for computing sample distances improves average zero-shot performance on 38 image classification and multimodal datasets by $1\%$ at the same data budget (see Tab. 3). When combined with the filtering method T-MARS (Maini et al., 2023), which removes images whose textual content overlaps with the caption, $\mathbb{D}^2$ PRUNING achieves cumulative improvements.
186
+
187
+ # 6 RELATED WORK
188
+
189
+ Coreset Selection. Coreset selection has been widely studied in machine learning (Welling, 2009; Chen et al., 2010; Feldman et al., 2011) for supervised learning. Recent works have focused on large datasets and deep networks. Geometry-based methods remove redundant information (Welling, 2009; Sener & Savarese, 2018). Uncertainty/loss/error-based methods estimate the difficulty of a sample from model confidence (Swayamdipta et al., 2020) or its training dynamics (Toneva et al., 2018; Paul et al., 2021; Bachem et al., 2015). Submodular functions (Wei et al., 2015; Killamsetty et al., 2023), gradient-matching (Mirzasoleiman et al., 2020), and optimization (Yang et al., 2022; Park et al., 2022) have been explored for coreset selection. Zhou & Bilmes (2018); Zhou et al. (2020); Das et al. (2023) combine submodular functions with difficulty scores for selecting data. Joshi & Mirzasoleiman (2023) study the importance of data samples for self-supervised learning via submodular optimization. We combine data diversity and sample difficulty via graphs for selection.
190
+
191
+ Data Pruning in NLP. Works exploring coreset selection methods for NLP datasets have been few and far between (Fayyaz et al., 2022). Abbas et al. (2023) remove semantic duplicates from the C4 dataset (Raffel et al., 2020) to reduce data size and improve performance. Kaddour (2023) introduces a small version of the Pile dataset (Gao et al., 2020) for pretraining BERT (Devlin et al., 2018; Liu et al., 2019). We evaluate coreset selection methods on sentiment analysis and natural language inference tasks.
192
+
193
+ Graphs & Message Passing for Data Selection. Neural message passing (Yadav et al., 2019; Yadati et al., 2019) is well-explored in graph neural networks for chemical structures (Gilmer et al., 2017) but has seen less exploration in the representation of datasets. Kim et al. (2021) use message-passing to learn the topology of data in online learning. Ebert et al. (2012) use message-passing based on feature distance only for performing graph-based density sampling during active learning. Hongjin et al. (2022) construct a sparse graph from the $k$-nearest neighbors of in-context examples and down-weight the selected example's connected nodes, which is similar to the reverse message-passing step in $\mathbb{D}^2$ PRUNING. In contrast, in $\mathbb{D}^2$ PRUNING we initialize the nodes with sample importance scores and first use forward message-passing to merge the influence of importance scores and local-neighborhood density when ranking samples.
194
+
195
+ # 7 CONCLUSION
196
+
197
+ We introduce a novel coreset selection algorithm, $\mathbb{D}^2$ PRUNING, based on message-passing within a graph representing the dataset. Our algorithm combines data diversity and difficulty to select a coreset that outperforms existing coreset selection methods at low-to-medium pruning rates on multiple vision and NLP benchmarks, and can be adapted for self-supervised and unsupervised data selection.
198
+
199
+ Reproducibility. We report the training hyperparameters used for our best models, as well as the best hyperparameters for $\mathbb{D}^2$ PRUNING (see Sec. 3 and a discussion in Sec. 5.2) in the Appendix. The code for running the experiments in our paper is available as part of the supplementary submission. All datasets used in our experiments are openly available.
200
+
201
+ # REFERENCES
202
+
203
+ Amro Abbas, Kushal Tirumala, Daniel Simig, Surya Ganguli, and Ari S Morcos. Semdedup: Data-efficient learning at web-scale through semantic deduplication. arXiv preprint arXiv:2303.09540, 2023.
204
+ Jordan T Ash, Chicheng Zhang, Akshay Krishnamurthy, John Langford, and Alekh Agarwal. Deep batch active learning by diverse, uncertain gradient lower bounds. In International Conference on Learning Representations, 2019.
205
+ Olivier Bachem, Mario Lucic, and Andreas Krause. Coresets for nonparametric estimation-the case of dp-means. In International Conference on Machine Learning, pp. 209-217. PMLR, 2015.
206
+ Robert Baldock, Hartmut Maennel, and Behnam Neyshabur. Deep learning through the lens of example difficulty. Advances in Neural Information Processing Systems, 34:10876-10889, 2021.
207
+ Samuel Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pp. 632-642, 2015.
208
+ Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. Advances in neural information processing systems, 33:9912-9924, 2020.
209
+ Kwan Ho Ryan Chan, Yaodong Yu, Chong You, Haozhi Qi, John Wright, and Yi Ma. Redunet: A white-box deep network from the principle of maximizing rate reduction. The Journal of Machine Learning Research, 23(1):4907-5009, 2022.
210
+ Yutian Chen, Max Welling, and Alex Smola. Super-samples from kernel herding. In Proceedings of the Twenty-Sixth Conference on Uncertainty in Artificial Intelligence, pp. 109-116, 2010.
211
+ Cody Coleman, Christopher Yeh, Stephen Mussmann, Baharan Mirzasoleiman, Peter Bailis, Percy Liang, Jure Leskovec, and Matei Zaharia. Selection via proxy: Efficient data selection for deep learning. In International Conference on Learning Representations, 2019.
212
+ Arnav Mohanty Das, Gantavya Bhatt, Megh Manoj Bhalerao, Vianne R Gao, Rui Yang, and Jeff Bilmes. Accelerating batch active learning using continual learning techniques. Transactions on Machine Learning Research, 2023.
213
+ Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.
214
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
215
+ Sandra Ebert, Mario Fritz, and Bernt Schiele. Ralf: A reinforced active learning formulation for object class recognition. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pp. 3626-3633. IEEE, 2012.
216
+ Mohsen Fayyaz, Ehsan Aghazadeh, Ali Modarressi, Mohammad Taher Pilehvar, Yadollah Yaghoobzadeh, and Samira Ebrahimi Kahou. Bert on a data diet: Finding important examples by gradient-based pruning. arXiv preprint arXiv:2211.05610, 2022.
217
+ Dan Feldman, Matthew Faulkner, and Andreas Krause. Scalable training of mixture models via coresets. Advances in neural information processing systems, 24, 2011.
218
+
219
+ Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. Datacomp: In search of the next generation of multimodal datasets. arXiv preprint arXiv:2304.14108, 2023.
220
+ Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020.
221
+ Johannes Gasteiger, Janek Groß, and Stephan Günnemann. Directional message passing for molecular graphs. In International Conference on Learning Representations, 2020.
222
+ Justin Gilmer, Samuel S Schoenholz, Patrick F Riley, Oriol Vinyals, and George E Dahl. Neural message passing for quantum chemistry. In International conference on machine learning, pp. 1263-1272. PMLR, 2017.
223
+ Chengcheng Guo, Bo Zhao, and Yanbing Bai. Deepcore: A comprehensive library for coreset selection in deep learning. In Database and Expert Systems Applications: 33rd International Conference, DEXA 2022, Vienna, Austria, August 22-24, 2022, Proceedings, Part I, pp. 181-195. Springer, 2022.
224
+ Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. Advances in neural information processing systems, 30, 2017.
225
+ Xiaochuang Han and Yulia Tsvetkov. Influence tuning: Demoting spurious correlations via instance attribution and instance-driven updates. In Findings of the Association for Computational Linguistics: EMNLP 2021, pp. 4398-4409, 2021.
226
+ SU Hongjin, Jungo Kasai, Chen Henry Wu, Weijia Shi, Tianlu Wang, Jiayi Xin, Rui Zhang, Mari Ostendorf, Luke Zettlemoyer, Noah A Smith, et al. Selective annotation makes language models better few-shot learners. In The Eleventh International Conference on Learning Representations, 2022.
227
+ Rishabh Iyer, Ninad Khargoankar, Jeff Bilmes, and Himanshu Asanani. Submodular combinatorial information measures with applications in machine learning. In Algorithmic Learning Theory, pp. 722-754. PMLR, 2021.
228
+ Ziheng Jiang, Chiyuan Zhang, Kunal Talwar, and Michael C Mozer. Characterizing structural regularities of labeled data in overparameterized models. In International Conference on Machine Learning, pp. 5034-5044. PMLR, 2021.
229
+ Jeff Johnson, Matthijs Douze, and Hervé Jégou. Billion-scale similarity search with GPUs. IEEE Transactions on Big Data, 7(3):535-547, 2019.
230
+ Siddharth Joshi and Baharan Mirzasoleiman. Data-efficient contrastive self-supervised learning: Most beneficial examples for supervised learning contribute the least. In International conference on machine learning, pp. 15356-15370. PMLR, 2023.
231
+ Jean Kaddour. The minipile challenge for data-efficient language models. arXiv preprint arXiv:2304.08442, 2023.
232
+ Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020.
233
+ Krishnateja Killamsetty, Alexandre V Evfimievski, Tejaswini Pedapati, Kiran Kate, Lucian Popa, and Rishabh Iyer. Milo: Model-agnostic subset selection framework for efficient model training and tuning. arXiv preprint arXiv:2301.13287, 2023.
234
+ Krishnateja Killamsetty, Sivasubramanian Durga, Ganesh Ramakrishnan, Abir De, and Rishabh Iyer. Grad-match: Gradient matching based data subset selection for efficient deep model training. In International Conference on Machine Learning, pp. 5464-5474. PMLR, 2021a.
235
+
236
+ Krishnateja Killamsetty, Durga Sivasubramanian, Ganesh Ramakrishnan, and Rishabh Iyer. Glister: Generalization based data subset selection for efficient and robust learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 8110-8118, 2021b.
237
+ Taehyeong Kim, Injune Hwang, Hyundo Lee, Hyunseo Kim, Won-Seok Choi, Joseph J Lim, and Byoung-Tak Zhang. Message passing adaptive resonance theory for online active semi-supervised learning. In International Conference on Machine Learning, pp. 5519-5529. PMLR, 2021.
238
+ Yeachan Kim and Bonggun Shin. In defense of core-set: A density-aware core-set selection for active learning. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 804-812, 2022.
239
+ Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
240
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019.
241
+ Andrew Maas, Raymond E Daly, Peter T Pham, Dan Huang, Andrew Y Ng, and Christopher Potts. Learning word vectors for sentiment analysis. In Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies, pp. 142-150, 2011.
242
+ Pratyush Maini, Sachin Goyal, Zachary C Lipton, J Zico Kolter, and Aditi Raghunathan. T-mars: Improving visual representations by circumventing text feature learning. arXiv preprint arXiv:2307.03132, 2023.
243
+ Baharan Mirzasoleiman, Jeff Bilmes, and Jure Leskovec. Coresets for data-efficient training of machine learning models. In International Conference on Machine Learning, pp. 6950-6960. PMLR, 2020.
244
+ Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. Adversarial nli: A new benchmark for natural language understanding. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 4885-4901, 2020.
245
+ Eric W Noreen. Computer-intensive methods for testing hypotheses. Wiley New York, 1989.
246
+ Dongmin Park, Dimitris Papailiopoulos, and Kangwook Lee. Active learning is a strong baseline for data subset selection. In Has it Trained Yet? NeurIPS 2022 Workshop, 2022.
247
+ Mansheej Paul, Surya Ganguli, and Gintare Karolina Dziugaite. Deep learning on a data diet: Finding important examples early in training. Advances in Neural Information Processing Systems, 34: 20596-20607, 2021.
248
+ Geoff Pleiss, Tianyi Zhang, Ethan Elenberg, and Kilian Q Weinberger. Identifying mislabeled data using the area under the margin ranking. Advances in Neural Information Processing Systems, 33: 17044-17056, 2020.
249
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021.
250
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551, 2020.
251
+ HSVNS Kowndinya Renduchintala, Krishnateja Killamsetty, Sumit Bhatia, Milan Aggarwal, Ganesh Ramakrishnan, Rishabh Iyer, and Balaji Krishnamurthy. Ingenious: Using informative data subsets for efficient pre-training of large language models. arXiv preprint arXiv:2305.06677, 2023.
252
+ Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A core-set approach. In International Conference on Learning Representations, 2018.
253
+
254
+ Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Y Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pp. 1631-1642, 2013.
255
+ Ben Sorscher, Robert Geirhos, Shashank Shekhar, Surya Ganguli, and Ari Morcos. Beyond neural scaling laws: beating power law scaling via data pruning. Advances in Neural Information Processing Systems, 35:19523-19536, 2022.
256
+ Swabha Swayamdipta, Roy Schwartz, Nicholas Lourie, Yizhong Wang, Hannaneh Hajishirzi, Noah A Smith, and Yejin Choi. Dataset cartography: Mapping and diagnosing datasets with training dynamics. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 9275-9293, 2020.
257
+ Robert J Tibshirani and Bradley Efron. An introduction to the bootstrap. Monographs on statistics and applied probability, 57:1-436, 1993.
258
+ Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forgetting during deep neural network learning. In International Conference on Learning Representations, 2018.
259
+ Peter Turney. Thumbs up or thumbs down? semantic orientation applied to unsupervised classification of reviews. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 417-424, 2002.
260
+ Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008.
261
+ Shikhar Vashishth, Prateek* Yadav, Manik* Bhandari, Piyush Rai, Chiranjib Bhattacharyya, and Partha Talukdar. Incorporating syntactic and semantic information in word embeddings using graph convolutional networks. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 3308-3318, Florence, Italy, July 2019a. Association for Computational Linguistics. doi: 10.18653/v1/P19-1320. URL https://aclanthology.org/P19-1320.
262
+ Shikhar* Vashishth, Prateek* Yadav, Manik Bhandari, and Partha Talukdar. Confidence-based graph convolutional networks for semi-supervised learning. In Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine Learning Research, pp. 1792-1801. PMLR, 16-18 Apr 2019b. URL https://proceedings.mlr.press/v89/vashishth19a.html.
263
+ Kai Wei, Rishabh Iyer, and Jeff Bilmes. Submodularity in data subset selection and active learning. In International conference on machine learning, pp. 1954-1963. PMLR, 2015.
264
+ Max Welling. Herding dynamical weights to learn. In Proceedings of the 26th Annual International Conference on Machine Learning, pp. 1121-1128, 2009.
265
+ Xiaobo Xia, Jiale Liu, Jun Yu, Xu Shen, Bo Han, and Tongliang Liu. Moderate coreset: A universal method of data selection for real-world data-efficient deep learning. In The Eleventh International Conference on Learning Representations, 2023.
266
+ Naganand Yadati, Madhav Nimishakavi, Prateek Yadav, Vikram Nitin, Anand Louis, and Partha Talukdar. Hypergcn: A new method for training graph convolutional networks on hypergraphs. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf.
267
+ Prateek Yadav, Madhav Nimishakavi, Naganand Yadati, Shikhar Vashishth, Arun Rajkumar, and Partha Talukdar. Lovasz convolutional networks. In Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine Learning Research, pp. 1978–1987. PMLR, 16–18 Apr 2019. URL https://proceedings.mlr.press/v89/yadav19a.html.
268
+
269
+ Shuo Yang, Zeke Xie, Hanyu Peng, Min Xu, Mingming Sun, and Ping Li. Dataset pruning: Reducing training data by examining generalization influence. arXiv preprint arXiv:2205.09329, 2022.
270
+ Yu Yang, Hao Kang, and Baharan Mirzasoleiman. Towards sustainable learning: Coresets for data-efficient deep learning. In International Conference on Machine Learning, pp. 39314-39330. PMLR, 2023.
271
+ Yaodong Yu, Kwan Ho Ryan Chan, Chong You, Chaobing Song, and Yi Ma. Learning diverse and discriminative representations via the principle of maximal coding rate reduction. Advances in Neural Information Processing Systems, 33:9422-9434, 2020.
272
+ Yu Yu, Shahram Khadivi, and Jia Xu. Can data diversity enhance learning generalization? In Proceedings of the 29th international conference on computational linguistics, pp. 4933-4945, 2022.
273
+ Xiang Zhang, Junbo Zhao, and Yann LeCun. Character-level convolutional networks for text classification. Advances in neural information processing systems, 28, 2015.
274
+ Haizhong Zheng, Rui Liu, Fan Lai, and Atul Prakash. Coverage-centric coreset selection for high pruning rates. arXiv preprint arXiv:2210.15809, 2022.
275
+ Tianyi Zhou and Jeff Bilmes. Minimax curriculum learning: Machine teaching with desirable difficulties and scheduled diversity. In International conference on learning representations, 2018.
276
+ Tianyi Zhou, Shengjie Wang, and Jeffrey Bilmes. Curriculum learning by dynamic instance hardness. Advances in Neural Information Processing Systems, 33:8602-8613, 2020.
277
+
278
+ # OVERVIEW
279
+
280
+ The appendix is organized as follows:
281
+
282
+ Section A: Details of the datasets, baselines and the best hyperparameters for our models.
283
+
284
+ Section B: Computational complexity of $\mathbb{D}^2$ PRUNING for all datasets.
285
+
286
+ Section C: Additional results for high pruning rates and ablation experiments.
287
+
288
+ Section D: Qualitative analysis of coresets selected via $\mathbb{D}^2$ PRUNING.
289
+
290
+ Section E: Limitations and license.
291
+
292
+ # A DATASETS & HYPERPARAMETERS
293
+
294
+ # A.1 DATASETS
295
+
296
+ Vision Benchmarks. We use the CIFAR10, CIFAR100 (Krizhevsky et al., 2009) and ImageNet-1K (Deng et al., 2009) image classification datasets for our experiments on vision benchmarks. The CIFAR10 dataset consists of 60000 32x32 color images for 10 classes, with 6000 images per class. The training and test splits contain 50000 and 10000 images respectively. The CIFAR100 dataset has 100 classes containing 500 and 100 images per class in the training and test splits respectively. Details about the class labels in CIFAR10, CIFAR100 datasets can be found here. The ImageNet-1K dataset comprises approximately 1.2 million real-world images distributed over 1000 object classes. It contains 1,281,167 and 50,000 images in training and validation splits respectively.
297
+
298
+ NLP Benchmarks. We select two popularly used NLP tasks i.e. natural language inference (NLI) (Bowman et al., 2015) and sentiment analysis (Turney, 2002). For natural language inference, we use the Adversarial NLI dataset (Nie et al., 2020) that has been created in an iterative human-and-model-in-the-loop adversarial procedure. During each iteration, human annotators are instructed to devise examples that the current best models are unable to answer correctly. The models are trained on these challenging annotations for stronger performance. Multiple rounds of such iterations result in a challenging NLI benchmark. We use the data created in the third (and final) round of this process which contains 100459, 1200, and 1200 examples in the training, development, and test splits respectively. We use the ImDB reviews dataset (Maas et al., 2011) for the sentiment analysis task. The original dataset contains 25000 examples each in the training and test splits and is a binary
299
+
300
+ Table 4: Best values of nearest-neighbors $(k)$ and reverse message passing weight $(\gamma_r)$ for vision datasets. See a discussion on these hyperparameters in Sec. 5.2.
301
+
302
+ <table><tr><td>Dataset (→)</td><td colspan="6">CIFAR10</td><td colspan="6">CIFAR100</td><td colspan="6">ImageNet-1K</td></tr><tr><td>Pruning Rate (→)</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>Nearest Neighbors (k)</td><td>-</td><td>10</td><td>5</td><td>1</td><td>2</td><td>2</td><td>-</td><td>10</td><td>10</td><td>10</td><td>5</td><td>15</td><td>-</td><td>50</td><td>50</td><td>100</td><td>10</td><td>10</td></tr><tr><td>Reverse Message Passing (γr)</td><td>-</td><td>0.9</td><td>1.0</td><td>0.1</td><td>0.0</td><td>0.0</td><td>-</td><td>0.9</td><td>0.8</td><td>0.3</td><td>0.3</td><td>0.0</td><td>-</td><td>1.0</td><td>1.0</td><td>0.3</td><td>0.1</td><td>0.0</td></tr></table>
303
+
304
+ Table 5: Best values of nearest-neighbors $(k)$ and reverse message passing weight $(\gamma_r)$ for NLP datasets and self-supervised $\mathbb{D}^2$ PRUNING of ImageNet-1K. See details in Sec. 5.2.
305
+
306
+ <table><tr><td>Dataset (→)</td><td colspan="6">Adversarial NLI</td><td colspan="6">ImDB(2K)</td><td colspan="6">ImageNet-1K (self-supervised)</td></tr><tr><td>Pruning Rate (→)</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>Nearest Neighbors (k)</td><td>-</td><td>15</td><td>10</td><td>5</td><td>5</td><td>5</td><td>-</td><td>10</td><td>10</td><td>10</td><td>5</td><td>2</td><td>-</td><td>50</td><td>100</td><td>25</td><td>10</td><td>25</td></tr><tr><td>Reverse Message Passing (γr)</td><td>-</td><td>1.0</td><td>1.0</td><td>0.1</td><td>0.1</td><td>0.0</td><td>-</td><td>1.0</td><td>0.8</td><td>0.3</td><td>0.0</td><td>0.0</td><td>-</td><td>1.0</td><td>1.0</td><td>0.5</td><td>0.5</td><td>0.0</td></tr></table>
307
+
308
+ classification dataset. Our experiments showed that models trained on $10\%$ of this dataset achieved nearly the same performance as models trained on $100\%$ of the dataset. We observed similar trends for other popular sentiment analysis benchmarks as well, such as Yelp Reviews (Zhang et al., 2015) and SST2 (Socher et al., 2013). Hence, we created an in-house version of the ImDB Reviews dataset that contains 2,000 and 1,000 samples in the training and development splits respectively, randomly selected from the original training set. We retain the original test split containing 25,000 samples for evaluation in our experiments.
309
+
310
+ # A.2 BASELINES
311
+
312
+ (Supervised) We compare $\mathbb{D}^2$ PRUNING with several score-based and geometry-based coreset selection methods derived from the training dynamics of a model trained on the full dataset, as discussed in Zheng et al. (2022): A) Random selection of examples. B) Entropy (Coleman et al., 2019) of a model's prediction vector. C) Forgetting (Toneva et al., 2018) score for each example, i.e., the number of times a model predicts the example incorrectly after having predicted it correctly in the previous epoch. D) EL2N (Paul et al., 2021), i.e., the L2 norm of error vectors. E) Area under the margin (Pleiss et al., 2020), which measures the gap between the prediction probability of the correct target and the next highest probability target. F) Moderate coresets (Xia et al., 2023), which selects samples at the median distance from the class center. G) Coverage-based Coreset Selection (CCS) (Zheng et al., 2022), which divides a range of difficulty scores into equal-sized bins and randomly samples from each bin, and is state-of-the-art for high pruning rates. H) $\mathbf{CCS} + \mathbf{k}$ -Center, where k-center samples are selected within each CCS bin. I) BADGE, which selects diverse samples using k-means++ in the gradient vector space. J) GLISTER (Killamsetty et al., 2021b), which uses bi-level optimization to select robust coresets. K) CAL-SDS2 (Das et al., 2023), which combines a facility location submodular function (Iyer et al., 2021) with entropy scores to unify the effects of difficulty score and diversity. L) INGENIOUS (Renduchintala et al., 2023), a diversity-only approach that uses facility location as the information gain function for NLP tasks. (Unsupervised) We compare $\mathbb{D}^2$ PRUNING with A) Prototypicality (Sorscher et al., 2022), which uses self-supervised embeddings to compute k-means clusters and treats samples at a farther distance from the cluster center as more important, B) CCS over prototypicality scores, and C) Moderate coreset selection (Xia et al., 2023) over the self-supervised embeddings.
313
+
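+ To make the coverage-based selection baseline (CCS) concrete, below is a minimal sketch of difficulty-stratified sampling under our reading of Zheng et al. (2022); the function name, the equal-width bins, and the uniform per-bin budget are illustrative simplifications, not the reference implementation.
+
+ ```python
+ import numpy as np
+
+ def ccs_select(difficulty, budget, n_bins=50, seed=0):
+     """Illustrative sketch of coverage-based selection: split the difficulty
+     range into equal-width bins (strata) and sample roughly evenly from each."""
+     rng = np.random.default_rng(seed)
+     difficulty = np.asarray(difficulty, dtype=float)
+     edges = np.linspace(difficulty.min(), difficulty.max(), n_bins + 1)
+     bins = [np.flatnonzero((difficulty >= lo) & (difficulty <= hi))
+             for lo, hi in zip(edges[:-1], edges[1:])]
+     bins = [b for b in bins if len(b)]          # drop empty strata
+     per_bin = int(np.ceil(budget / len(bins)))  # naive equal budget per stratum
+     picked = np.concatenate([rng.choice(b, size=min(per_bin, len(b)), replace=False)
+                              for b in bins])
+     return picked[:budget]
+ ```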
314
+ # A.3 TRAINING HYPERPARAMETERS
315
+
316
+ **Coreset Selection.** We use the recommended hyperparameters in Zheng et al. (2022) for experiments using Coverage-based Coreset Selection (CCS), i.e., 50 bins (or strata) for all pruning rates. Models trained on vision datasets are also subjected to a hard cutoff rate $\beta$ on the difficulty score for eliminating outliers or erroneous samples (see Zheng et al. (2022) for the values). We report the best hyperparameters for $\mathbb{D}^2$ PRUNING in Tabs. 4 & 5.
317
+
318
+ Models. We follow the best training hyperparameters for the ResNet18 and ResNet34 models as suggested in Zheng et al. (2022) to remain comparable to the numbers reported in their work. For fine-tuning pretrained RoBERTa on NLP datasets, we perform a grid search over learning
319
+
320
+ rates $\{1e^{-5}, 2e^{-5}, 5e^{-5}, 1e^{-4}\}$ and batch sizes $\{8, 16, 32\}$ using $100\%$ of the data, which results in a learning rate of $1e^{-4}$ and a batch size of 32 for the Adversarial NLI and ImDB (2k) datasets. Models are trained on pruned datasets using the same hyperparameters that are used for training on $100\%$ of the data. The maximum number of training steps is kept constant across all pruning rates. RoBERTa models are trained for 10,000 and 1,500 training steps for the Adversarial NLI and ImDB (2k) datasets respectively, with early stopping.
321
+
322
+ Algorithm 1 $\mathbb{D}^2$ PRUNING for Data Selection
323
+ 1: if selection = supervised then
324
+ 2: Input Data: D = < x, y >
325
+ 3: else
326
+ 4: Input Data: D = < x >
327
+ 5: end if
328
+ 6: Validation Data: D_val = < x, y >
329
+ 7: Test Data: D_test = < x, y >
330
+ 8:
331
+ 9: Train:
332
+ 10: $\theta_t$ ← initialize trainable parameters
333
+ 11: for epoch = 1, 2, ..., N do
334
+ 12: Train $\theta_t$ on D
335
+ 13: end for
336
+ 14:
337
+ 15: Optimize Data Selection using $\mathbb{D}^2$ PRUNING:
338
+ 16: $\{\mathbf{k}\} \gets$ grid search values for nearest-neighbor hyperparameter in $\mathbb{D}^2$ PRUNING
339
+ 17: $\{\gamma_r\} \gets$ grid search values for reverse message passing hyperparameter in $\mathbb{D}^2$ PRUNING
340
+ 18: $|\mathbf{D}_s|\gets$ number of samples to be selected
341
+ 19: for $k$ in $\{\mathbf{k}\}$ do
342
+ 20: for $\gamma_r$ in $\{\gamma_r\}$ do
343
+ 21:
344
+ 22: Select $\mathbf{D}_s \subset \mathbf{D}$ using $\mathbb{D}^2$ PRUNING:
345
+ 23: $\mathcal{G} \gets$ initialize graph in $\mathbb{D}^2$ PRUNING using $k, \gamma_r, \theta_t$
346
+ 24: for $d = 1, 2, \dots, D$ do
347
+ 25: Perform forward message passing
348
+ 26: end for
349
+ 27: for $i = 1, 2, \dots, |\mathbf{D}_s|$ do
350
+ 28: Select sample with highest node feature and add to $\mathbf{D}_s$
351
+ 29: Downweight neighbors of selected sample
352
+ 30: end for
353
+ 31: Obtain labels for $\mathbf{D}_s$
354
+ 32:
355
+ 33: Train on $\mathbf{D}_s$ :
356
+ 34: $\theta_v \gets$ initialize trainable parameters
357
+ 35: for epoch = 1, 2, ..., N do
358
+ 36: Train $\theta_v$ on $\mathbf{D}_s$
359
+ 37: end for
360
+ 38: Evaluate $\theta_v$ on $\mathbf{D}_{val}$
361
+ 39:
362
+ 40: end for
363
+ 41: end for
364
+ 42:
365
+ 43: Evaluate best $\theta_v$ on $\mathbf{D}_{test}$
366
+
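+ For readers who prefer code to pseudocode, the following is a compact sketch of the inner selection step of Algorithm 1 (graph initialization, one round of forward message passing, and greedy selection with neighborhood downweighting). The exact edge-weight and downweighting formulas here are illustrative stand-ins, not the implementation used for the reported results.
+
+ ```python
+ import numpy as np
+
+ def d2_select(embeddings, scores, k, gamma_r, budget):
+     """Sketch of lines 22-31 of Algorithm 1: build a k-NN graph over sample
+     embeddings, propagate difficulty scores to neighbors once (forward message
+     passing), then greedily pick high-scoring nodes while downweighting their
+     neighborhoods (reverse message passing, controlled by gamma_r)."""
+     scores = np.asarray(scores, dtype=float)
+     dists = np.linalg.norm(embeddings[:, None, :] - embeddings[None, :, :], axis=-1)
+     np.fill_diagonal(dists, np.inf)
+     neighbors = np.argsort(dists, axis=1)[:, :k]                    # (n, k)
+     edge_w = np.exp(-np.take_along_axis(dists, neighbors, axis=1))  # (n, k), in (0, 1]
+
+     node_feat = scores + (edge_w * scores[neighbors]).sum(axis=1)   # forward pass
+
+     picked = np.zeros(len(scores), dtype=bool)
+     selected = []
+     for _ in range(budget):
+         i = int(np.argmax(np.where(picked, -np.inf, node_feat)))
+         selected.append(i)
+         picked[i] = True
+         # Downweight the selected node's neighbors so nearby (redundant)
+         # samples become less likely to be chosen next.
+         node_feat[neighbors[i]] *= np.clip(1.0 - gamma_r * edge_w[i], 0.0, 1.0)
+     return selected
+
+ # Example: pick 100 of 1,000 random 16-d points with k=10 and gamma_r=0.9.
+ rng = np.random.default_rng(0)
+ coreset = d2_select(rng.normal(size=(1000, 16)), rng.random(1000), 10, 0.9, 100)
+ ```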
367
+ # B COMPUTATIONAL COMPLEXITY OF $\mathbb{D}^2$ PRUNING
368
+
369
+ We divide the runtime of $\mathbb{D}^2$ PRUNING into: (1) Graph creation which includes graph initialization and forward message passing, (2) Iterative selection (see Sec. 3) and present results in Tab. 6 for $100\%$ data selection of the various datasets used in our experiments. Numbers are rounded to the nearest minute. Runtime for iterative selection is proportional to the size of the coreset being selected. Hence, in practice, the runtime for iterative selection is even lower since we only select a subset of the data in our experiments. Time estimates of graph creation for different datasets are not strictly comparable because we run jobs of different batch sizes according to the size of the dataset to prevent
370
+
371
+ Table 6: Computational Overhead for $\mathbb{D}^2$ PRUNING. Comparison of runtime of $\mathbb{D}^2$ PRUNING for $100\%$ selection of the various datasets in our experiments. $\mathbb{D}^2$ PRUNING can be divided into the 'Graph creation' and 'Iterative selection' steps (see Sec. 3). Larger datasets like DataComp have a 'faiss indexing' step to enable fast nearest-neighbor lookup. Results are computed using a multi-thread implementation of $\mathbb{D}^2$ PRUNING using 8 workers on a CPU with 32 cores.
372
+
373
+ <table><tr><td>Dataset (→)</td><td>CIFAR10</td><td>CIFAR100</td><td>Adv. NLI</td><td>ImDB</td><td>DataComp</td><td>ImageNet-1K</td></tr><tr><td>faiss indexing</td><td>-</td><td>-</td><td>-</td><td>-</td><td>25m</td><td>-</td></tr><tr><td>Graph creation</td><td>2m</td><td>1m</td><td>4m</td><td>1m</td><td>30m</td><td>15m</td></tr><tr><td>Iterative selection</td><td>1m</td><td>1m</td><td>2m</td><td>1m</td><td>7m</td><td>8m</td></tr><tr><td>Total selection time</td><td>3m</td><td>2m</td><td>6m</td><td>2m</td><td>1h 2m</td><td>23m</td></tr><tr><td>Training time</td><td>4h 30m</td><td>4h 45m</td><td>2h</td><td>15m</td><td>4h 15m</td><td>125h</td></tr></table>
374
+
375
+ Table 7: Results on Vision Datasets. Comparison of performance (acc.) of $\mathbb{D}^2$ PRUNING with existing coreset selection methods for very high pruning rates on CIFAR10, CIFAR100 using ResNet18, and ImageNet-1k using ResNet34 models. Higher is better.
376
+
377
+ <table><tr><td rowspan="2">Dataset (→)Pruning Rate (→)</td><td colspan="6">CIFAR10</td><td colspan="6">CIFAR100</td><td colspan="6">ImageNet-1K</td></tr><tr><td>0%</td><td>90%</td><td>95%</td><td>99%</td><td>99.5%</td><td>99.9%</td><td>0%</td><td>90%</td><td>95%</td><td>99%</td><td>99.5%</td><td>99.9%</td><td>0%</td><td>90%</td><td>95%</td><td>99%</td><td>99.5%</td><td>99.9%</td></tr><tr><td>Random</td><td>95.5</td><td>79.0</td><td>70.0</td><td>39.8</td><td>35.8</td><td>23.8</td><td>78.7</td><td>44.8</td><td>28.7</td><td>10.8</td><td>6.13</td><td>3.5</td><td>73.1</td><td>52.3</td><td>41.1</td><td>9.6</td><td>4.1</td><td>0.9</td></tr><tr><td>CCS (Zheng et al., 2022)</td><td>-</td><td>86.9</td><td>77.2</td><td>41.8</td><td>33.0</td><td>25.7</td><td>-</td><td>57.3</td><td>36.9</td><td>13.5</td><td>8.8</td><td>3.6</td><td>-</td><td>57.3</td><td>45.9</td><td>10.1</td><td>6.2</td><td>1.1</td></tr><tr><td>D2PRUNING</td><td>-</td><td>87.1</td><td>74.5</td><td>44.4</td><td>33.8</td><td>24.6</td><td>-</td><td>56.9</td><td>35.8</td><td>14.2</td><td>5.9</td><td>2.4</td><td>-</td><td>55.6</td><td>44.8</td><td>7.6</td><td>7.2</td><td>1.9</td></tr></table>
378
+
379
+ OOM issues when computing the similarity matrix. Additionally, we provide the approximate training times for each dataset, computed on a single A100 GPU.
380
+
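+ The 'faiss indexing' row in Tab. 6 refers to a nearest-neighbor lookup used to build the graph for very large datasets. A minimal sketch of that step is shown below; the flat L2 index and the helper name are illustrative choices, not necessarily the configuration used in our runs.
+
+ ```python
+ import numpy as np
+ import faiss
+
+ def build_knn_graph(embeddings, k):
+     """Sketch of the indexing + graph-creation step: index all sample
+     embeddings and query each sample's k nearest neighbors in one search."""
+     emb = np.ascontiguousarray(embeddings, dtype="float32")
+     index = faiss.IndexFlatL2(emb.shape[1])   # exact L2 index; large runs may shard
+     index.add(emb)
+     # Ask for k + 1 neighbors because every point returns itself at distance 0.
+     dists, nbrs = index.search(emb, k + 1)
+     return dists[:, 1:], nbrs[:, 1:]
+ ```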
381
+ # C ADDITIONAL RESULTS
382
+
383
+ # C.1 RESULTS ON HIGH PRUNING RATES
384
+
385
+ We perform coreset selection at very high pruning rates (Guo et al., 2022) for CIFAR10, CIFAR100 and ImageNet-1K using $\mathbb{D}^2$ PRUNING and a select few baselines, and present results in Tab. 7. $\mathbb{D}^2$ PRUNING outperforms random selection as well as CCS (Zheng et al., 2022) in some scenarios, such as by $3\%$ and $0.5\%$ at $99\%$ and $99.5\%$ pruning of CIFAR10 respectively. However, we do not see any consistent trends in improvement using $\mathbb{D}^2$ PRUNING at very high pruning rates, especially for ImageNet-1K. Improvement margins using CCS also go down at high pruning rates, suggesting that diversity is not as important as difficulty when the data budget is extremely low (Sorscher et al., 2022).
386
+
387
+ # C.2 ABLATION EXPERIMENTS
388
+
389
+ Multiple message passing iterations. To better understand the effect of the forward message passing procedure in $\mathbb{D}^2$ PRUNING, we visualize a random subset of CIFAR10 samples in a 2-dimensional t-SNE (Van der Maaten & Hinton, 2008) embedding space in Fig. 5, before and after forward message passing under various scenarios. When the graph is first initialized in $\mathbb{D}^2$ PRUNING, the node feature is initialized with the sample's importance score (see Fig. 5A). A single iteration of forward message passing over the local neighborhood of a sample, consisting of its $k$ nearest neighbors, leads to significant up-weighting of the neighbors of a very important node. Thus, a higher $k$ leads to up-weighting of a larger neighborhood of samples in the spatial dimension (see Fig. 5B vs. Fig. 5C). Consequently, the distribution of normalized node feature values has a heavier tail with increasing $k$, as compared to the distribution of original importance scores. In contrast, multiple iterations of message passing at the same $k$ have an effect similar to that of Gaussian smoothing in the embedding space (see Fig. 5D). With increasing iterations, the local neighborhood of a node becomes increasingly similar to that of other nodes in the graph, and hence, all nodes receive similar updates (see Fig. 5D vs. Fig. 5E). As a result, the distribution of node features is biased towards a narrow spectrum of values that no longer benefits the data selection task (see results in Tab. 8).
390
+
391
+ ![](images/bb879b4106d1c19992ba71c176b970dbe072920b357d92ea46d2aacfdd850658.jpg)
392
+ Figure 5: Effect of forward message passing iterations. (top) Scatter plots of CIFAR10 samples' normalized node feature values in a t-SNE embedding space $(dim = 2)$ and (bottom) corresponding histograms for the following scenarios: (A) Initialization of graph in $\mathbb{D}^2$ PRUNING, one-shot forward message passing with $k = 5$ (B) and $k = 10$ (C) nearest neighbors, and two-shot (D), three-shot (E) forward message passing at $k = 10$ .
393
+
394
+ Table 8: Ablation Analysis. Results from ablations of $\mathbb{D}^2$ PRUNING by varying the embedding representation (rows A-B), difficulty score (rows C-F), and multiple iterations of message passing (rows H-J) on CIFAR10 and Adversarial NLI datasets. Higher is better.
395
+
396
+ <table><tr><td rowspan="2">Dataset (→)<br>Pruning Rate (→)</td><td colspan="6">CIFAR10</td><td colspan="6">Adversarial NLI</td></tr><tr><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td><td>0%</td><td>30%</td><td>50%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>A. Last Layer / [CLS]</td><td>95.5</td><td>95.7</td><td>94.9</td><td>93.3</td><td>91.4</td><td>87.1</td><td>48.8</td><td>48.9</td><td>46.7</td><td>45.3</td><td>44.5</td><td>40.3</td></tr><tr><td>B. Pre-final convolutional / non-[CLS]</td><td>-</td><td>95.1</td><td>94.9</td><td>92.7</td><td>90.5</td><td>85.4</td><td>-</td><td>45.3</td><td>45.3</td><td>42.7</td><td>42.8</td><td>39.1</td></tr><tr><td>C. Forgetting / Variance</td><td>95.5</td><td>95.7</td><td>94.9</td><td>93.3</td><td>91.4</td><td>87.1</td><td>48.8</td><td>48.9</td><td>46.7</td><td>45.3</td><td>44.5</td><td>40.3</td></tr><tr><td>D. Entropy</td><td>-</td><td>95.6</td><td>94.6</td><td>93.8</td><td>91.9</td><td>87.3</td><td>-</td><td>48.5</td><td>46.1</td><td>45.4</td><td>44.2</td><td>40.1</td></tr><tr><td>E. EL2N</td><td>-</td><td>94.9</td><td>94.2</td><td>93.1</td><td>91.1</td><td>86.0</td><td>-</td><td>49.3</td><td>47.9</td><td>45.2</td><td>44.3</td><td>40.3</td></tr><tr><td>F. 1-shot</td><td>95.5</td><td>95.7</td><td>94.9</td><td>93.3</td><td>91.4</td><td>87.1</td><td>48.8</td><td>48.9</td><td>46.7</td><td>45.3</td><td>44.5</td><td>40.3</td></tr><tr><td>G. 2-shot</td><td>-</td><td>94.0</td><td>94.1</td><td>89.9</td><td>89.2</td><td>85.6</td><td>-</td><td>47.4</td><td>45.2</td><td>44.9</td><td>43.1</td><td>40.4</td></tr></table>
398
+
399
+ Effect of importance scores. We experiment with different importance scores in $\mathbb{D}^2$ PRUNING and present results in Tab. 8 in Appendix. We find that the entropy score (Coleman et al., 2019) benefits performance on CIFAR10 at higher pruning rates, whereas EL2N (Paul et al., 2021) benefits performance on Adversarial NLI for low pruning rates. Importantly, we do not see large drops in performance with any of these score functions, suggesting that the idea of combining diversity and difficulty in $\mathbb{D}^2$ PRUNING is universally beneficial. Further, improved difficulty metrics can be paired with $\mathbb{D}^2$ PRUNING for larger improvements in data selection.
400
+
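+ As a reference for how these two alternative scores can be computed from a trained model's outputs, here is a small sketch; the softmax outputs `probs` and integer `labels` are assumed inputs, and we omit the averaging over several early-training checkpoints used in the original EL2N definition.
+
+ ```python
+ import numpy as np
+
+ def entropy_and_el2n(probs, labels):
+     """Sketch: prediction entropy and EL2N (L2 norm of the softmax error vector).
+     probs: (n, num_classes) softmax outputs; labels: (n,) integer targets."""
+     probs = np.asarray(probs, dtype=float)
+     entropy = -(probs * np.log(probs + 1e-12)).sum(axis=1)
+     one_hot = np.eye(probs.shape[1])[np.asarray(labels)]
+     el2n = np.linalg.norm(probs - one_hot, axis=1)
+     return entropy, el2n
+ ```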
401
+ Effect of embedding sources. Next, we experiment with alternative sources of feature embeddings for measuring the distance between two samples. Since the final layers of a task-specific model are known to be attuned to the task (Han & Tsvetkov, 2021), we instead extract features from the last convolutional layer in ResNet18 for CIFAR10 and use the average of non-[CLS] token embeddings in RoBERTa for the ImDB dataset (row B). We find that neither source is as effective as the features extracted from the last layer of the model trained on the full dataset. In particular, we see large drops in performance when non-[CLS] token features are used for representing diversity. We leave the study of the utility of different embedding spaces for measuring diversity to future work.
402
+
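+ To illustrate the two text-embedding variants compared in rows A and B of Tab. 8, the snippet below extracts both the [CLS]-position embedding and the mean of the remaining token embeddings from a pretrained RoBERTa encoder; the model choice and pooling details here are illustrative only.
+
+ ```python
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("roberta-base")
+ encoder = AutoModel.from_pretrained("roberta-base")
+
+ batch = tokenizer(["This movie was surprisingly good."], return_tensors="pt")
+ with torch.no_grad():
+     hidden = encoder(**batch).last_hidden_state            # (batch, seq_len, dim)
+
+ cls_feature = hidden[:, 0, :]                              # row A: [CLS]-style feature
+ mask = batch["attention_mask"][:, 1:].unsqueeze(-1).float()
+ non_cls_feature = (hidden[:, 1:, :] * mask).sum(1) / mask.sum(1)  # row B: mean of other tokens
+ ```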
403
+ # D ANALYSIS & DISCUSSION
404
+
405
+ Qualitative analysis of coresets selected by $\mathbb{D}^2$ PRUNING. In order to perform a qualitative analysis of the merits of $\mathbb{D}^2$ PRUNING, we first use the connectivity graph $\mathcal{G}$ to extract meaningful sub-populations from the entire ImageNet-1K dataset. For each sample, we recursively seek nearest neighbors that are situated at a distance in the embedding space that is less than a predefined threshold. Next, for each of these sub-populations, we differentiate the samples that appear in the coreset selected by $\mathbb{D}^2$ PRUNING at $30\%$ pruning of ImageNet-1K. We present and analyze a few representative sub-populations in Fig. 6. First, we observe several cases where $\mathbb{D}^2$ PRUNING successfully avoids selecting perceptual duplicates (Abbas et al., 2023) in the coreset (see top left and middle left in
406
+
407
+ ![](images/1c6ad2ece393ff552b5cd2574a54fa559e7454cc647b597be91a69cd575116fd.jpg)
408
+ Figure 6: Example of coresets selected by $\mathbb{D}^2$ PRUNING from ImageNet-1K at $30\%$ pruning rate. Image sub-populations are extracted from ImageNet-1K by a recursive traversal of the connectivity graph $\mathcal{G}$ initialized for $\mathbb{D}^2$ PRUNING. For each sub-population, we show the images retained in the coreset with $\checkmark$ and the images left out of the coreset with X.
409
+ Fig. 6). Next, we see multiple cases where a composite image is selected for the coreset, and images that contain one or more of the subjects/objects in the selected image are left out (see middle right in Fig. 6). Finally, we find that relying on the semantic similarity of pretrained embeddings can lead to the propagation of errors, as seen in the sub-population on the bottom right in Fig. 6. The images that contain dolphins are left out of the coreset because of their similarity to an image depicting a water landscape.
410
+
411
+ Visualization of data distribution in coresets. We showcase the results of various sampling methods for a single class in the CIFAR10 dataset in Fig. 2. The features are obtained from a ResNet18 network trained on the full training dataset and compressed to two dimensions using PCA (90% explained variance) for simpler visualization. As seen in Fig. 2(b), random sampling draws relatively more samples from the denser region of the distribution and, consequently, a higher percentage of easy samples features in the coreset after 90% pruning. By optimizing for diversity only via facility location (Iyer et al., 2021) submodular optimization (Fig. 2(c)), the diversity of the coreset remains high, but it is plagued by the same problem as random sampling, i.e., easier samples are preferred. Alternatively, using the graph-cut function with cosine similarity distance (Iyer et al., 2021) as the information gain function results in the selection of a narrow sliver of data from the 2-D space (Fig. 2(d)). Moderate coresets (Xia et al., 2023) also sample from a narrow area in the distribution, resulting in poor diversity, but a slightly better balance between easy and difficult samples (Fig. 2(e)). Finally, with our proposed method, the diversity remains high and the distribution of difficulty scores in the coreset is also balanced (Fig. 2(f)).
412
+
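+ A minimal sketch of the projection behind Fig. 2 follows; the feature array is a random stand-in for per-class ResNet18 features, and the explained-variance printout is only a sanity check on the 2-D view.
+
+ ```python
+ import numpy as np
+ from sklearn.decomposition import PCA
+
+ features = np.random.randn(5000, 512)        # placeholder for one class's ResNet18 features
+ pca = PCA(n_components=2).fit(features)
+ coords = pca.transform(features)             # 2-D points for the scatter plots in Fig. 2
+ print("variance explained:", pca.explained_variance_ratio_.sum())
+ ```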
413
+ # E LIMITATIONS & LICENSE
414
+
415
+ # E.1 LIMITATIONS
416
+
417
+ Access to Full Dataset & Pretrained Model. Similar to many previous coreset selection methods, our method relies on a model that has been pretrained or finetuned on the full dataset. We leverage the pretrained embeddings as well as the difficulty scores from this model. In doing so, we risk capturing the biases of the model. Further, one cannot use $\mathbb{D}^2$ PRUNING to create datasets from scratch and reduce annotation costs by avoiding redundant samples in the dataset. We note that an ideal data pruning method would not rely on access to the full dataset, so that it can be used for creating challenging and effective datasets in a cost-effective manner. Our experiments in self-supervised and unsupervised data selection show promising results in this direction.
418
+
419
+ # E.2 LICENSE
420
+
421
+ We will publicly release our code and models. We use standard licenses from the community and provide the following links to the licenses for the datasets that we used in the project.
422
+
423
+ CIFAR10, CIFAR100: Other
424
+
425
+ Adversarial NLI: Creative Commons
426
+
427
+ ImDB Reviews: Other
428
+
429
+ Counterfactual ImDB, NLI: Apache
430
+
431
+ DataComp: MIT
2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2372116692b6823e09b21bdf724e8fd4be9eaa83094210025fa7ca43dbe286f5
3
+ size 736443
2024/$_mathbb{D}^2$ Pruning_ Message Passing for Balancing Diversity & Difficulty in Data Pruning/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_pi$2vec_ Policy Representation with Successor Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_pi$2vec_ Policy Representation with Successor Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$_pi$2vec_ Policy Representation with Successor Features/bcc4039e-8090-4f89-a7c1-3608bf3d1ab2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a11fbae1372a41c35e4ee9f3f211f03204d87d75a93edd08feb1dc093d685199
3
+ size 2389864
2024/$_pi$2vec_ Policy Representation with Successor Features/full.md ADDED
@@ -0,0 +1,368 @@
 
 
 
1
+ # π2VEC: POLICY REPRESENTATION WITH SUCCESSOR FEATURES
2
+
3
+ Gianluca Scarpellini*†
4
+
5
+ Istituto Italiano di Tecnologia
6
+
7
+ Ksenia Konyushkova
8
+
9
+ Google DeepMind
10
+
11
+ Claudio Fantacci
12
+
13
+ Google DeepMind
14
+
15
+ Tom Le Paine
16
+
17
+ Google DeepMind
18
+
19
+ Yutian Chen
20
+
21
+ Google DeepMind
22
+
23
+ Misha Denil
24
+
25
+ Google DeepMind
26
+
27
+ †: Work done during an internship at Google DeepMind
28
+
29
+ *: Corresponding author gianluca.scarpellini@iit.it
30
+
31
+ # ABSTRACT
32
+
33
+ This paper introduces $\pi 2\mathbf{vec}$ , a method for representing black box policies as comparable feature vectors. Our method combines the strengths of foundation models that serve as generic and powerful state representations and successor features that can model the future occurrence of the states for a policy. $\pi 2\mathbf{vec}$ represents the behaviors of policies by capturing statistics of how the behavior evolves the features from a pretrained model, using a successor feature framework. We focus on the offline setting where both policies and their representations are trained on a fixed dataset of trajectories. Finally, we employ linear regression on $\pi 2\mathbf{vec}$ vector representations to predict the performance of held out policies. The synergy of these techniques results in a method for efficient policy evaluation in resource constrained environments.
34
+
35
+ # 1 INTRODUCTION
36
+
37
+ Robot time is an important bottleneck in applying reinforcement learning in real life robotics applications. Constraints on robot time have driven progress in sim2real, offline reinforcement learning (offline RL), and data efficient learning. However, these approaches do not address the problem of policy evaluation which is often time intensive as well. Various proxy metrics were introduced to eliminate the need for real robots in the evaluation. For example, in sim2real we measure the performance in simulation (Lee et al., 2021). In offline RL we rely on Off-policy Evaluation (OPE) methods (Gulcehre et al., 2020; Fu et al., 2021). For the purpose of deploying a policy in the real world, recent works focused on Offline Policy Selection (OPS), where the goal is to select the best performing policy relying only on offline data. While these methods are useful for determining coarse relative performance of policies, one still needs time on real robot for more reliable estimates (Levine et al., 2020).
38
+
39
+ Our proposed $\pi 2\mathrm{vec}$ aims at making efficient use of the evaluation time. Efficient offline policy evaluation and selection is relevant in reinforcement learning projects, where researchers often face the challenge of validating improvements. $\pi 2\mathrm{vec}$ enables researchers to make more informed decisions regarding which new policy iterations to prioritize for real-world testing or to identify and discard less promising options early in the development process. In particular, we predict the values of unknown policies from a set of policies with known values in an offline setting, where a large dataset of historical trajectories from other policies and human demonstrations is provided. The last step requires policies to be represented as vectors which are comparable and thus can serve as an input to the objective function. Prior work from Konyushova et al. (2021) represents policies by the actions that they take on a set of canonical states, under the assumption that similar actions in similar states imply similar behaviour. However, this assumption is sometimes violated in practice. This work aims at finding more suitable representation by characterizing the policies based on how they change the environment.
40
+
41
+ To represent policies, our method $\pi 2\mathrm{vec}$ combines two components: successor features and foundation models. We adapt the framework of Q-learning of successor features (Barreto et al., 2017) to the
42
+
43
+ ![](images/d97de553a015d0d5055fb1b58a31274034d4cebd2c75d7ca506c9ea44f09ad06.jpg)
44
+ Figure 1: $\pi 2\mathrm{vec}$ method relies on the successor feature framework, that we adopt in combination with a dataset of offline demonstrations and a visual foundation model $\phi$ . $\pi 2\mathrm{vec}$ represents each policy $\pi_{i}$ as a feature vector $\Psi_{\pi_i}^{\phi} \in \mathbb{R}^n$ . $\Psi_{\pi_i}^{\phi}$ encodes the expected behavior of a policy when deployed on an agent.
45
+
46
+ offline setting by applying the Fitted Q evaluation (FQE) algorithm (Le et al., 2019) which is typically used for off-policy evaluation (OPE). In this work the features for individual states are provided by a general purpose pretrained visual foundation model (Bommasani et al., 2021). The resulting representations can be used as a drop in replacement for the action-based representation used by Konyushova et al. (2021).
47
+
48
+ Our experiments show that $\pi 2\mathrm{vec}$ achieves solid results in different tasks and across different settings. To summarize, our main contributions are the following:
49
+
50
+ - We propose $\pi 2\mathrm{vec}$ , a novel policy representation of how the policies change the environment, which combines successor features, foundation models, and offline data;
51
+ - We evaluate our proposal through extensive experiments predicting return values of held out policies in 3 simulated and 2 real environments. Our approach outperforms the baseline and achieves solid results even in challenging real robotic settings and out-of-distribution scenarios;
52
+ - We investigate various feature encoders, ranging from semantic to geometrical visual foundation models, to show strengths and weaknesses of various representations for the task at hand.
53
+
54
+ # 2 RELATED WORK
55
+
56
+ Representation of black-box policies. In this paper, our objective is to create vector representations for policies to predict their performance. We treat policies as black-boxes (i.e., no access to internal state, parameters, or architectures) that yield actions for a given observation. It is important to emphasize that our objective differs from representation learning for RL (Schwarzer et al., 2020; Jaderberg et al., 2016; Laskin et al., 2020), as we focus on representing policies rather than training feature encoders for downstream tasks.
57
+
58
+ Konyushova et al. (2021) studied a setting where the goal is to identify the best policy from a set of policies with a dataset of offline experience and limited access to the environment. Each policy is represented by a vector of actions at a fixed set of states. While this representation performs well in certain applications, it may not be the most effective for predicting policy performance. For instance, consider two policies that generate random actions at each state. These policies do not exhibit meaningfully different behaviour, so for policy evaluation purposes, we expect them to be similar. However, the action policy representation categorizes these policies as different. This paper proposes a method to address this limitation by measuring trajectory-level changes in the environment.
59
+
60
+ In BCRL (Chang et al., 2022), a state-action feature representation is proposed for estimating policy performance. However, the representation of each policy is independent of other policies and thus cannot be employed to regress the performance of new policies given a set of evaluated policies.
61
+
62
+ Offline Policy Evaluation. Off-policy Evaluation (OPE) aims to evaluate a policy given access to trajectories generated by another policy. It has been extensively studied across many domains (Li et al., 2010; Theocharous et al., 2015; Kalashnikov et al., 2018; Nie et al., 2019). Broad categories of OPE methods include methods that use importance sampling (Precup, 2000), binary classification (Irpan et al., 2019), stationary state distribution (Liu et al., 2018), value functions (Sutton et al., 2016;
63
+
64
+ Le et al., 2019), and learned transition models (Zhang et al., 2021), as well as methods that combine two or more approaches (Farajtabar et al., 2018). The main focus of the OPEs approaches is on approximating the return values function for a trained policy, while $\pi 2\mathrm{vec}$ goes beyond classical OPE and focuses on encoding the behavior of the policy as vectors, in such a way that those vectors are comparable, to fit a performance predictor.
65
+
66
+ Foundation Models for Robotics. Foundation models are large, self-supervised models (Bommasani et al., 2021) known for their adaptability in various tasks (Sharma et al., 2023). We compare three representative foundation models (Radford et al., 2021; Dosovitskiy et al., 2021; Doersch et al., 2022). Our proposal, $\pi 2\mathrm{vec}$ , is independent of the feature encoder of choice. Better or domain-specific foundation models may improve results but are not the focus of this study.
67
+
68
+ # 3 METHODOLOGY
69
+
70
+ # 3.1 OVERVIEW
71
+
72
+ Our setting is the following. We start with a large dataset of historical trajectories $\mathbb{D}$ , and a policy-agnostic state-feature encoder $\phi : \mathbb{S} \to \mathbb{R}^N$ . Given a policy $\pi$ , our objective is to use these ingredients to create a policy embedding $\Psi_{\pi}^{\phi} \in \mathbb{R}^{N}$ that represents the behavior of $\pi$ (and can be used to predict its performance).
73
+
74
+ We aim to create this embedding offline, without running the policy $\pi$ in the environment. Although we can evaluate $\pi$ for any state in our historical dataset $\mathbb{D}$ , we emphasize that we do not have access to any on policy trajectories from $\pi$ , which significantly complicates the process of creating an embedding that captures the behavior of $\pi$ .
75
+
76
+ Our method $\pi 2\mathrm{vec}$ has three steps:
77
+
78
+ 1. Choose a policy-agnostic state-feature encoder $\phi$ . We discuss several options for $\phi$ below and in the experiments; however, $\pi 2\mathrm{vec}$ treats the policy-agnostic state-feature encoder as a black box, allowing us to leverage generic state-feature representations in our work.
79
+ 2. Train a policy-specific state-feature encoder $\psi_{\pi}^{\phi}:(\mathbb{S},\mathcal{A})\to \mathbb{R}^{N}$ . In this step we combine the policy-agnostic state-feature encoder $\phi$ , and the policy $\pi$ , to create policy-specific state-feature encoder by training on the historical dataset $\mathbb{D}$ . The policy-specific state features $\psi_{\pi}^{\phi}(s)$ capture statistics of how $\pi$ would change the environment were it to be run starting from the state $s$ .
80
+ 3. Aggregate the policy-specific state-features to create state-agnostic policy features $\Psi_{\pi}^{\phi}$ that represent the behavior of $\pi$ in a state-independent way.
81
+
82
+ Using the steps outlined above we can collect a dataset of policy-specific state-independent features paired with measured policy performance. This dataset can be used to train a model that predicts the performance of a policy from its features using supervised learning. Because we compute features for a policy using only offline data, when we receive a new policy we can compute its policy-specific state-independent features and apply the performance model to predict its performance before running it in the environment. In the following sections we expand on each step.
83
+
84
+ # 3.2 POLICY-AGNOSTIC STATE FEATURES
85
+
86
+ The role of the state-feature encoder $\phi$ is to produce an embedding that represents an individual state of the environment. In this paper we focus on state encoders $\phi : I \to \mathbb{R}^N$ that consume single images $I$ . Generically our method is agnostic to the input space of the state-feature encoder, but practically speaking it is convenient to work with image encoders because that gives us access to a wide range of pretrained generic image encoders that are available in the literature.
87
+
88
+ We also consider a few simple ways to construct more complex features from single image features. When each state provides multiple images we embed each image separately and sum the result to create a state embedding. We also consider creating embeddings for transitions $(s, s')$ by computing $\Delta \phi(s, s') \triangleq \phi(s') - \phi(s)$ . Both cases allow us to leverage features from pretrained models.
89
+
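+ A short sketch of these constructions is given below; `encoder` stands in for any frozen pretrained image encoder that maps a batch of images to feature vectors.
+
+ ```python
+ import torch
+
+ def state_features(encoder, view_images):
+     """Sum per-view embeddings when a state provides multiple camera images.
+     view_images: tensor of shape (num_views, C, H, W)."""
+     with torch.no_grad():
+         return encoder(view_images).sum(dim=0)
+
+ def transition_features(encoder, s_images, s_next_images):
+     """Delta-phi features for a transition (s, s'): phi(s') - phi(s)."""
+     return state_features(encoder, s_next_images) - state_features(encoder, s_images)
+ ```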
90
+ ![](images/559bd0c799d396dfb40998e6db486d330f8d91f63a848e97d3c86bcfb7c38142.jpg)
91
+ Figure 2: Given a trajectory from the dataset of offline demonstrations, we train successor feature $\psi_{\pi}^{\phi}(s_t)$ to predict the discounted sum of features $\sum_{i}\gamma^{i}\phi (s_{t + i})$ , where $\phi$ is a visual feature extractor and $\pi$ is a policy. Intuitively, $\phi (s_i)$ represents semantic changes in the current state of the environment $s_i$ , while successor feature $\psi_{\pi}^{\phi}(s_t)$ summarizes all future features encoded by $\phi$ if actions came from policy $\pi$ .
92
+
93
+ # 3.3 POLICY-SPECIFIC STATE FEATURES
94
+
95
+ The next step is to use the policy-agnostic state-feature encoder $\phi$ that provides a generic representation for individual states to train a policy-specific state-feature encoder $\psi_{\pi}^{\phi}:(S,\mathcal{A})\to \mathbb{R}^{N}$ that represents the effect that $\pi$ would have on the environment if it were run starting from the given state.
96
+
97
+ The work of Dayan (1993); Barreto et al. (2017) on successor features provides a basis for our approach to policy representation. We briefly review successor features here, and comment below on how we make use of them. We refer the reader to recent literature covering successor features Lehnert & Littman (2020); Brantley et al. (2021); Reinke & Alameda-Pineda (2021).
98
+
99
+ Suppose that the reward function for a task can be written as a linear function
100
+
101
+ $$
102
+ r(s, a, s') = \langle \phi(s, a, s'), \mathbf{w}_{\text{task}} \rangle , \tag{1}
103
+ $$
104
+
105
+ where $\phi(s, a, s') \in R^{N}$ encodes the state-transition as a feature vector and $\mathbf{w}_{\mathrm{task}} \in R^{N}$ are weights. Barreto et al. (2017) observe that if the reward can be factored as above, then the state-action-value function for a policy $\pi$ can be written as
106
+
107
+ $$
108
+ Q^{\pi}(s, a) = \mathbb{E}_{(s'|s) \sim D,\, a \sim \pi(s)} \left[ \sum_{i=t}^{\infty} \gamma^{i-t}\, r(s_i, a_i, s_{i+1}) \right] = \left\langle \psi_{\pi}^{\phi}(s, a), \mathbf{w}_{\text{task}} \right\rangle , \tag{2}
109
+ $$
110
+
111
+ where
112
+
113
+ $$
114
+ \psi_{\pi}^{\phi}(s, a) = \mathbb{E}_{(s'|s) \sim D,\, a \sim \pi(s)} \left[ \sum_{i=t}^{\infty} \gamma^{i-t}\, \phi(s_i, a_i, s_{i+1}) \right], \tag{3}
115
+ $$
116
+
117
+ $(s'|s) \sim D$ is a transition from the environment, and $\gamma$ is the discount factor. The corresponding state-value function is $V^{\pi}(s) \triangleq Q^{\pi}(s, \pi(s)) = \langle \psi_{\pi}^{\phi}(s, \pi(s)), \mathbf{w}_{\mathrm{task}} \rangle \triangleq \langle \psi_{\pi}^{\phi}(s), \mathbf{w}_{\mathrm{task}} \rangle$ . We will use the notation $\psi_{\pi}^{\phi}(s) \triangleq \psi_{\pi}^{\phi}(s,\pi(s))$ frequently throughout the remainder of the paper.
118
+
119
+ The value of $\psi_{\pi}^{\phi}(s)$ is known as the successor features of the state $s$ under the policy $\pi$ . Successor features were originally motivated through the above derivation as a way of factoring the value function of a policy into a task-independent behavior component (the successor features) that is independent of the task, and a task-dependent reward component that is independent of behavior.
120
+
121
+ For our purposes we will mostly ignore the reward component (although we return to it in one of the experiments) and focus on the behavior term shown in Equation 3. This term is interesting to us for two reasons. First, we can see by inspection of the RHS that the value of $\psi_{\pi}^{\phi}(s) = \psi_{\pi}^{\phi}(s,\pi (s))$ represents the behavior of $\pi$ as a future discounted sum of state features along a trajectory obtained by running $\pi$ beginning from the state $s$ . In other words, $\psi_{\pi}^{\phi}$ represents the behavior of $\pi$ in terms of
122
+
123
+ the features of the states that $\pi$ will encounter, where the state features are themselves given by the policy-agnostic state-feature encoder from the previous section. Figure 2 summarizes the relationship between successor features $\psi$ and state encoders $\phi$ .
124
+
125
+ Second, Equation 3 satisfies the Bellman equation meaning that the function $\psi_{\pi}^{\phi}(s,a)$ can be estimated from off-policy data in a task-agnostic way using a modified version of Q-learning, where the scalar value reward in ordinary Q-learning is replaced with the vector valued transition features $\phi (s,a,s')$ . We rely on Fitted Q Evaluation (FQE, Le et al. (2019)), an offline Q-learning based algorithm, and thus, we obtain a representation of policy behavior purely from data without executing the policy in the environment. Given a dataset $\mathbb{D}$ and a policy $\pi$ , FQE estimates its state-action-value function $Q^{\pi}(s,a)$ according to the following bootstrap loss:
126
+
127
+ $$
128
+ L(\theta) = \mathbb{E}_{(s, a, r, s') \sim D,\, a' \sim \pi(s')} \left[ \left\| \psi_{\theta}^{\pi}(s, a) - \left( \phi(s, a, s') + \gamma\, \psi_{\theta}^{\pi}(s', a') \right) \right\|^{2} \right]. \tag{4}
129
+ $$
130
+
131
+ FQE is simple to implement and it performs competitively with other OPE algorithms in a variety of settings (Fu et al., 2021) including simulated and real robotics domains (Paine et al., 2020; Konyushova et al., 2021). We use FQE with our historical dataset $\mathbb{D}$ to train a policy-specific state-action-feature network $\psi_{\pi}^{\phi}(s,a)$ , which we then use as the policy-specific state-feature encoder $\psi_{\pi}^{\phi}(s) \triangleq \psi_{\pi}^{\phi}(s,\pi(s))$ by plugging in the policy action.
132
+
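+ A hedged sketch of one regression step for Equation 4 is shown below; `psi_net`, `target_net`, `policy`, and `phi` are assumed callables (the successor-feature network, a slowly-updated copy used for the bootstrap target, the black-box policy, and the state-transition feature encoder), and the use of a target network is an implementation choice not spelled out above.
+
+ ```python
+ import torch
+
+ def successor_feature_loss(psi_net, target_net, policy, phi, batch, gamma=0.99):
+     """One bootstrap step for Eq. 4: regress psi(s, a) onto
+     phi(s, a, s') + gamma * psi_target(s', pi(s')) over an offline batch."""
+     s, a, s_next = batch["s"], batch["a"], batch["s_next"]
+     with torch.no_grad():
+         a_next = policy(s_next)                                # a' ~ pi(s')
+         target = phi(s, a, s_next) + gamma * target_net(s_next, a_next)
+     pred = psi_net(s, a)
+     return ((pred - target) ** 2).sum(dim=-1).mean()
+ ```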
133
+ # 3.4 STATE-AGNOSTIC POLICY FEATURES
134
+
135
+ We obtain a single representation $\Psi_{\pi}^{\phi}$ of a policy $\pi$ from the state-dependent successor features $\psi_{\pi}^{\phi}(s)$ for that policy by averaging the successor features over a set of canonical states:
136
+
137
+ $$
138
+ \Psi_{\pi}^{\phi} = \mathbb{E}_{s \sim D_{\text{can}}} \left[ \psi_{\pi}^{\phi}(s) \right], \tag{5}
139
+ $$
140
+
141
+ where $D_{\mathrm{can}}$ is a set of states sampled from historical trajectories. We sample the canonical states set $D_{\mathrm{can}} \subset \mathbb{D}$ uniformly from our historical dataset, as in Konyushova et al. (2021), ensuring that each canonical state comes from a different trajectory for better coverage. We average successor features over the same set $D_{\mathrm{can}}$ for every policy. The intuition behind this representation is that $\psi_{\pi}^{\phi}(s)$ represents the expected change that $\pi$ induces in the environment by starting in the state $s$ ; by averaging over $D_{\mathrm{can}}$ , $\Psi_{\pi}^{\phi}$ represents an aggregated average effect of the behavior of $\pi$ .
142
+
143
+ # 3.5 PERFORMANCE PREDICTION
144
+
145
+ We aim at predicting the performance of novel, unseen policies. We begin with a dataset of historical policies for which we have measured performance $\Pi = \{\dots, (\pi_i, R_i), \dots\}$ . For each policy in this dataset we create an embedding using the above procedure to obtain a new dataset $\{\dots, (\Psi_{\pi_i}^\phi, R_i), \dots\}$ and then train a performance model $\hat{R}_i = f(\Psi_{\pi_i}^\phi)$ using supervised learning. Given a new policy $\pi_*$ we can then predict its performance before running it in the environment by computing the $\pi 2\mathrm{vec}$ features for the new policy using the above procedure and applying the performance model to obtain $\hat{R}_* = f(\Psi_{\pi_*}^\phi)$ .
146
+
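+ The two steps above (Eq. 5 and the performance model) amount to very little code; the sketch below uses ordinary least squares as the performance model and random arrays as placeholders for real embeddings and returns.
+
+ ```python
+ import numpy as np
+ from sklearn.linear_model import LinearRegression
+
+ def policy_embedding(psi, canonical_states):
+     """Eq. 5: average the policy's successor features over canonical states."""
+     return np.mean([psi(s) for s in canonical_states], axis=0)
+
+ # Placeholder data: 20 evaluated policies with 64-d pi2vec embeddings and returns.
+ Psi = np.random.randn(20, 64)
+ returns = np.random.randn(20)
+ performance_model = LinearRegression().fit(Psi, returns)
+ predicted_return = performance_model.predict(np.random.randn(1, 64))  # new policy
+ ```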
147
+ # 4 EXPERIMENTAL SETUP
148
+
149
+ In this section we describe the feature encoders, domains, and evaluation procedures, followed by details about our baselines. More details about our architecture, domains, and training procedure can be found in the Appendix.
150
+
151
+ Feature encoder. Firstly, the Random feature encoder employs a randomly-initialized ResNet-50 (He et al., 2016). Random features are trivial to implement, and achieve surprisingly strong performance in many settings (Rahimi & Recht, 2007). Here they serve as a simple baseline.
152
+
153
+ Next, we explore with CLIP (Radford et al., 2021). CLIP-network is trained to match image and text embeddings on a large-scale dataset of image caption pairs. Intuitively, by aligning image and text features, CLIP network is trained to encode high-level semantic information.
154
+
155
+ Visual Transformers (ViT) (Dosovitskiy et al., 2021) treat images as a 1D sequence of patches and learn visual features via an attention mechanism. In our experiments the visual transformer is pre-trained on ImageNet classification.
156
+
157
+ ![](images/10038d186cdd110bcc0a0c3ccd02f1f1aa6444bbd67f39978b9fbdd761078143.jpg)
158
+ Figure 3: We adopt 5 environments. (i) Kitchen: 5 tasks (Knob-on, Left door open, light on, microwave open, and right door open) and 3 points of views. (ii) Metaworld: 4 tasks (assembly, button press, bin picking, and drawer open) and 3 points of views. (iii) Insert gear in simulation (iii) and (iv) on a real robot. (v) RGB stacking on a real robot.
159
+
160
+ Lastly, we explore Track-any-point (TAP) (Doersch et al., 2022), a general-purpose network for point tracking in videos. The network is pre-trained to track arbitrary points over video sequences and as a result it learns to understand the low-level geometric features in a scene. We use an attention layer trained to select task-relevant features from the TAP model to reduce dimensionality.
161
+
162
+ This set of feature encoders spans a spectrum of properties as they are created by optimising different objectives. At one extreme CLIP features are trained to align image features with a text description, and encode the semantics of the image. At the other extreme TAP features are trained to track points in videos, and capture low level geometric and texture information. VIT features are in the middle, they need to encode both semantics and local texture to accomplish classification tasks. Depending on the environment and task at hand, better state representation is likely to result in better prediction properties of $\pi 2\mathrm{vec}$ . We leave the question of finding the best representation as future work.
163
+
164
+ Domains. We present extensive experiments to support $\pi 2\mathrm{vec}$ 's capabilities across three simulated domains, Insert Gear (Sim), Metaworld, and Franka-Kitchen, and two real domains, Insert Gear (Real) and RGB Stacking (Figure 3). In each domain we use a dataset of offline human demonstrations (Metaworld and Kitchen) and held-out policy trajectories (RGB Stacking and Insert Gear) for training policy representations. Each policy is treated as a black box: we do not have any prior knowledge about its architecture or training parameters. We provide further details in the Supplementary.
165
+
166
+ Evaluation. We assess the quality of the policy representations by measuring the ability of the model $f$ to predict the performance of held out policies (see Section 3.5). We adopt k-fold cross validation over the set $\Pi$ and report results averaged over cross validation folds. Following previous works on offline policy evaluation (Paine et al., 2020; Fu et al., 2021), we adopt the following three complementary metrics. We report further details in the Supplementary.
167
+
168
+ - Normalized Mean Absolute Error (NMAE) measures the accuracy of the prediction w.r.t. the ground truth. We adopt MAE instead of MSE to be robust to outliers, and we normalize the error by the range of return values for each environment. Lower is better.
169
+ - Rank Correlation measures how the estimated values correlate with the ground-truth. Correlation focuses on how many evaluations on the robot are required to find the best policy. Higher is better.
170
+ - Regret@1 measures the performance difference between the best policy and the predicted best policy, normalized w.r.t. the range of returns values for each environment. Lower is better.
171
+
172
+ Correlation and Regret@1 are the most relevant metrics for evaluating $\pi 2\mathrm{vec}$ on OPS. On the other hand, NMAE refers to the accuracy of the estimated return value and is suited for OPE.
173
+
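+ For concreteness, a sketch of the three metrics is given below; we assume Spearman's rank correlation and normalize by the range of ground-truth returns, matching the definitions above.
+
+ ```python
+ import numpy as np
+ from scipy.stats import spearmanr
+
+ def ops_metrics(true_returns, predicted_returns):
+     """NMAE, rank correlation, and regret@1 over a set of evaluated policies."""
+     true_returns = np.asarray(true_returns, dtype=float)
+     predicted_returns = np.asarray(predicted_returns, dtype=float)
+     value_range = true_returns.max() - true_returns.min()
+     nmae = np.abs(true_returns - predicted_returns).mean() / value_range
+     rank_corr, _ = spearmanr(true_returns, predicted_returns)
+     regret_at_1 = (true_returns.max() - true_returns[np.argmax(predicted_returns)]) / value_range
+     return nmae, rank_corr, regret_at_1
+ ```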
174
+ Baselines. The problem in this paper is to represent policies in such a way that the representations can be used to predict the performance of other policies given the performance of a subset of policies. Importantly, to address this problem the representation should 1) encode the behavior of the policy, 2) be comparable with the representations of other policies, and 3) not require online
175
+
176
+ Table 1: We compare π2vec and Actions representations for Insert-gear (real) and Insert-gear (sim) tasks, as well as for the RGB stacking environment. The table shows the performance and confidence intervals for different feature representations and encoders.
177
+
178
+ <table><tr><td>Representation</td><td>NMAE ↓</td><td>Correlation ↑</td><td>Regret@1 ↓</td></tr><tr><td colspan="4">RGB Stacking</td></tr><tr><td>Actions</td><td>0.261 ±0.045</td><td>0.785 ±0.177</td><td>0.074 ±0.083</td></tr><tr><td>VIT</td><td>0.224 ±0.063</td><td>0.775 ±0.146</td><td>0.036 ±0.116</td></tr><tr><td>ΔVIT</td><td>0.344 ±0.050</td><td>0.030 ±0.332</td><td>0.375 ±0.206</td></tr><tr><td>CLIP</td><td>0.330 ±0.042</td><td>0.342 ±0.293</td><td>0.325 ±0.180</td></tr><tr><td>ΔCLIP</td><td>0.287 ±0.048</td><td>0.583 ±0.126</td><td>0.079 ±0.126</td></tr><tr><td>Random</td><td>0.304 ±0.066</td><td>0.330 ±0.334</td><td>0.226 ±0.177</td></tr><tr><td>ΔRandom</td><td>0.325 ±0.109</td><td>0.352 ±0.348</td><td>0.190 ±0.180</td></tr><tr><td colspan="4">Insert gear (real)</td></tr><tr><td>Actions</td><td>0.252 ±0.028</td><td>-0.545 ±0.185</td><td>0.578 ±0.148</td></tr><tr><td>Random</td><td>0.275 ±0.027</td><td>-0.207 ±0.267</td><td>0.360 ±0.162</td></tr><tr><td>CLIP</td><td>0.198 ±0.030</td><td>0.618 ±0.136</td><td>0.267 ±0.131</td></tr><tr><td>ΔCLIP</td><td>0.253 ±0.228</td><td>-0.109 ±0.100</td><td>0.429 ±0.100</td></tr><tr><td colspan="4">Insert gear (sim)</td></tr><tr><td>Actions</td><td>0.174 ±0.015</td><td>0.650 ±0.056</td><td>0.427 ±0.172</td></tr><tr><td>Random</td><td>0.215 ±0.026</td><td>0.555 ±0.104</td><td>0.422 ±0.143</td></tr><tr><td>TAP</td><td>0.164 ±0.022</td><td>0.680 ±0.095</td><td>0.359 ±0.184</td></tr><tr><td>VIT</td><td>0.224 ±0.025</td><td>0.402 ±0.129</td><td>0.448 ±0.195</td></tr><tr><td>ΔVIT</td><td>0.255 ±0.024</td><td>0.218 ±0.139</td><td>0.457 ±0.153</td></tr><tr><td>CLIP</td><td>0.180 ±0.031</td><td>0.502 ±0.068</td><td>0.298 ±0.126</td></tr><tr><td>ΔCLIP</td><td>0.189 ±0.020</td><td>0.586 ±0.077</td><td>0.314 ±0.147</td></tr></table>
179
+
180
+ data. Active Offline Policy Selection (AOPS) (Konyushova et al., 2021) stands alone as a notable work that delves into policy representation from offline data, with the task of deciding which policies should be evaluated first to gain the most information about the system. AOPS showed that representing policies according to its algorithm leads to faster identification of the best policy. In AOPS's representation, which we call "Actions", policies are represented through the actions that the policies take on a fixed set of canonical states. We build the Actions representation as follows. We run each policy $\pi$ on the set of states $D_{\mathrm{can}}$ sampled from historical trajectories. Next, we concatenate the resulting set of actions $\{\pi(s)\}_{s \in D_{\mathrm{can}}}$ into a vector.
181
+
182
+ To the best of our knowledge, the Actions representation is the only applicable baseline in the setting that we adopt in this paper. Nevertheless, OPE methods that estimate policy performance from a fixed offline dataset are standard methodology in the offline RL literature. Although these methods do not take full advantage of the problem setting in this paper (the performance of some of the policies is known), they can still serve for comparison. In this paper, we compare against FQE, which is a recommended OPE method that strikes a good balance between performance (it is among the top methods) and complexity (it does not require a world model) (Fu et al., 2021).
183
+
184
+ # 5 RESULTS
185
+
186
+ We report results for various feature encoders for Insert Gear (sim and real) and RGB Stacking. Similarly, we report results averaged over 4 tasks and 3 points of view for Metaworld, and over 5 tasks and 3 points of view for Kitchen. Along with results for each feature encoder, we report the average results of picking the best feature encoder for each task (BEST-φ). Similarly, we report as BEST-CLIP and BEST-VIT the average results when adopting the best feature encoder among CLIP/VIT and ΔCLIP/ΔVIT. We identify the best feature encoder for a task by conducting cross-validation on previously evaluated policies and picking the best encoder in terms of regret@1.
187
+
188
+ Our results demonstrate that (i) $\pi 2\mathrm{vec}$ outperforms the Actions baseline models consistently across real and simulated robotics environments and multiple tasks, showcasing the framework's effectiveness in representing policies. Furthermore, we demonstrate the applicability to real-world robotic settings, specifically in the challenging Insert Gear (Real) environment, where even underperforming policies contribute to improved policy evaluation. We show that choosing the best model as
189
+
190
+ Table 2: We evaluate $\pi 2\mathrm{vec}$ on Metaworld and Kitchen. The results are averaged over all settings and confidence intervals are reported. BEST- $\phi$ is $\pi 2\mathrm{vec}$ average performance assuming that we adopt the best $\phi$ in terms of regret@1 for each task-POV setting. Similarly, BEST-CLIP and BEST-VIT are the best feature encoder between CLIP/VIT and $\Delta$ CLIP/ $\Delta$ VIT.
191
+
192
+ <table><tr><td>Representation</td><td>NMAE ↓</td><td>Correlation ↑</td><td>Regret@1 ↓</td></tr><tr><td colspan="4">Metaworld</td></tr><tr><td>Actions</td><td>0.424 ±0.058</td><td>0.347 ±0.152</td><td>0.232 ±0.078</td></tr><tr><td>CLIP</td><td>0.340 ±0.035</td><td>0.254 ±0.143</td><td>0.250 ±0.076</td></tr><tr><td>ΔCLIP</td><td>0.325 ±0.092</td><td>0.286 ±0.154</td><td>0.232 ±0.086</td></tr><tr><td>BEST-CLIP</td><td>0.309 ±0.027</td><td>0.351 ±0.130</td><td>0.194 ±0.076</td></tr><tr><td>VIT</td><td>0.303 ±0.030</td><td>0.280 ±0.146</td><td>0.263 ±0.091</td></tr><tr><td>ΔVIT</td><td>0.315 ±0.026</td><td>0.162 ±0.169</td><td>0.325 ±0.084</td></tr><tr><td>BEST-VIT</td><td>0.298 ±0.029</td><td>0.300 ±0.147</td><td>0.244 ±0.092</td></tr><tr><td>Random</td><td>0.366 ±0.086</td><td>0.043 ±0.150</td><td>0.375 ±0.108</td></tr><tr><td>BEST-φ</td><td>0.289 ±0.018</td><td>0.460 ±0.099</td><td>0.153 ±0.060</td></tr><tr><td colspan="4">Kitchen</td></tr><tr><td>Actions</td><td>0.857 ±0.128</td><td>0.326 ±0.128</td><td>0.221 ±0.089</td></tr><tr><td>CLIP</td><td>0.417 ±0.032</td><td>0.021 ±0.219</td><td>0.317 ±0.081</td></tr><tr><td>ΔCLIP</td><td>0.352 ±0.026</td><td>0.260 ±0.216</td><td>0.244 ±0.081</td></tr><tr><td>BEST-CLIP</td><td>0.333 ±0.025</td><td>0.346 ±0.200</td><td>0.197 ±0.076</td></tr><tr><td>VIT</td><td>0.385 ±0.030</td><td>0.030 ±0.244</td><td>0.322 ±0.095</td></tr><tr><td>ΔVIT</td><td>0.344 ±0.025</td><td>0.155 ±0.234</td><td>0.251 ±0.082</td></tr><tr><td>BEST-VIT</td><td>0.321 ±0.024</td><td>0.412 ±0.228</td><td>0.151 ±0.068</td></tr><tr><td>Random</td><td>0.382 ±0.033</td><td>-0.017 ±0.225</td><td>0.334 ±0.080</td></tr><tr><td>BEST-φ</td><td>0.392 ±0.053</td><td>0.591 ±0.203</td><td>0.070 ±0.045</td></tr></table>
193
+
194
+ a feature-extractor greatly improves results (ii). Finally, we adopt $\pi 2\mathrm{vec}$ to solve Equation 2 and estimate policies' return values in Metaworld's assembly environment, without relying on any ground-truth data (iii). Although the successor feature assumption of linearity of rewards is violated, $\pi 2\mathrm{vec}$ still ranks policies competitively in the offline setting when compared to FQE. In the Appendix, we provide an intuition for choosing the best $\phi$ based on the correlation with task difficulty (iv), and we study the effect of different dataset types, such as demonstrations and trajectories from held-out policies (v). We investigate $\pi 2\mathrm{vec}$ 's generalization capabilities (vi), including out-of-distribution scenarios (vii). We also demonstrate that $\pi 2\mathrm{vec}$ represents random policies close together in the feature space (viii), and that $\pi 2\mathrm{vec}$ is robust to canonical state coverage (ix) and effective with online data (x).
195
+
196
+ (i) $\pi 2\mathrm{vec}$ consistently outperforms Actions. We compare $\pi 2\mathrm{vec}$ and Actions across all scenarios. Our method outperforms the Actions representation when predicting values of unseen policies in both real robotics scenarios, RGB Stacking and Insert Gear (Real), as shown in Table 1. In the former, $\Psi^{\mathrm{VIT}}$ achieves regret@1 of 0.036 compared to Actions' 0.074, a relative improvement of $51\%$. In the latter, $\Psi^{\mathrm{CLIP}}$ improves over Actions by achieving regret@1 of 0.267 compared to Actions' 0.578, and drastically outperforms Actions in terms of correlation, achieving +0.618 compared to Actions' -0.545. $\pi 2\mathrm{vec}$ performs robustly on Insert Gear (Real) even though policy performance on this task varies greatly (see supplementary material for per-task policy performance).
197
+
198
+ We also evaluate our approach in the simulated counterpart, Insert Gear (Sim). In this environment, $\Psi^{\mathrm{CLIP}}$ and $\Psi^{\mathrm{TAP}}$ achieve regret@1 of 0.314 and 0.359 respectively, compared to Actions' 0.427. We underline the dichotomy between geometrical and semantic features: $\Psi^{\mathrm{TAP}}$ performs best in terms of NMAE and correlation, while $\Psi^{\mathrm{CLIP}}$ performs best in terms of regret@1. These results highlight how various $\phi$ compare depending on the setting, the type of task, and policy performance.
199
+
200
+ (ii) When evaluating across multiple settings, selecting $\phi$ leads to better results. We compare $\pi 2\mathrm{vec}$ with different foundation models across 12 Metaworld settings and 15 Kitchen settings. Table 2 reports the average results across all settings for Metaworld and Kitchen. In Metaworld, we notice that Actions performs on par with $\Psi^{\mathrm{CLIP}}$, $\Psi^{\mathrm{VIT}}$, and their respective variations $\Delta \mathrm{CLIP}$ and $\Delta \mathrm{VIT}$ in terms of correlation and regret@1, while our approach consistently outperforms Actions in terms of NMAE. As these domains have less state variability, Actions represents policies robustly. We test CLIP/$\Delta$CLIP
201
+
202
+ Table 3: We extend π2vec to the fully-offline setting and test it on Metaworld assembly task (left, right, and top). We report results and confidence intervals. In this setting, performances of all policies are unknown.
203
+
204
+ <table><tr><td>Representation</td><td>NMAE ↓</td><td>Correlation ↑</td><td>Regret@1 ↓</td></tr><tr><td colspan="4">Assembly (left)</td></tr><tr><td>FQE</td><td>0.338 ±0.062</td><td>0.125 ±0.218</td><td>0.424 ±0.260</td></tr><tr><td>π2vec</td><td>8.306 ±0.155</td><td>0.360 ±0.097</td><td>0.215 ±0.079</td></tr><tr><td colspan="4">Assembly (right)</td></tr><tr><td>FQE</td><td>0.270 ±0.093</td><td>-0.029 ±0.351</td><td>0.504 ±0.071</td></tr><tr><td>π2vec</td><td>2.116 ±0.056</td><td>0.154 ±0.115</td><td>0.319 ±0.080</td></tr><tr><td colspan="4">Assembly (top)</td></tr><tr><td>FQE</td><td>0.322 ±0.012</td><td>-0.251 ±0.516</td><td>0.609 ±0.228</td></tr><tr><td>π2vec</td><td>0.492 ±0.006</td><td>0.555 ±0.106</td><td>0.149 ±0.071</td></tr></table>
205
+
206
+ and VIT/$\Delta$VIT on previously evaluated policies for each task through cross-validation to identify the best feature encoder for the task in terms of regret@1. We report $\Psi^{\mathrm{BEST - CLIP}}$ and $\Psi^{\mathrm{BEST - VIT}}$ as the average results over the best among CLIP/$\Delta$CLIP and VIT/$\Delta$VIT, respectively. $\Psi^{\mathrm{BEST - CLIP}}$ achieves regret@1 of 0.194 and correlation of 0.351, outperforming the Actions representation. We highlight that the choice of $\phi$ is critical, since $\Psi^{\mathrm{random}}$, which uses a randomly-initialized ResNet50 as feature extractor, underperforms. Moreover, $\pi 2\mathrm{vec}$ with the best $\phi$ improves drastically, achieving regret@1 of 0.153 compared to Actions' 0.232. We notice similar improvements when evaluating on Kitchen's 15 settings. Table 2 compares choosing the best $\phi$ against VIT, CLIP, and Actions. In Kitchen, $\Psi^{\mathrm{VIT}}$ outperforms $\Psi^{\mathrm{CLIP}}$ and Actions, while $\Psi^{\mathrm{BEST} - \phi}$ achieves the overall best results.
207
+
208
+ (iii) $\pi 2\mathrm{vec}$ enables fully-offline policy selection. By directly modelling the relationship between successor features and returns, we avoid the linear reward assumption of the original successor features work. This is preferable since rewards are generally not linearly related to state features. However, it restricts our method to settings where some policies' performance is known. To evaluate performance in a fully-offline setting, we fit a linear model of the task reward, $\hat{r} = \langle \phi (s),\mathbf{w}_{\mathrm{task}}\rangle$, given the state's feature representation $\phi (s)$, as in Equation 2 from the original successor features work. Next, we predict policies' returns as $\hat{R}_i = \langle \Psi_{\pi_i}^{\phi},\mathbf{w}_{\mathrm{task}}\rangle$. We compare our approach to FQE in Table 3 and find that, while our method's return predictions are inaccurate (as evidenced by the high NMAE), it still performs well in ranking policies (higher correlation and lower regret@1).
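+
+ As a rough illustration of this fully-offline variant (our sketch, not the authors' code), the snippet below fits $\mathbf{w}_{\mathrm{task}}$ by least squares from per-state features to rewards and predicts each policy's return as the inner product with its successor-feature vector; the array names and shapes are assumptions.
+
+ ```python
+ # Minimal sketch of fully-offline return estimation (assumed names/shapes).
+ import numpy as np
+
+ def fit_w_task(phi_states: np.ndarray, rewards: np.ndarray) -> np.ndarray:
+     """Fit w_task so that r ~= <phi(s), w_task> via least squares.
+     phi_states: (num_states, N) state features; rewards: (num_states,)."""
+     w_task, *_ = np.linalg.lstsq(phi_states, rewards, rcond=None)
+     return w_task
+
+ def predict_returns(psi_policies: np.ndarray, w_task: np.ndarray) -> np.ndarray:
+     """Predicted return of policy i is <Psi_i, w_task>.
+     psi_policies: (num_policies, N) successor-feature vectors."""
+     return psi_policies @ w_task
+ ```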
209
+
210
+ # 6 CONCLUSION
211
+
212
+ We presented $\pi 2\mathrm{vec}$ , a framework for offline policy representation via successor features. Our method treats the policy as a black box, and creates a representation that captures statistics of how the policy changes the environment rather than its idiosyncrasies. The representations can be trained from offline data, and leverage the pretrained features of visual foundation models to represent individual states of the environment. In our experiments, we represented policies by relying on visual features from semantic (CLIP), geometric (TAP), and visual (VIT) foundation models. We showed that $\pi 2\mathrm{vec}$ outperforms previously used Actions-based representations and generalizes to fully-offline settings. Overall, our experiments showcase the effectiveness and versatility of $\pi 2\mathrm{vec}$ in representing policies and its potential for various applications in reinforcement learning.
213
+
214
+ Moving forward, we acknowledge that finding the optimal combination of these elements remains an ongoing challenge. Future work should explore diverse foundation models, offline learning algorithms for successor feature training, and dataset choices. Fine-tuning the feature encoder $\phi$ along with $\psi_{\phi}^{\pi}$ is interesting but poses challenges, as each feature encoder would specialize to predict features for a specific policy, resulting in policy representations that are independent and not comparable. We leave end-to-end fine-tuning as future work. Integrating $\pi 2\mathrm{vec}$ into the AOPS framework (Konyushova et al., 2021) for enhanced offline policy selection is another intriguing avenue. Additionally, extending $\pi 2\mathrm{vec}$ to augment Generalized Policy Improvement (Barreto et al., 2017) in offline settings presents exciting research opportunities.
215
+
216
+ # REFERENCES
217
+
218
+ Andre Barreto, Will Dabney, Rémi Munos, Jonathan J Hunt, Tom Schaul, Hado P van Hasselt, and David Silver. Successor features for transfer in reinforcement learning. Advances in neural information processing systems, 30, 2017.
219
+ Marc G Bellemare, Will Dabney, and Rémi Munos. A distributional perspective on reinforcement learning. In International Conference on Machine Learning, pp. 449-458. PMLR, 2017.
220
+ Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021.
221
+ Kianté Brantley, Soroush Mehri, and Geoff J Gordon. Successor feature sets: Generalizing successor representations across policies. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 11774-11781, 2021.
222
+ Jonathan Chang, Kaiwen Wang, Nathan Kallus, and Wen Sun. Learning bellman complete representations for offline policy evaluation. In International Conference on Machine Learning, pp. 2938-2971. PMLR, 2022.
223
+ Peter Dayan. Improving generalization for temporal difference learning: The successor representation. Neural computation, 5(4):613-624, 1993.
224
+ Carl Doersch, Ankush Gupta, Larisa Markeeva, Adrià Recasens, Lucas Smaira, Yusuf Aytar, João Carreira, Andrew Zisserman, and Yi Yang. Tap-vid: A benchmark for tracking any point in a video. arXiv preprint arXiv:2211.03726, 2022.
225
+ Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=YicbFdNTTy.
226
+ Yuqing Du, Ksenia Konyushkova, Misha Denil, Akhil Raju, Jessica Landon, Felix Hill, Nando de Freitas, and Serkan Cabi. Vision-language models as success detectors. arXiv preprint arXiv:2303.07280, 2023.
227
+ Mehrdad Farajtabar, Yinlam Chow, and Mohammad Ghavamzadeh. More robust doubly robust off-policy evaluation. pp. 1447-1456, 2018.
228
+ Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep data-driven reinforcement learning, 2020.
229
+ Justin Fu, Mohammad Norouzi, Ofir Nachum, George Tucker, Ziyu Wang, Alexander Novikov, Mengjiao Yang, Michael R Zhang, Yutian Chen, Aviral Kumar, et al. Benchmarks for deep off-policy evaluation. arXiv preprint arXiv:2103.16596, 2021.
230
+ Caglar Gulcehre, Ziyu Wang, Alexander Novikov, Thomas Paine, Sergio Gomez, Konrad Zolna, Rishabh Agarwal, Josh S Merel, Daniel J Mankowitz, Cosmin Paduraru, et al. Rl unplugged: A suite of benchmarks for offline reinforcement learning. Advances in Neural Information Processing Systems, 33:7248-7259, 2020.
231
+ Abhishek Gupta, Vikash Kumar, Corey Lynch, Sergey Levine, and Karol Hausman. Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning. arXiv preprint arXiv:1910.11956, 2019.
232
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
233
+ Alexander Irpan, Kanishka Rao, Konstantinos Bousmalis, Chris Harris, Julian Ibarz, and Sergey Levine. Off-policy evaluation via off-policy classification. Advances in Neural Information Processing Systems, 32, 2019.
234
+
235
+ Max Jaderberg, Volodymyr Mnih, Wojciech Marian Czarnecki, Tom Schaul, Joel Z Leibo, David Silver, and Koray Kavukcuoglu. Reinforcement learning with unsupervised auxiliary tasks. arXiv preprint arXiv:1611.05397, 2016.
236
+ Dmitry Kalashnikov, Alex Irpan, Peter Pastor, Julian Ibarz, Alexander Herzog, Eric Jang, Deirdre Quillen, Ethan Holly, Mrinal Kalakrishnan, Vincent Vanhoucke, et al. Qt-opt: Scalable deep reinforcement learning for vision-based robotic manipulation. arXiv preprint arXiv:1806.10293, 2018.
237
+ Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), *ICLR (Poster)*, 2015. URL http://dblp.uni-trier.de/db/conf/iclr/iclr2015.html#KingmaB14.
238
+ Ksenia Konyushova, Yutian Chen, Thomas Paine, Caglar Gulcehre, Cosmin Paduraru, Daniel J Mankowitz, Misha Denil, and Nando de Freitas. Active offline policy selection. Advances in Neural Information Processing Systems, 34:24631-24644, 2021.
239
+ Michael Laskin, Aravind Srinivas, and Pieter Abbeel. Curl: Contrastive unsupervised representations for reinforcement learning. In International Conference on Machine Learning, pp. 5639-5650. PMLR, Jul 2020.
240
+ Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In International Conference on Machine Learning, pp. 3703-3712. PMLR, 2019.
241
+ Alex X Lee, Coline Manon Devin, Yuxiang Zhou, Thomas Lampe, Konstantinos Bousmalis, Jost Tobias Springenberg, Arunkumar Byravan, Abbas Abdelmaleki, Nimrod Gileadi, David Khosid, et al. Beyond pick-and-place: Tackling robotic stacking of diverse shapes. In 5th Annual Conference on Robot Learning, 2021.
242
+ Lucas Lehnert and Michael L Littman. Successor features combine elements of model-free and model-based reinforcement learning. The Journal of Machine Learning Research, 21(1):8030-8082, 2020.
243
+ Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020.
244
+ Lihong Li, Wei Chu, John Langford, and Robert E Schapire. A contextual-bandit approach to personalized news article recommendation. pp. 661-670, 2010.
245
+ Qiang Liu, Lihong Li, Ziyang Tang, and Dengyong Zhou. Breaking the curse of horizon: Infinite-horizon off-policy estimation. Advances in Neural Information Processing Systems, 31, 2018.
246
+ Suraj Nair, Aravind Rajeswaran, Vikash Kumar, Chelsea Finn, and Abhinav Gupta. R3m: A universal visual representation for robot manipulation. arXiv preprint arXiv:2203.12601, 2022.
247
+ Xinkun Nie, Emma Brunskill, and Stefan Wager. Learning when-to-treat policies. arXiv preprint arXiv:1905.09751, 2019.
248
+ Tom Le Paine, Cosmin Paduraru, Andrea Michi, Caglar Gulcehre, Konrad Zolna, Alexander Novikov, Ziyu Wang, and Nando de Freitas. Hyperparameter selection for offline reinforcement learning. arXiv preprint arXiv:2007.09055, 2020.
249
+ Doina Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, pp. 80, 2000.
250
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021.
251
+ Ali Rahimi and Benjamin Recht. Random features for large-scale kernel machines. Advances in neural information processing systems, 20, 2007.
252
+
253
+ Scott Reed, Konrad Zolna, Emilio Parisotto, Sergio Gomez Colmenarejo, Alexander Novikov, Gabriel Barth-Maron, Mai Gimenez, Yury Sulsky, Jackie Kay, Jost Tobias Springenberg, et al. A generalist agent. Transactions on Machine Learning Research (TMLR), 2022.
254
+ Chris Reinke and Xavier Alameda-Pineda. Successor feature representations. arXiv preprint arXiv:2110.15701, 2021.
255
+ Max Schwarzer, Ankesh Anand, Rishab Goel, R Devon Hjelm, Aaron Courville, and Philip Bachman. Data-efficient reinforcement learning with self-predictive representations. arXiv preprint arXiv:2007.05929, 2020.
256
+ Mohit Sharma, Claudio Fantacci, Yuxiang Zhou, Skanda Koppula, Nicolas Heess, Jon Scholz, and Yusuf Aytar. Lossless adaptation of pretrained vision models for robotic manipulation. 2023.
257
+ Richard S Sutton, A Rupam Mahmood, and Martha White. An emphatic approach to the problem of off-policy temporal-difference learning. The Journal of Machine Learning Research, 17(1): 2603-2631, 2016.
258
+ Georgios Theocharous, Philip S Thomas, and Mohammad Ghavamzadeh. Personalized ad recommendation systems for life-time value optimization with guarantees. 2015.
259
+ Tianhe Yu, Deirdre Quillen, Zhanpeng He, Ryan Julian, Karol Hausman, Chelsea Finn, and Sergey Levine. Meta-world: A benchmark and evaluation for multi-task and meta reinforcement learning. In Conference on robot learning, pp. 1094-1100. PMLR, 2020.
260
+ Michael R Zhang, Tom Le Paine, Ofir Nachum, Cosmin Paduraru, George Tucker, Ziyu Wang, and Mohammad Norouzi. Autoregressive dynamics models for offline policy evaluation and optimization. arXiv preprint arXiv:2104.13877, 2021.
261
+
262
+ # 7 APPENDIX
263
+
264
+ # 7.1 DOMAINS
265
+
266
+ The Metaworld (Yu et al., 2020) and Kitchen (Gupta et al., 2019) domains are widely known in the literature. They contain many tasks from multiple views; however, the variability among tasks is low. For example, the robotic arm in Metaworld is initialized within a narrow set of positions, while in Kitchen the object positions are fixed. The task in real-robot RGB Stacking (Lee et al., 2021; Reed et al., 2022) is to stack the red object on top of the blue object with a green object as a distractor, where the objects have various geometric shapes. This task is difficult because the objects have unusual shapes and may be positioned at any point in the workspace. We also consider a challenging gear insertion task in sim and real, where the objective is to pick up a gear of a certain size (from an arbitrary position in the workspace) in the presence of other gears and insert it onto a specific shaft on the gear assembly base (arbitrary position in real, fixed in sim). We describe data and policies for each domain below.
267
+
268
+ Insert Gear (Sim). We use 18 policies for the Insert Gear (Sim) task in the simulated environment. We take an intermediate and the last checkpoint for each policy. We collect trajectories of $T = 300$ steps from a single $\pi$ and train all $\psi_{\pi_i}$ on those trajectories. The state $s$ consists of two images, one from a left camera and one from a right camera, and proprioception sensing. All the policies in this domain have the following architecture. Image observations are encoded using a (shared) ResNet, and proprioception is embedded using an MLP. Then, the two embeddings are concatenated and further processed by an MLP, followed by an action head.
269
+
270
+ Insert Gear (Real). The observable state consists of three points of view: a camera on the left of the basket, a camera on the right of the basket, and an egocentric camera on the gripper. The state also includes proprioception information about the arm position. The setup corresponds to the medium gear insertion task described in the work of Du et al. (2023). We conduct experiments on the Insert Gear (Real) task on a real robotic platform by evaluating 18 policies. We collect a dataset of trajectories with a hold-out policy trained on human demonstrations. Next, we train our set of policies on this dataset and we evaluate $\pi 2\mathrm{vec}$ . The state and the policy architecture are the same as in Insert Gear (Sim).
271
+
272
+ RGB stacking. We use 12 policies trained with behavior cloning on a previously collected dataset of demonstrations for the RGB stacking task with a real robotic arm. Each policy is trained with a variety of hyperparameters. The state $s$ consists of images from the basket cameras, one on the left and one on the right, a first-person camera mounted on the end-effector, and proprioception sensing. For training $\pi 2\mathrm{vec}$, we adopted an offline dataset of trajectories. We collected the trajectories by running a policy trained on human demonstrations. Trajectory length varies between 800 and 1000 steps. We built the evaluation dataset $D_{\mathrm{can}}$ by sampling 5,000 trajectories and then sampling one state from each of them. The policy architecture follows the description in Lee et al. (2021).
273
+
274
+ Metaworld. For Metaworld, we consider 4 tasks: assembly, button press, bin picking, and drawer open. We use 3 points of view (left, right, and top), as specified in Sharma et al. (2023); Nair et al. (2022). For each task-camera pair, we adopt the 12 policies proposed by Sharma et al. (2023) for the particular task and point of view. The policies vary by the hyperparameters used during training (learning rate, seed, and feature extractor among NFNet, ViT, and ResNet). Next, we train a successor feature network $\psi_{\pi}$ for each policy $\pi$ on a cumulative dataset of demonstrations for all tasks and views. At evaluation, we build $D_{\mathrm{can}}$ by uniformly sampling one state from each demonstration.
275
+
276
+ Franka-Kitchen. For Kitchen, we consider 5 tasks: knob on, left door open, light on, microwave open, and door open, with 3 points of view (left, right, and top), as provided by previous works (Sharma et al., 2023; Nair et al., 2022). For each task and point of view, we use human demonstrations provided by Fu et al. (2020). We also adopt the policies $\{\pi_i\}$ proposed by Sharma et al. (2023). Each policy solves each task using proprioception and image information from a single point of view, and the policies vary by the hyperparameters used during training (learning rate, seed, and feature extractor among NFNet, ViT, and ResNet). Additional details can be found in Sharma et al. (2023).
277
+
278
+ Table 4 reports the mean and standard deviation of the expected return values for the policies under consideration. We highlight that Metaworld and Insert Gear have a high standard deviation (the standard
279
+
280
+ Table 4: We report the mean return value and its standard-deviation for the policies for each environment. For Metaworld and Kitchen, we take the average mean and standard-deviation across all tasks and points of views. † Number of policies per task-camera pair.
281
+
282
+ <table><tr><td>Environment</td><td>N. policies</td><td>Return values</td></tr><tr><td>Insert-gear (sim)</td><td>18</td><td>0.60±0.51</td></tr><tr><td>Insert-gear (real)</td><td>18</td><td>2.29±1.71</td></tr><tr><td>RGB Stacking</td><td>12</td><td>179.41±8.21</td></tr><tr><td>Metaworld</td><td>12†</td><td>215.88±164.99</td></tr><tr><td>Kitchen</td><td>12†</td><td>-47.86±20.05</td></tr></table>
283
+
284
+ ![](images/c3fc7f3dfdd20580bfea024219edd1c049302ebd5719e6bed1a42cb01d201ad4.jpg)
285
+ Figure 4: We implement $\psi_{\pi}^{\phi}$ as a neural network. First, we encode the state $s_t$, consisting of observations and proprioception, and the policy actions $\pi(s_t)$ into feature vectors. Next, we concatenate the features and feed the resulting vector to a multi-layer perceptron. $\psi_{\pi}^{\phi}$ outputs a vector of $B \times N$ dimensions, where $B$ is the number of bins of the distribution and $N$ is the dimension of the feature vector $\phi(s_t)$ . We reshape the output into a matrix, where each row $i$ represents a histogram of probabilities of size $B$ for the successor feature $\psi_i$ .
286
+
287
+ deviation is $75\%$ or more of the mean return value), as we have extremely good and extremely bad policies. On the contrary, return values for RGB Stacking and Kitchen vary less, i.e., most of the policies for these environments achieve similar performance.
288
+
289
+ # 7.2 ARCHITECTURE AND TRAINING DETAILS
290
+
291
+ The architecture of the successor features network $\psi_{\pi}^{\phi}(s)$ for a policy $\pi$ is illustrated in Figure 4. The network takes state-action pairs as input; it encodes actions and proprioception with a multi-layer perceptron, and visual observations using a ResNet50. When the environment observation consists of multiple images (e.g., multiple camera views of the same scene), we embed each image separately and average the resulting vectors. We concatenate the state and action encodings and process the resulting feature vector with an MLP. Finally, the network $\psi_{\pi}^{\phi}(s)$ outputs a vector in $\mathbb{R}^{N\times B}$, where $N$ is the dimension of the feature vector $\phi(s)$ and each of the $N$ successor features is represented as a distribution over $B$ bins<sup>1</sup>. $\psi_{\pi}^{\phi}(s)$ returns $N$ histograms, where each histogram $\psi_i$ approximates the distribution of the discounted sum of feature $\phi_i$ under policy $\pi$ . For each environment, we inspect the range of values assumed by $\phi$ to find the min (lower) and max (upper) bound of the histogram. At inference time, we take the expected value of each histogram to compute the successor features vector.
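+
+ The following is a minimal PyTorch sketch of the network just described (ours, not the authors' implementation); the class name, layer widths, and argument names are illustrative assumptions.
+
+ ```python
+ # Sketch of the distributional successor-feature network (assumed details).
+ import torch
+ import torch.nn as nn
+ import torchvision
+
+ class SuccessorFeatureNet(nn.Module):
+     def __init__(self, proprio_dim, action_dim, feat_dim_n, num_bins_b, hidden=512):
+         super().__init__()
+         resnet = torchvision.models.resnet50(weights=None)
+         resnet.fc = nn.Identity()  # 2048-d embedding per image
+         self.image_encoder = resnet
+         self.state_action_mlp = nn.Sequential(
+             nn.Linear(proprio_dim + action_dim, hidden), nn.ReLU(),
+             nn.Linear(hidden, hidden))
+         self.head = nn.Sequential(
+             nn.Linear(2048 + hidden, hidden), nn.ReLU(),
+             nn.Linear(hidden, feat_dim_n * num_bins_b))
+         self.n, self.b = feat_dim_n, num_bins_b
+
+     def forward(self, images, proprio, action):
+         # images: (batch, views, 3, H, W); embed each view, then average
+         bsz, views = images.shape[:2]
+         img = self.image_encoder(images.flatten(0, 1)).view(bsz, views, -1).mean(1)
+         sa = self.state_action_mlp(torch.cat([proprio, action], dim=-1))
+         logits = self.head(torch.cat([img, sa], dim=-1))
+         # one histogram of B bins per successor-feature dimension
+         return logits.view(bsz, self.n, self.b).softmax(dim=-1)
+
+ def expected_successor_features(hist, bin_centers):
+     # hist: (batch, N, B) probabilities; bin_centers: (B,) values per bin
+     return (hist * bin_centers).sum(dim=-1)  # (batch, N)
+ ```
+
+ At inference, `expected_successor_features` collapses each histogram to its mean, yielding the $N$-dimensional successor-feature estimate.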
292
+
293
+ We train $\psi_{\pi}^{\phi}(s,a)$ using an FQE with a distributional objective (Le et al., 2019; Bellemare et al., 2017). Training the successor features network only requires offline data (separate for each domain) and does not require any online interactions. We train the network for 50,000 steps for Metaworld and Kitchen and 100,000 steps for RGB Stacking, Insert Gear (Sim), and Insert Gear (Real). We adopt
294
+
295
+ Table 5: We conduct additional experiments on the Metaworld environment when using a dataset of trajectories for training $\pi 2\mathrm{vec}$ . As expected, enhancing the dataset leads to better performance. We report as BEST-CLIP and BEST-VIT the average results when adopting the best feature encoder between CLIP/$\Delta$CLIP and VIT/$\Delta$VIT, respectively, in terms of regret@1.
296
+
297
+ <table><tr><td>Representation</td><td>Dataset</td><td>NMAE</td><td>Correlation</td><td>Regret@1</td></tr><tr><td>Actions</td><td>-</td><td>0.424 ±0.058</td><td>0.347 ±0.152</td><td>0.232 ±0.078</td></tr><tr><td>ΔVIT</td><td>trajectories</td><td>0.296 ±0.024</td><td>0.399 ±0.128</td><td>0.214±0.064</td></tr><tr><td>ΔCLIP</td><td>trajectories</td><td>0.278 ±0.014</td><td>0.469 ±0.096</td><td>0.189 ±0.075</td></tr><tr><td>BEST-φ</td><td>trajectories</td><td>0.269±0.017</td><td>0.507 ±0.105</td><td>0.187 ±0.073</td></tr><tr><td>ΔVIT</td><td>best</td><td>0.322 ±0.029</td><td>0.447±0.126</td><td>0.191±0.074</td></tr><tr><td>ΔCLIP</td><td>best</td><td>0.274 ±0.026</td><td>0.537 ±0.166</td><td>0.177 ±0.175</td></tr><tr><td>BEST-φ</td><td>best</td><td>0.231 ±0.016</td><td>0.615±0.086</td><td>0.135 ±0.052</td></tr></table>
298
+
299
+ different numbers of training steps because Metaworld and Kitchen are more predictable than RGB Stacking and Insert Gear and fewer demonstrations are provided. We use the Adam optimizer (Kingma & Ba, 2015) with a learning rate of $3 \times 10^{-5}$ and a discount factor of $\gamma = 0.99$ . For evaluation, we adopt 3-fold cross-validation in all experiments.
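+
+ As a sketch of this evaluation protocol (assumed, not the authors' code): within each of the 3 folds we fit a linear regressor from policy representations (e.g., $\Psi_{\pi}^{\phi}$ or Actions) to the known returns of the training policies and predict the returns of the held-out policies.
+
+ ```python
+ # 3-fold cross-validated linear readout from policy representations to returns.
+ import numpy as np
+ from sklearn.linear_model import LinearRegression
+ from sklearn.model_selection import KFold
+
+ def cross_validated_return_predictions(reps, returns, seed=0):
+     """reps: (num_policies, dim) policy representations;
+     returns: (num_policies,) ground-truth returns."""
+     preds = np.zeros(len(returns))
+     for train_idx, test_idx in KFold(n_splits=3, shuffle=True,
+                                      random_state=seed).split(reps):
+         reg = LinearRegression().fit(reps[train_idx], returns[train_idx])
+         preds[test_idx] = reg.predict(reps[test_idx])
+     return preds  # compare against `returns` with NMAE / correlation / regret@1
+ ```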
300
+
301
+ # 7.3 METRICS
302
+
303
+ We adopt three common metrics (Fu et al., 2021): NMAE, correlation, and regret@1; a short computation sketch follows their definitions below.
304
+
305
+ - Normalized Mean Absolute Error (NMAE) is defined as the absolute difference between the true value and the estimated value of a policy:
306
+
307
+ $$
308
+ \mathrm{NMAE} = \left| V^{\pi} - \hat{V}^{\pi} \right|, \tag{6}
309
+ $$
310
+
311
+ where $V^{\pi}$ is the true value of the policy, and $\hat{V}^{\pi}$ is the estimated value of the policy.
312
+
313
+ - Regret@1 is the difference between the value of the best policy in the entire set, and the value of the best predicted policy. It is defined as:
314
+
315
+ $$
316
+ \operatorname{Regret@1} = \max_{i \in 1:N} V_{i}^{\pi} - V_{j^{*}}^{\pi}, \quad j^{*} = \operatorname*{arg\,max}_{j \in 1:N} \hat{V}_{j}^{\pi}. \tag{7}
317
+ $$
318
+
319
+ - Rank Correlation (also Spearman's $\rho$ ) measures the correlation between the ranking of the policies by estimated value and their ranking by true value. It can be written as:
320
+
321
+ $$
322
+ \operatorname{Corr} = \frac{\operatorname{Cov}\left(V_{1:N}^{\pi}, \hat{V}_{1:N}^{\pi}\right)}{\sigma\left(V_{1:N}^{\pi}\right)\,\sigma\left(\hat{V}_{1:N}^{\pi}\right)}. \tag{8}
323
+ $$
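+
+ For concreteness, a small sketch (ours) of the three metrics is given below; normalizing the absolute error by the range of true returns is our assumption for the "normalized" part of NMAE, and the rank correlation is computed with Spearman's $\rho$ as in the text.
+
+ ```python
+ # Sketch of the three OPS/OPE metrics (assumed normalization for NMAE).
+ import numpy as np
+ from scipy.stats import spearmanr
+
+ def nmae(v_true, v_pred):
+     # Eq. (6) absolute error, normalized here by the range of true values.
+     return np.mean(np.abs(v_true - v_pred)) / (v_true.max() - v_true.min())
+
+ def regret_at_1(v_true, v_pred):
+     # Eq. (7): true value of the best policy minus that of the best-predicted one.
+     return v_true.max() - v_true[np.argmax(v_pred)]
+
+ def rank_correlation(v_true, v_pred):
+     # Eq. (8), using Spearman's rho on the policy rankings.
+     return spearmanr(v_true, v_pred).correlation
+ ```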
324
+
325
+ Correlation and regret@1 are the most relevant metrics for evaluating $\pi 2\mathrm{vec}$ on Offline Policy Selection (OPS), where the focus is on ranking policies based on values and selecting the best policy.
326
+
327
+ Regret@1 is commonly adopted in assessing performances for Offline Policy Selection, as it directly measures how far off the best-estimated policy is to the actual best policy.
328
+
329
+ Correlation is relevant for measuring how the method ranks policies by their expected return value. By relying on methods that achieve higher correlation and thus are consistent in estimating policy values, researchers and practitioners can prioritize more promising policies for online evaluation.
330
+
331
+ On the other hand, NMAE refers to the accuracy of the estimated return value. NMAE is especially significant when aiming to estimate the true value of a policy and is suited for Offline Policy Evaluation (OPE), where we are interested in knowing the value of each policy. We assess $\pi 2\mathrm{vec}$ 's representation in both settings, showing that $\pi 2\mathrm{vec}$ consistently outperforms the baseline on both metrics.
332
+
333
+ # 7.4 ADDITIONAL EXPERIMENTS
334
+
335
+ (iv) Correlation between task difficulty and $\phi$ . We notice that policy performance varies widely in Insert Gear (Sim) and Insert Gear (Real), as most of the policies fail to solve the task (see
336
+
337
+ Table 6: We evaluate π2vec and the Actions baseline when comparing intermediate checkpoints for a set of policies for the Metaworld assembly task and left point-of-view. We highlight that Actions representations are similar in such a scenario, leading to poor generalization when evaluating on unseen and potentially different policies.
338
+
339
+ <table><tr><td>Representation</td><td>NMAE</td><td>Correlation</td><td>Regret@1</td></tr><tr><td>Actions</td><td>0.724 ±0.268</td><td>-0.189 ±0.137</td><td>0.034 ±0.014</td></tr><tr><td>ΔCLIP</td><td>0.570 ±0.173</td><td>0.190 ±0.361</td><td>0.029 ±0.034</td></tr></table>
340
+
341
+ Table 7: We conduct an experiment in an out-of-domain setting. We represent a set of policies that were trained for Metaworld assembly task from right point-of-view with Actions and $\Psi^{\Delta\mathrm{CLIP}}$ . Next, we evaluate how those representations predict the return values if policies are fed with images from a different point-of-view (left camera in our experiment). $\Psi^{\Delta\mathrm{CLIP}}$ outperforms Actions in terms of regret@1 and NMAE, supporting our intuition that $\pi2\mathrm{vec}$ is robust in an out-of-distribution setting.
342
+
343
+ <table><tr><td>Representation</td><td>NMAE</td><td>Regret@1</td></tr><tr><td>Actions</td><td>0.363 ±0.055</td><td>0.475 ±0.100</td></tr><tr><td>ΔCLIP</td><td>0.227 ±0.078</td><td>0.300 ±0.106</td></tr></table>
344
+
345
+ Table 8: We intuitively expect that π2vec represents random policies close in feature space w.r.t. Actions representations, as random policies do not change the environment in any meaningful way. We conduct an experiment with random policies for Metaworld assembly-left to provide quantitative evidence supporting our interpretation.
346
+
347
+ <table><tr><td>Representation</td><td>Average distance</td><td>Max distance</td></tr><tr><td>Actions</td><td>0.39</td><td>0.62</td></tr><tr><td>Random</td><td>0.11</td><td>0.17</td></tr><tr><td>ΔCLIP</td><td>0.03</td><td>0.22</td></tr></table>
348
+
349
+ supplementary material for per-task policy performances). The gap is evident when compared to the average return value for RGB Stacking, where the standard deviation is negligible. Our intuition is that in hard-to-solve scenarios the actions are often imperfect and noisy. This interpretation would explain the poor performance of the Actions baseline. The comparison of $\Psi^{\mathrm{CLIP}}$ and $\Psi^{\mathrm{VIT}}$ across environments suggests a correlation between the choice of $\phi$ and policies' return values. $\Psi^{\mathrm{CLIP}}$ performs better than $\Psi^{\mathrm{VIT}}$ in Insert Gear (Sim), Insert Gear (Real), and Metaworld, where we report the highest standard deviation among policy performances. $\Psi^{\mathrm{CLIP}}$ is robust when the task is hard and most of the policies fail to solve it. On the other hand, $\Psi^{\mathrm{VIT}}$ is the best option in Kitchen and RGB Stacking, where the standard deviation of policy returns is low or negligible.
350
+
351
+ (v) Studying the performance of $\pi 2\mathrm{vec}$ with different datasets. We investigate how modifications of the dataset for training $\pi 2\mathrm{vec}$ improve performance in Metaworld. Intuitively, if the training set for $\pi 2\mathrm{vec}$ closely resembles the set of reachable states for a policy $\pi$, solving Equation 2 leads to a closer approximation of the real successor feature of $\pi$ at $(s,a)$. We empirically test this claim as follows. We collect 1,000 trajectories for each task-view setting using a pre-trained policy. Next, we train a successor features network $\psi_{\pi}^{\phi}$ for each policy $\pi$ and feature encoder $\phi$, and represent each policy as $\Psi_{\pi}^{\phi}$. Table 5 reports results on Metaworld when training $\pi 2\mathrm{vec}$ with the aforementioned dataset (named trajectories in the table). In this setting, $\Psi^{\mathrm{CLIP}}$ and $\Psi^{\mathrm{VIT}}$ outperform both their counterparts trained on demonstrations and the Actions representation, reaching regret@1 of 0.189 and 0.187 respectively. These results improve slightly if we assume we opt for the best dataset for each task and take the average, as reported under the best dataset in Table 5. Overall, choosing the best feature encoder $\phi$ and the best dataset for any given task leads to the best-performing $\Psi^{\mathrm{BEST}-\phi}$, with correlation 0.615 and regret@1 0.135, improving over Actions by 0.26 and 0.10 respectively.
352
+
353
+ (vi) $\pi 2\mathrm{vec}$ generalizes better, while Actions works when policies are very similar. We explore how Actions and $\pi 2\mathrm{vec}$ compare in the special scenario where all policies are similar. We take 4 intermediate checkpoints at the end of training for each policy as the set of policies to represent.
354
+
355
+ Table 9: We evaluate $\pi 2\mathrm{vec}$ when trained on online data for the Insert Gear (Sim) task. Results for $\pi 2\mathrm{vec}$ trained on offline data for this task are reported in Table 1.
356
+
357
+ <table><tr><td>Representation</td><td>NMAE</td><td>Correlation</td><td>Regret@1</td></tr><tr><td>Actions</td><td>0.174</td><td>0.650</td><td>0.427</td></tr><tr><td>CLIP</td><td>0.158</td><td>0.627</td><td>0.288</td></tr><tr><td>Random</td><td>0.348</td><td>0.425</td><td>0.302</td></tr><tr><td>TAP</td><td>0.172</td><td>0.686</td><td>0.205</td></tr></table>
358
+
359
+ Our intuition is that intermediate checkpoints of a single policy are similar to each other in how they behave. Next, we represent each checkpoint with $\Psi^{\Delta\mathrm{CLIP}}$ and Actions. We compare cross-validating the results across all checkpoints against training on the checkpoints of 3 policies and testing on the checkpoints of the held-out policy. Table 6 reports the results of this comparison on Metaworld's assembly-left task. We notice that Actions representations fail to generalize to policies that differ greatly from the policies in the training set. Fitting the linear regressor with Actions achieves a negative correlation of $-0.189$ and regret@1 of 0.034. On the other hand, $\Psi^{\Delta\mathrm{CLIP}}$ is robust to unseen policies and outperforms Actions with a positive correlation of 0.190 and a lower regret@1 of 0.029.
360
+
361
+ (vii) $\pi 2\mathrm{vec}$ performs well in out-of-distribution scenarios. We conduct another investigation to explore $\pi 2\mathrm{vec}$'s performance in an out-of-distribution setting. We hypothesize that $\pi 2\mathrm{vec}$ represents policies in meaningful ways even when those policies are deployed in settings that differ from the training set, thanks to the generalisation power of foundation models. Table 7 compares $\Delta$CLIP and Actions in evaluating policies trained for Metaworld's assembly-right and tested in Metaworld's assembly-left. $\pi 2\mathrm{vec}$ achieves regret@1 of 0.300 and NMAE of 0.227, outperforming Actions by 0.175 and 0.136 respectively. We leave further exploration of $\pi 2\mathrm{vec}$ in out-of-distribution settings to future work.
362
+ (viii) $\pi 2\mathrm{vec}$ represents random policies close in the representation space. Intuitively, we expect that random policies do not modify the environment in a meaningful way. Therefore, their representations should be closer to each other than the representations of the trained policies are. We investigate this claim as follows. We take a set of 6 trained policies and a set of 6 random policies for Metaworld assembly-left. We compute the average and maximum distance among the random policies' representations, normalized by the average intraset distance between the trained policies' representations. We compare our $\Psi^{\Delta\mathrm{CLIP}}$ and $\Psi^{\mathrm{random}}$ with Actions. Table 8 reports the results, which clearly support our intuition. Both $\Psi^{\Delta \mathrm{CLIP}}$ and $\Psi^{\mathrm{random}}$ represent random policies close to each other, with average distances of 0.03 and 0.11 and maximum distances of 0.22 and 0.17, respectively. On the other hand, if we represent policies with Actions, the average and maximum distances are 0.39 and 0.62, meaning that random policies are represented far apart from each other.
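+
+ A short sketch (assumed, not the authors' code) of how the statistics in Table 8 can be computed: pairwise distances among random-policy representations, normalized by the average pairwise distance among trained-policy representations.
+
+ ```python
+ # Normalized average/max pairwise distance of random-policy representations.
+ import numpy as np
+ from scipy.spatial.distance import pdist
+
+ def normalized_distance_stats(random_reps, trained_reps):
+     """random_reps, trained_reps: (num_policies, dim) representation matrices."""
+     d_random = pdist(random_reps)        # pairwise Euclidean distances
+     scale = pdist(trained_reps).mean()   # intraset distance of trained policies
+     return d_random.mean() / scale, d_random.max() / scale
+ ```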
363
+ (ix) Canonical state coverage. We sample canonical states uniformly from the dataset that was used for training the offline RL policies. Even though there is some intuition that carefully selecting canonical states to better represent the environment can be beneficial, simple random sampling worked well in our experiments. We conduct further experiments to ablate the state coverage. We take demonstrations from the Metaworld assembly task and use only the initial state of each trajectory for $\pi 2\mathrm{vec}$'s and Actions' representations. By adopting the initial state of a trajectory, $\pi 2\mathrm{vec}$ cannot rely on broad state coverage. We report the results in Table 10. We show that $\pi 2\mathrm{vec}$ is robust to state coverage, showing state-of-the-art performance even when the canonical states are limited to the first state of each demonstration.
364
+ (x) $\pi 2\mathrm{vec}$ from online data. We ideally want to evaluate policies without deployment on a real robot, which is often time-consuming and can lead to faults and damage. Nonetheless, we explore $\pi 2\mathrm{vec}$'s capability to represent policies from online data. For each policy $\pi$, we collect a dataset of trajectories by deploying the policy on the agent. Next, we train $\psi_{\pi}$ on the dataset of $\pi$'s trajectories and compute its $\pi 2\mathrm{vec}$ representation. Table 9 reports results when training $\pi 2\mathrm{vec}$ on online data for the Insert Gear (Sim) task. We show that $\pi 2\mathrm{vec}$'s performance improves with respect to the offline counterpart. This result is expected: better dataset coverage leads to improved results, as we also showed in (v).
365
+
366
+ Table 10: We test π2vec robustness to canonical states coverage. We estimate π2vec's representations for Metaworld's assembly task (left, right, and top point-of-views) by using only the first state of the demonstrations for each task.
367
+
368
+ <table><tr><td>Representation</td><td>NMAE ↓</td><td>Correlation ↑</td><td>Regret@1 ↓</td></tr><tr><td colspan="4">Assembly (left)</td></tr><tr><td>CLIP</td><td>0.381</td><td>0.359</td><td>0.287</td></tr><tr><td>ΔCLIP</td><td>0.260</td><td>0.592</td><td>0.087</td></tr><tr><td>Random</td><td>0.422</td><td>0.252</td><td>0.46</td></tr><tr><td>VIT</td><td>0.366</td><td>0.26</td><td>0.347</td></tr><tr><td>Actions</td><td>0.356</td><td>0.503</td><td>0.222</td></tr><tr><td colspan="4">Assembly (right)</td></tr><tr><td>CLIP</td><td>0.363</td><td>0.023</td><td>0.365</td></tr><tr><td>ΔCLIP</td><td>0.242</td><td>0.582</td><td>0.096</td></tr><tr><td>Random</td><td>0.334</td><td>0.313</td><td>0.212</td></tr><tr><td>VIT</td><td>0.27</td><td>0.345</td><td>0.304</td></tr><tr><td>Actions</td><td>0.405</td><td>0.369</td><td>0.263</td></tr><tr><td colspan="4">Assembly (top)</td></tr><tr><td>CLIP</td><td>0.463</td><td>0.270</td><td>0.330</td></tr><tr><td>ΔCLIP</td><td>0.305</td><td>0.594</td><td>0.078</td></tr><tr><td>Random</td><td>0.394</td><td>0.277</td><td>0.328</td></tr><tr><td>VIT</td><td>0.418</td><td>0.020</td><td>0.417</td></tr><tr><td>Actions</td><td>0.414</td><td>0.554</td><td>0.106</td></tr></table>
2024/$_pi$2vec_ Policy Representation with Successor Features/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9865355fba1bdc6e78fa76895cf153d13ee8c9de3abdb1fde9d13e5a6df9ae5
3
+ size 605878
2024/$_pi$2vec_ Policy Representation with Successor Features/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/20d67cbe-7abd-4c48-bd5a-48ae865927bd_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cc092435296fe07b55184879ca5b1dc16bf81ded5b1227522ee90a9c4b3ee6d
3
+ size 2289223
2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c33ab0a9ccb4b9f13c3ef54541e6eb58b018b493441c9f088455cf15f01e569
3
+ size 2289323
2024/$t^3$-Variational Autoencoder_ Learning Heavy-tailed Data with Student's t and Power Divergence/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_content_list.json ADDED
@@ -0,0 +1,1656 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "3D FEATURE PREDICTION FOR MASKED-AUTOENCODER-BASED POINT CLOUD PRETRAINING",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 171,
8
+ 99,
9
+ 816,
10
+ 146
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Siming Yan\\*, Yuqi Yang, Yuxiao Guo, Hao Pan Peng-Shuai Wang, Xin Tong, Yang Liu, Qixing Hua",
17
+ "bbox": [
18
+ 179,
19
+ 167,
20
+ 566,
21
+ 200
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "†The University of Texas at Austin, ‡Microsoft Research Asia Peking University",
28
+ "bbox": [
29
+ 183,
30
+ 200,
31
+ 589,
32
+ 228
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "{siming, huangqx}@cs.utexas.edu, {wangps}@ Hotmail.com",
39
+ "bbox": [
40
+ 183,
41
+ 229,
42
+ 691,
43
+ 242
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "{t-yuqyan, Yuxiao.Guo, haopan, yangliu, xtong}@microsoft.com",
50
+ "bbox": [
51
+ 183,
52
+ 243,
53
+ 766,
54
+ 256
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "ABSTRACT",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 450,
64
+ 292,
65
+ 545,
66
+ 306
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Masked autoencoders (MAE) have recently been introduced to 3D self-supervised pretraining for point clouds due to their great success in NLP and computer vision. Unlike MAEs used in the image domain, where the pretext task is to restore features at the masked pixels, such as colors, the existing 3D MAE works reconstruct the missing geometry only, i.e., the location of the masked points. In contrast to previous studies, we advocate that point location recovery is inessential and restoring intrinsic point features is much superior. To this end, we propose to ignore point position reconstruction and recover high-order features at masked points including surface normals and surface variations, through a novel attention-based decoder which is independent of the encoder design. We validate the effectiveness of our pretext task and decoder design using different encoder structures for 3D training and demonstrate the advantages of our pretrained networks on various point cloud analysis tasks. The code is available at https://github.com/SimingYan/MaskFeat3D.",
73
+ "bbox": [
74
+ 228,
75
+ 323,
76
+ 769,
77
+ 505
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "1 INTRODUCTION",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 173,
87
+ 530,
88
+ 336,
89
+ 544
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Self-supervised pretraining has recently gained much attention. It starts from a pretext task trained on large unlabeled data, where the learned representation is fine-tuned on downstream tasks. This approach has shown great success in 2D images (Chen et al., 2020; Grill et al., 2020; He et al., 2020; Bao et al., 2022; He et al., 2022; Zhou et al., 2022; Zhuang et al., 2021; 2019) and natural language processing (NLP) (Devlin et al., 2019; Brown et al., 2020). Recently, people started looking into self-supervised pretraining on point cloud data due to its importance in 3D analysis and robotics applications.",
96
+ "bbox": [
97
+ 169,
98
+ 561,
99
+ 486,
100
+ 755
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "An important self-supervised pretraining paradigm - masked signal modeling (MSM), including BERT (Bugliarello et al., 2021), BEiT (Bao et al., 2022), and masked autoencoders (MAE) (He et al., 2022), has recently been adopted to 3D domains. MSM",
107
+ "bbox": [
108
+ 169,
109
+ 762,
110
+ 483,
111
+ 845
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "image",
117
+ "img_path": "images/55c321c86e53d216a78bf278b07770649f082faae200f83c0ec98bc5a43b0908.jpg",
118
+ "image_caption": [
119
+ "Figure 1: Comparison of standard Point-MAE and our proposed method. Unlike standard Point-MAE that uses masked points as the prediction target, our method use a novel attention-based decoder to leverage masked points as an additional input and infer the corresponding features."
120
+ ],
121
+ "image_footnote": [],
122
+ "bbox": [
123
+ 496,
124
+ 566,
125
+ 821,
126
+ 758
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "has a simple setup: a randomly-masked input is fed to the encoder, and a decoder strives to recover the signal at the masked region. MSM is highly scalable and exhibits superior performance in many downstream vision and NLP tasks, outperforming their fully supervised equivalents. Additionally, it does not require extensive augmentation, which is essential and critical to another self-supervised",
133
+ "bbox": [
134
+ 169,
135
+ 845,
136
+ 823,
137
+ 902
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "header",
143
+ "text": "Published as a conference paper at ICLR 2024",
144
+ "bbox": [
145
+ 171,
146
+ 32,
147
+ 478,
148
+ 47
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "page_footnote",
154
+ "text": "*Part of the work done when interning at Microsoft Research Asia.",
155
+ "bbox": [
156
+ 189,
157
+ 909,
158
+ 588,
159
+ 922
160
+ ],
161
+ "page_idx": 0
162
+ },
163
+ {
164
+ "type": "page_number",
165
+ "text": "1",
166
+ "bbox": [
167
+ 493,
168
+ 948,
169
+ 503,
170
+ 959
171
+ ],
172
+ "page_idx": 0
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "pretraining paradigm — contrastive learning. In images, a mask refers to a randomly selected portion of the pixels, and the pixel colors or other pixel features in the masked region are to be reconstructed by the decoder.",
177
+ "bbox": [
178
+ 169,
179
+ 103,
180
+ 823,
181
+ 147
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "For 3D point clouds, the PointBERT approach (Yu et al., 2022) masks point patches and recovers patch tokens that are pretrained by a point cloud Tokenizer. As reconstruction features are associated with patches of points, the learned features at the point level are less competitive. MAE-based pretraining schemes (Pang et al., 2022; Hess et al., 2022; Zhang et al., 2022; Liu et al., 2022) tackle this problem by point-wise pretext tasks. However, their decoders are designed to recover the positions of the masked points in Cartesian coordinates or occupancy formats (Fig. 1-left). These designs make an intrinsic difference from 2D MSMs, where there is no need to recover masked pixel locations. This key difference makes MSM pay more attention to capturing the irregular and possibly noisy point distribution and ignore the intrinsic surface features associated with points, which are essential for 3D point cloud analysis.",
188
+ "bbox": [
189
+ 169,
190
+ 152,
191
+ 826,
192
+ 292
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "text",
198
+ "text": "In the presented work, we propose to recover intrinsic point features, i.e., point normals, and surface variations (Pauly et al., 2002) at masked points, where point normals are first-order surface features and surface variations are related to local curvature properties. We clearly demonstrate that the recovery of high-order surface point features, not point locations, is the key to improving 3D MSM performance. Learning to reconstruct high-order geometric features forces the encoder to extract distinctive and representative features robustly that may not be captured by learning to reconstruct point positions alone. Our study justifies the importance of designing signal recovery for 3D MSMs. It aligns 3D MSM learning with MSM development in vision, where feature modeling plays a critical role (Wei et al., 2022).",
199
+ "bbox": [
200
+ 169,
201
+ 299,
202
+ 826,
203
+ 425
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "text",
209
+ "text": "To recover point signals, we design a practical attention-based decoder. This new decoder takes masked points as queries, and stacks several transformer blocks. In each block, self-attention is used to propagate context features over the masked points and cross-attention is applied to fabricate the point features with the encoder's output (As shown in Fig. 1-right and Fig. 2). This design is separable from the encoder design. Therefore, common 3D encoders, such as sparse CNNs, point-based networks, and transformer-based networks, can all be adopted to strengthen the pretraining capability. Another benefit of this decoder design is that the masked point positions are only accessible by the decoder, thus avoiding leakage of positional information in the early stage of the network, as suggested by (Pang et al., 2022).",
210
+ "bbox": [
211
+ 169,
212
+ 431,
213
+ 826,
214
+ 556
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "text",
220
+ "text": "We conducted extensive ablation studies to verify the efficacy of our masked feature design and decoder. Substantial improvements over previous approaches and the generalization ability of our pretraining approach are demonstrated on various downstream tasks, including 3D shape classification, 3D shape part segmentation, and 3D object detection. We hope that our study can stimulate future research on designing strong MAE-based 3D backbones.",
221
+ "bbox": [
222
+ 169,
223
+ 563,
224
+ 826,
225
+ 633
226
+ ],
227
+ "page_idx": 1
228
+ },
229
+ {
230
+ "type": "text",
231
+ "text": "We summarize the contributions of our paper as follows:",
232
+ "bbox": [
233
+ 171,
234
+ 638,
235
+ 545,
236
+ 654
237
+ ],
238
+ "page_idx": 1
239
+ },
240
+ {
241
+ "type": "list",
242
+ "sub_type": "text",
243
+ "list_items": [
244
+ "- We propose a novel masked autoencoding method for 3D self-supervised pretraining that predicts intrinsic point features at masked points instead of their positions.",
245
+ "- We introduce a unique attention-based decoder that can generate point features without relying on any particular encoder architecture.",
246
+ "- Our experiments demonstrate that restoring intrinsic point features is superior to point location recovery in terms of Point cloud MAE, and we achieve state-of-the-art performance on various downstream tasks."
247
+ ],
248
+ "bbox": [
249
+ 178,
250
+ 659,
251
+ 821,
252
+ 757
253
+ ],
254
+ "page_idx": 1
255
+ },
256
+ {
257
+ "type": "text",
258
+ "text": "2 RELATED WORK",
259
+ "text_level": 1,
260
+ "bbox": [
261
+ 171,
262
+ 781,
263
+ 346,
264
+ 796
265
+ ],
266
+ "page_idx": 1
267
+ },
268
+ {
269
+ "type": "text",
270
+ "text": "Self-supervised pretraining in 3D Self-supervised pretraining is an active research topic in machine learning (Liu et al., 2021). The early adoption of self-supervised pretraining for 3D is to use autoencoders (Yang et al., 2018; Yan et al., 2022) and generative adversarial networks (Wu et al., 2016) to learn shape-level features, mainly for shape classification and retrieval tasks. Other self-supervised pretext tasks, such as clustering and registration, are also developed for 3D pretraining. Later, due to the great ability to learn features at both the instance and pixel levels in a self-supervised manner, contrastive learning (Wu et al., 2018; Grill et al., 2020; He et al., 2020; Brown et al., 2020; Chen & He, 2021; Yan et al., 2023) was introduced into the 3D domains to extract distinctive instance",
271
+ "bbox": [
272
+ 169,
273
+ 811,
274
+ 826,
275
+ 925
276
+ ],
277
+ "page_idx": 1
278
+ },
279
+ {
280
+ "type": "header",
281
+ "text": "Published as a conference paper at ICLR 2024",
282
+ "bbox": [
283
+ 171,
284
+ 32,
285
+ 478,
286
+ 47
287
+ ],
288
+ "page_idx": 1
289
+ },
290
+ {
291
+ "type": "page_number",
292
+ "text": "2",
293
+ "bbox": [
294
+ 493,
295
+ 946,
296
+ 504,
297
+ 959
298
+ ],
299
+ "page_idx": 1
300
+ },
301
+ {
302
+ "type": "image",
303
+ "img_path": "images/7114f81273cd4cdfb37e6c4464f6ff3f9f24793ab477c9ad4c7d89fbb323d751.jpg",
304
+ "image_caption": [
305
+ "Figure 2: The pretraining pipeline of our masked 3D feature prediction approach. Given a complete input point cloud, we first separate it into masked points and unmasked points (We use cube mask here for better visualization). We take unmasked points as the encoder input and output the block feature pairs. Then the decoder takes the block feature pairs and query points(i.e., masked points) as the input, and predicts the per-query-point features."
306
+ ],
307
+ "image_footnote": [],
308
+ "bbox": [
309
+ 173,
310
+ 99,
311
+ 828,
312
+ 224
313
+ ],
314
+ "page_idx": 2
315
+ },
316
+ {
317
+ "type": "text",
318
+ "text": "and point-wise features for various downstream tasks (Wang et al., 2021b; Xie et al., 2020; Hou et al., 2021; Zhang et al., 2021). However, contrastive learning requires data augmentation heavily to form positive or negative pairs for effective feature learning.",
319
+ "bbox": [
320
+ 169,
321
+ 310,
322
+ 825,
323
+ 354
324
+ ],
325
+ "page_idx": 2
326
+ },
327
+ {
328
+ "type": "text",
329
+ "text": "Masked signal modeling in 3D Masked signal modeling using transformer-based architectures for self-supervised learning (SSL) has shown great simplicity and superior performance. PointBERT (Yu et al., 2022) and PointMAE (Pang et al., 2022) are two such works that inherit from this idea. PointBERT partitions a point cloud into patches and trains a transformer-based autoencoder to recover masked patches' tokens. In contrast, PointMAE directly reconstructs point patches without costly tokenizer training, using Chamfer distance as the reconstruction loss. Other works like (Zhang et al., 2022; Liu et al., 2022) and (Hess et al., 2022) explore different strategies for point cloud reconstruction or classification with masking. As discussed in Sec. 1, the pretext tasks of most previous works focus only on masked point locations.",
330
+ "bbox": [
331
+ 169,
332
+ 364,
333
+ 826,
334
+ 491
335
+ ],
336
+ "page_idx": 2
337
+ },
338
+ {
339
+ "type": "text",
340
+ "text": "Signal recovery in masked autoencoders Masked autoencoders for vision pretraining typically use raw color information in masked pixels as the target signal (He et al., 2022). However, Wei et al. (Wei et al., 2022) have found that using alternative image features, such as HOG descriptors, tokenizer features, and features from other unsupervised and supervised pretrained networks, can improve network performance and efficiency. In contrast, existing 3D MAE methods have limited use of point features and struggle with predicting the location of masked points. Our approach focuses on feature recovery rather than position prediction, selecting representative 3D local features such as point normals and surface variation (Pauly et al., 2002) as target features to demonstrate their efficacy. Our study allows for leveraging more advanced 3D features in 3D masked autoencoders, while further exploration of other types of 3D features (Laga et al., 2018) is left for future work.",
341
+ "bbox": [
342
+ 169,
343
+ 501,
344
+ 826,
345
+ 642
346
+ ],
347
+ "page_idx": 2
348
+ },
349
+ {
350
+ "type": "text",
351
+ "text": "3 MASKED 3D FEATURE PREDICTION",
352
+ "text_level": 1,
353
+ "bbox": [
354
+ 171,
355
+ 662,
356
+ 504,
357
+ 679
358
+ ],
359
+ "page_idx": 2
360
+ },
361
+ {
362
+ "type": "text",
363
+ "text": "In this section, we present our masked 3D feature prediction approach for self-supervised point cloud pretraining. Our network design follows the masked autoencoder paradigm: a 3D encoder takes a point cloud whose points are randomly masked as input, and a decoder is responsible for reconstructing the predefined features at the masked points. The network architecture is depicted in Fig. 2. In the following sections, we first introduce the masking strategy and 3D masked feature modeling in Sec. 3.1 and 3.2, and then present our encoder and decoder design in Sec. 3.3 and 3.4. Here, the key ingredients of our approach are the design of prediction targets and the decoder, which govern the quality of the learned features.",
364
+ "bbox": [
365
+ 169,
366
+ 696,
367
+ 826,
368
+ 808
369
+ ],
370
+ "page_idx": 2
371
+ },
372
+ {
373
+ "type": "text",
374
+ "text": "3.1 3D MASKING",
375
+ "text_level": 1,
376
+ "bbox": [
377
+ 171,
378
+ 827,
379
+ 310,
380
+ 840
381
+ ],
382
+ "page_idx": 2
383
+ },
384
+ {
385
+ "type": "text",
386
+ "text": "We follow the masking strategy proposed by PointBERT (Yu et al., 2022) to mask out some portions of an input point cloud and feed it to the encoder. Denote the input point cloud as $\\mathcal{P} \\in \\mathbb{R}^{N \\times 3}$ , where $N$ is the number of points. We sample $K$ points using farthest point sampling (FPS). For each sample point, its $k$ -nearest neighbor points form a point patch. For a given mask ratio $m_r$ , $0 < m_r < 1$ , we randomly select $M$ patches and remove them from the input, where $M = \\min(\\lceil m_r \\cdot K \\rceil, K - 1)$ .",
387
+ "bbox": [
388
+ 169,
389
+ 854,
390
+ 823,
391
+ 926
392
+ ],
393
+ "page_idx": 2
394
+ },
395
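For readers who prefer code, the masking procedure described above can be sketched as follows. This is a minimal illustration assuming a NumPy point cloud of shape (N, 3); the function names and the NumPy implementation are our own illustrative assumptions, not the authors' released code.

```python
import numpy as np

def farthest_point_sampling(points, K):
    """Greedy FPS: pick K well-spread seed points from an (N, 3) array."""
    N = points.shape[0]
    selected = [np.random.randint(N)]
    dist = np.full(N, np.inf)
    for _ in range(K - 1):
        dist = np.minimum(dist, np.linalg.norm(points - points[selected[-1]], axis=1))
        selected.append(int(np.argmax(dist)))
    return np.array(selected)

def mask_point_cloud(points, K=128, k=32, mask_ratio=0.6):
    """Split a point cloud into masked points P_M and remaining points P_U."""
    centers = farthest_point_sampling(points, K)
    # the k-nearest neighbors of each FPS center form one point patch
    d = np.linalg.norm(points[None, :, :] - points[centers][:, None, :], axis=-1)
    patches = np.argsort(d, axis=1)[:, :k]                    # (K, k) point indices
    M = min(int(np.ceil(mask_ratio * K)), K - 1)               # number of masked patches
    masked_patches = np.random.choice(K, M, replace=False)
    masked_idx = np.unique(patches[masked_patches].ravel())
    # here "unmasked" is simply the complement of the masked points (a simplification)
    unmasked_idx = np.setdiff1d(np.arange(points.shape[0]), masked_idx)
    return points[masked_idx], points[unmasked_idx]
```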
+ {
396
+ "type": "header",
397
+ "text": "Published as a conference paper at ICLR 2024",
398
+ "bbox": [
399
+ 173,
400
+ 32,
401
+ 478,
402
+ 47
403
+ ],
404
+ "page_idx": 2
405
+ },
406
+ {
407
+ "type": "page_number",
408
+ "text": "3",
409
+ "bbox": [
410
+ 493,
411
+ 948,
412
+ 503,
413
+ 959
414
+ ],
415
+ "page_idx": 2
416
+ },
417
+ {
418
+ "type": "text",
419
+ "text": "In the following, the masked points and the remaining points are denoted by $\\mathcal{P}_M$ and $\\mathcal{P}_U$ , respectively.",
420
+ "bbox": [
421
+ 169,
422
+ 103,
423
+ 826,
424
+ 119
425
+ ],
426
+ "page_idx": 3
427
+ },
428
+ {
429
+ "type": "text",
430
+ "text": "3.2 TARGET FEATURE DESIGN",
431
+ "text_level": 1,
432
+ "bbox": [
433
+ 171,
434
+ 133,
435
+ 401,
436
+ 148
437
+ ],
438
+ "page_idx": 3
439
+ },
440
+ {
441
+ "type": "text",
442
+ "text": "As argued in Sec. 1, we advocate against using point locations as the reconstructed target. We choose to reconstruct normal and surface variation at each point, which reflect differential surface properties.",
443
+ "bbox": [
444
+ 169,
445
+ 160,
446
+ 826,
447
+ 189
448
+ ],
449
+ "page_idx": 3
450
+ },
451
+ {
452
+ "type": "text",
453
+ "text": "On the other hand, our decoder design (to be introduced in Sec. 3.4) takes query points as input and output predicted point-wise features. Therefore, the decoder implicitly carries positional information for learning meaningful features through the encoder.",
454
+ "bbox": [
455
+ 169,
456
+ 188,
457
+ 485,
458
+ 272
459
+ ],
460
+ "page_idx": 3
461
+ },
462
+ {
463
+ "type": "text",
464
+ "text": "Given a point cloud, both point normal and surface variations are defined using local principal component analysis (PCA). We first define a covariance matrix $C_r$ over a local surface region around $\\mathbf{p}$ :",
465
+ "bbox": [
466
+ 169,
467
+ 277,
468
+ 486,
469
+ 349
470
+ ],
471
+ "page_idx": 3
472
+ },
473
+ {
474
+ "type": "equation",
475
+ "text": "\n$$\nC _ {r} := \\frac {\\int_ {\\mathbf {x} \\in S \\cap \\mathbb {S} _ {r} (\\mathbf {p})} (\\mathbf {p} - \\mathbf {x}) (\\mathbf {p} - \\mathbf {x}) ^ {T} d \\mathbf {x}}{\\int_ {\\mathbf {x} \\in S \\cap \\mathbb {S} _ {r} (\\mathbf {p})} \\mathbf {1} \\cdot d \\mathbf {x}}, \\tag {1}\n$$\n",
476
+ "text_format": "latex",
477
+ "bbox": [
478
+ 179,
479
+ 354,
480
+ 485,
481
+ 396
482
+ ],
483
+ "page_idx": 3
484
+ },
485
+ {
486
+ "type": "image",
487
+ "img_path": "images/ca22febfd7507a00564c923c904ee15bcba2b6942297b669dc9fbe73105ff1fe.jpg",
488
+ "image_caption": [
489
+ "Figure 3: Visualization of point features.. The point normal is color-coded by the normal vector. The surface variation is color-coded where white indicates low value and red indicates high value."
490
+ ],
491
+ "image_footnote": [],
492
+ "bbox": [
493
+ 501,
494
+ 205,
495
+ 821,
496
+ 335
497
+ ],
498
+ "page_idx": 3
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "where $\\mathcal{S} \\cap \\mathbb{S}_r(\\mathbf{p})$ is the local surface region at",
503
+ "bbox": [
504
+ 169,
505
+ 401,
506
+ 485,
507
+ 416
508
+ ],
509
+ "page_idx": 3
510
+ },
511
+ {
512
+ "type": "text",
513
+ "text": "$\\mathbf{p}$ , restricted by a sphere centered at $\\mathbf{p}$ with radius $r$ . We set $r = 0.1$ in our case. The ablation details are shown in the supplement.",
514
+ "bbox": [
515
+ 169,
516
+ 415,
517
+ 823,
518
+ 445
519
+ ],
520
+ "page_idx": 3
521
+ },
522
+ {
523
+ "type": "text",
524
+ "text": "The normal $n(\\mathbf{p})$ at $\\mathbf{p}$ is estimated as the smallest eigenvector of $C_r$ . The sign of each normal is computed by using the approach of (Hoppe et al., 1992).",
525
+ "bbox": [
526
+ 169,
527
+ 450,
528
+ 823,
529
+ 479
530
+ ],
531
+ "page_idx": 3
532
+ },
533
+ {
534
+ "type": "text",
535
+ "text": "Surface variation (Pauly et al., 2002) at $\\mathbf{p}$ is denoted by $\\sigma_r(\\mathbf{p})$ , in the following form:",
536
+ "bbox": [
537
+ 169,
538
+ 484,
539
+ 733,
540
+ 501
541
+ ],
542
+ "page_idx": 3
543
+ },
544
+ {
545
+ "type": "equation",
546
+ "text": "\n$$\n\\sigma_ {r} (\\mathbf {p}) = \\frac {\\lambda_ {1}}{\\lambda_ {1} + \\lambda_ {2} + \\lambda_ {3}}, \\tag {2}\n$$\n",
547
+ "text_format": "latex",
548
+ "bbox": [
549
+ 413,
550
+ 507,
551
+ 825,
552
+ 539
553
+ ],
554
+ "page_idx": 3
555
+ },
556
+ {
557
+ "type": "text",
558
+ "text": "where $\\lambda_1 \\leq \\lambda_2 \\leq \\lambda_3$ are the eigenvalues of $C_r$ . Surface variation is a geometric feature that measures the local derivation at point $\\mathbf{p}$ in a neighborhood of size $r$ on a given surface $S$ . Its original and modified versions have been used as a robust feature descriptor for a variety of shape analysis and processing tasks, such as saliency extraction (Pauly & Gross, 2001), curved feature extraction (Pauly et al., 2003), shape segmentation (Huang et al., 2006; Yan et al., 2021), and shape simplification (Pauly et al., 2002).",
559
+ "bbox": [
560
+ 169,
561
+ 542,
562
+ 823,
563
+ 628
564
+ ],
565
+ "page_idx": 3
566
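As a concrete illustration of Eqs. (1)-(2), the sketch below estimates both quantities on a sampled point cloud with local PCA; the helper name, the discrete averaging over neighbors, and the use of NumPy are illustrative assumptions rather than the authors' implementation.

```python
import numpy as np

def normal_and_variation(points, p, r=0.1):
    """Estimate the normal n(p) and surface variation sigma_r(p) at point p
    from its radius-r neighborhood in a sampled point cloud (cf. Eqs. 1-2)."""
    nbrs = points[np.linalg.norm(points - p, axis=1) < r]
    # discrete covariance of the offsets (p - x) over the local region
    offsets = p - nbrs
    C = offsets.T @ offsets / len(nbrs)
    eigvals, eigvecs = np.linalg.eigh(C)        # ascending: lambda1 <= lambda2 <= lambda3
    normal = eigvecs[:, 0]                      # eigenvector of the smallest eigenvalue
    # the sign of the normal would still need to be fixed, e.g., as in Hoppe et al. (1992)
    variation = eigvals[0] / eigvals.sum()      # sigma_r lies in [0, 1/3]
    return normal, variation
```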
+ },
567
+ {
568
+ "type": "text",
569
+ "text": "In the limit, i.e., when $r \\to 0$ , $\\sigma_r(\\mathbf{p})$ is related to the mean curvature (Clarenz et al., 2004). By varying the radii of $\\mathbb{S}_r$ , multiscale surface variation descriptors can be constructed. In our work, we chose only single-scale surface variation for simplicity.",
570
+ "bbox": [
571
+ 169,
572
+ 633,
573
+ 826,
574
+ 678
575
+ ],
576
+ "page_idx": 3
577
+ },
578
+ {
579
+ "type": "text",
580
+ "text": "Although both surface normal and surface variation are derived from local PCA, they are complementary to each other in the sense that surface normal carries first-order differential property while surface variation carries second-order differential property due to its relation to mean curvature. We visualize both features in Fig. 3 and show more examples in supplement. In Sec. 4.3, we show that reconstructing surface normal and surface variation leads to better learned features than reconstructing one of them.",
581
+ "bbox": [
582
+ 169,
583
+ 683,
584
+ 826,
585
+ 767
586
+ ],
587
+ "page_idx": 3
588
+ },
589
+ {
590
+ "type": "text",
591
+ "text": "Loss function Point normals and surface variations represent first- and second-order surface properties. Their value intervals are also bounded: surface normal has unit length; surface variation is non-negative and not greater than $\\frac{1}{3}$ . Their value-bounded properties are suitable for easy minimizing the deviation from the prediction to their ground truths, compared to using unbounded features such as curvatures. We denote the point normals and surface variations of $\\mathcal{P}_M$ by $\\mathcal{N}_M \\in \\mathbb{R}^{M \\times 3}$ and $\\mathcal{V}_M \\in \\mathbb{R}^M$ , respectively. The loss function for pretraining the masked autoencoders is composed of two terms:",
592
+ "bbox": [
593
+ 169,
594
+ 779,
595
+ 826,
596
+ 875
597
+ ],
598
+ "page_idx": 3
599
+ },
600
+ {
601
+ "type": "equation",
602
+ "text": "\n$$\nL _ {n} = \\left\\| \\mathcal {N} _ {M} - \\widehat {\\mathcal {N}} _ {M} \\right\\| _ {2} ^ {2}; \\tag {3}\n$$\n",
603
+ "text_format": "latex",
604
+ "bbox": [
605
+ 421,
606
+ 880,
607
+ 825,
608
+ 898
609
+ ],
610
+ "page_idx": 3
611
+ },
612
+ {
613
+ "type": "equation",
614
+ "text": "\n$$\nL _ {v} = \\left\\| \\mathcal {V} _ {M} - \\widehat {\\mathcal {V}} _ {M} \\right\\| _ {1}; \\tag {4}\n$$\n",
615
+ "text_format": "latex",
616
+ "bbox": [
617
+ 424,
618
+ 902,
619
+ 825,
620
+ 921
621
+ ],
622
+ "page_idx": 3
623
+ },
624
+ {
625
+ "type": "header",
626
+ "text": "Published as a conference paper at ICLR 2024",
627
+ "bbox": [
628
+ 171,
629
+ 32,
630
+ 478,
631
+ 47
632
+ ],
633
+ "page_idx": 3
634
+ },
635
+ {
636
+ "type": "page_number",
637
+ "text": "4",
638
+ "bbox": [
639
+ 491,
640
+ 948,
641
+ 504,
642
+ 959
643
+ ],
644
+ "page_idx": 3
645
+ },
646
+ {
647
+ "type": "text",
648
+ "text": "where $\\widehat{\\mathcal{N}}_M$ and $\\widehat{\\nu}_M$ are the predicted versions of $\\mathcal{N}_M$ and $\\nu_{M}$ , respectively. The total loss function $L = \\lambda_{1}L_{n} + \\lambda_{2}L_{v}$ , where $\\lambda_{1} = 1, \\lambda_{2} = 1$ in our case.",
649
+ "bbox": [
650
+ 169,
651
+ 102,
652
+ 823,
653
+ 133
654
+ ],
655
+ "page_idx": 4
656
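A sketch of the combined objective in Eqs. (3)-(4) with both weights set to 1, written in PyTorch style; averaging the per-point errors over the masked points is our assumption about the reduction, and the function name is illustrative.

```python
import torch

def maskfeat3d_loss(pred_normals, gt_normals, pred_vars, gt_vars, w_n=1.0, w_v=1.0):
    """Squared-L2 loss on predicted normals plus L1 loss on predicted surface
    variations (cf. Eqs. 3-4).
    pred_normals, gt_normals: (M, 3); pred_vars, gt_vars: (M,)."""
    loss_n = ((pred_normals - gt_normals) ** 2).sum(dim=-1).mean()   # squared L2 per point
    loss_v = (pred_vars - gt_vars).abs().mean()                      # L1 per point
    return w_n * loss_n + w_v * loss_v
```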
+ },
657
+ {
658
+ "type": "text",
659
+ "text": "3.3 ENCODER DESIGN",
660
+ "text_level": 1,
661
+ "bbox": [
662
+ 171,
663
+ 148,
664
+ 346,
665
+ 162
666
+ ],
667
+ "page_idx": 4
668
+ },
669
+ {
670
+ "type": "text",
671
+ "text": "Unlike most MAE-based approaches that are limited to ViT-based encoders, our approach is not restricted to any specific type of encoder. Common 3D encoders for point clouds are all supported, as long as the encoder outputs a set of learned features bind to spatial blocks, where spatial blocks could be point patches used for ViT-like transformer encoders (Yu et al., 2022; Pang et al., 2022; Liu et al., 2022; Zhang et al., 2022), set abstractions used by PointNet++-like encoders (Qi et al., 2017b; Qian et al., 2022), or coarse voxels used by sparse CNN-based encoders (Wang et al., 2017; Graham et al., 2018; Choy et al., 2019).",
672
+ "bbox": [
673
+ 169,
674
+ 174,
675
+ 826,
676
+ 272
677
+ ],
678
+ "page_idx": 4
679
+ },
680
+ {
681
+ "type": "text",
682
+ "text": "In the following, we briefly review these typical encoders and their adaption for our pretraining.",
683
+ "bbox": [
684
+ 169,
685
+ 279,
686
+ 802,
687
+ 295
688
+ ],
689
+ "page_idx": 4
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "ViT-based encoders These encoders first embed point patches via PointNet (Qi et al., 2017a), then send these patch tokens to a standard transformer that includes several multihead self-attention layers and feedforward layers. The transformer outputs the fabricated token features, corresponding to every input point patch. The token feature $\\mathbf{f}_i$ and the patch center $\\mathbf{c}_i$ form a block feature pair $B_{i} = \\{\\mathbf{f}_{i},\\mathbf{c}_{i}\\}$ which is needed by our decoder. Here we can call $\\mathbf{f}_i$ block feature and $\\mathbf{c}_i$ block centroid.",
694
+ "bbox": [
695
+ 169,
696
+ 305,
697
+ 823,
698
+ 377
699
+ ],
700
+ "page_idx": 4
701
+ },
702
+ {
703
+ "type": "text",
704
+ "text": "PointNet++-like encoders In these encoders, the network features are aggregated through a number of set abstraction levels. We take the learned features and the centroids at the coarsest set abstractions as block feature pairs.",
705
+ "bbox": [
706
+ 169,
707
+ 387,
708
+ 823,
709
+ 431
710
+ ],
711
+ "page_idx": 4
712
+ },
713
+ {
714
+ "type": "text",
715
+ "text": "Sparse CNN-based encoders These encoders apply 3D convolution on sparse voxels from the finest level to the coarsest level. Multiple convolution layers and resblocks are commonly used. We interpolate the coarse voxel features at the centroids of the unmasked patches and use these interpolated features and the patch centroids to form our block feature pairs.",
716
+ "bbox": [
717
+ 169,
718
+ 441,
719
+ 826,
720
+ 500
721
+ ],
722
+ "page_idx": 4
723
+ },
724
+ {
725
+ "type": "text",
726
+ "text": "As suggested by (Pang et al., 2022), the early leaking of masked point information to the network could jeopardize feature learning. We adopt this suggestion: feed the unmasked points to the encoder only, and leave the masked points to the decoder.",
727
+ "bbox": [
728
+ 169,
729
+ 503,
730
+ 823,
731
+ 547
732
+ ],
733
+ "page_idx": 4
734
+ },
735
+ {
736
+ "type": "text",
737
+ "text": "3.4 DECODER DESIGN",
738
+ "text_level": 1,
739
+ "bbox": [
740
+ 171,
741
+ 563,
742
+ 346,
743
+ 577
744
+ ],
745
+ "page_idx": 4
746
+ },
747
+ {
748
+ "type": "text",
749
+ "text": "Decoder structure We design an attention-based decoder to restore the target features at masked regions. The decoder takes the block feature pairs $\\mathcal{B} \\coloneqq \\{B_i\\}_{i=1}^b$ from the encoder and a query point set $\\mathcal{Q}$ , i.e., the masked point set $\\mathcal{P}_M$ . It is composed of a stack of $l$ transformer blocks, where $l = 4$ in our case (See Fig. 2). Each block contains a self-attention layer and a cross-attention layer. The self-attention layer takes the query points and their positional embeddings as input and outputs the per-query point features, denoted by $S^{in}$ . Then $S^{in}$ and the encoder block features $\\mathcal{B}$ are passed into the cross-attention layer, where $S^{in}$ serves as attention query, the block features serve as attention key and value, and the block centroids are the positional embedding of the block features. The output per-point features from the last block go through an MLP head to predict the target features at the query points.",
750
+ "bbox": [
751
+ 169,
752
+ 595,
753
+ 826,
754
+ 734
755
+ ],
756
+ "page_idx": 4
757
+ },
758
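To make the decoder block concrete, the following PyTorch sketch mirrors the description above: self-attention over the query (masked) points followed by cross-attention against the encoder block features, with 3D coordinates embedded as positional information. The layer norms, the linear positional embedding, and the dimensions are our assumptions; in the paper, l = 4 such blocks are stacked and followed by an MLP head that predicts the target features per query point.

```python
import torch
import torch.nn as nn

class DecoderBlock(nn.Module):
    """One decoder block: self-attention over query points, then cross-attention
    that reads from the encoder's block feature pairs (features + centroids)."""
    def __init__(self, dim=384, heads=6):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.cross_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.pos_embed = nn.Linear(3, dim)   # embed 3D coordinates as positional features
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

    def forward(self, query_feats, query_xyz, block_feats, block_xyz):
        # self-attention propagates context among the query (masked) points
        q = query_feats + self.pos_embed(query_xyz)
        query_feats = self.norm1(query_feats + self.self_attn(q, q, q)[0])
        # cross-attention attends to encoder block features, positioned by block centroids
        kv = block_feats + self.pos_embed(block_xyz)
        query_feats = self.norm2(query_feats + self.cross_attn(query_feats, kv, kv)[0])
        return query_feats
```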
+ {
759
+ "type": "text",
760
+ "text": "Efficacy of self-attention layers At first glance, it is sufficient to use cross-attention layers only for predicting per-point features. The recent masked discrimination work (Liu et al., 2022) obeys this intuition for its decoder design, no information exchanged between different query points. Instead, we introduce the self-attention layer to propagate information over query points and use multiple attention blocks to strengthen the mutual relationship progressively. We found that our design significantly improves feature learning, as verified by our ablation study (See Sec. 4.3).",
761
+ "bbox": [
762
+ 169,
763
+ 744,
764
+ 826,
765
+ 830
766
+ ],
767
+ "page_idx": 4
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "Supporting of various encoders In the above design, the decoder needs block feature pairs only from the encoder, thus having great potential to leverage various encoder structures, not limited to ViT-based transformer structures. This advantage is verified by our experiments (See Sec. 4).",
772
+ "bbox": [
773
+ 169,
774
+ 840,
775
+ 823,
776
+ 883
777
+ ],
778
+ "page_idx": 4
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "Feature reconstruction versus position reconstruction Note that our decoder and loss design do not explicitly model point positions, which are zero-order surface properties complementary to",
783
+ "bbox": [
784
+ 169,
785
+ 895,
786
+ 823,
787
+ 925
788
+ ],
789
+ "page_idx": 4
790
+ },
791
+ {
792
+ "type": "header",
793
+ "text": "Published as a conference paper at ICLR 2024",
794
+ "bbox": [
795
+ 171,
796
+ 32,
797
+ 478,
798
+ 47
799
+ ],
800
+ "page_idx": 4
801
+ },
802
+ {
803
+ "type": "page_number",
804
+ "text": "5",
805
+ "bbox": [
806
+ 493,
807
+ 948,
808
+ 504,
809
+ 959
810
+ ],
811
+ "page_idx": 4
812
+ },
813
+ {
814
+ "type": "text",
815
+ "text": "surface normals and surface variations. Instead, the decoder predicts feature values at query points. Therefore, the zero-order positional information is already encoded implicitly. This explains why our approach is superior to baseline approaches that reconstruct point positions for feature learning (See Sec. 4.2).",
816
+ "bbox": [
817
+ 169,
818
+ 103,
819
+ 826,
820
+ 160
821
+ ],
822
+ "page_idx": 5
823
+ },
824
+ {
825
+ "type": "text",
826
+ "text": "Query point selection Due to the quadratic complexity of self-attention, the computational cost for a full query set could be much higher. In practice, we can randomly choose a point subset from $\\mathcal{P}_M$ as the query set during training. By default, we use all masked points as queries.",
827
+ "bbox": [
828
+ 169,
829
+ 171,
830
+ 823,
831
+ 214
832
+ ],
833
+ "page_idx": 5
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "4 EXPERIMENT ANALYSIS",
838
+ "text_level": 1,
839
+ "bbox": [
840
+ 171,
841
+ 231,
842
+ 410,
843
+ 247
844
+ ],
845
+ "page_idx": 5
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "We conducted a series of experiments and ablation studies to validate the efficacy and superiority of our masked 3D feature prediction approach, in short MaskFeat3D, for point cloud pretraining.",
850
+ "bbox": [
851
+ 169,
852
+ 263,
853
+ 826,
854
+ 292
855
+ ],
856
+ "page_idx": 5
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "4.1 EXPERIMENT SETUP",
861
+ "text_level": 1,
862
+ "bbox": [
863
+ 171,
864
+ 306,
865
+ 359,
866
+ 320
867
+ ],
868
+ "page_idx": 5
869
+ },
870
+ {
871
+ "type": "text",
872
+ "text": "Pretraining dataset We choose ShapeNet (Chang et al., 2015) dataset for our pretraining, following the practice of PointBERT (Yu et al., 2022) and previous 3D MAE-based approaches (Pang et al., 2022; Zhang et al., 2022; Liu et al., 2022). ShapeNet (Chang et al., 2015) contains 57748 synthetic 3D shapes from 55 categories. We sample 50000 points uniformly on each shape and select 128 nearest points from them for each point in the point cloud for constructing the local region to approximate surface variation. During pretraining, $N = 2048$ points are randomly sampled to create the point cloud.",
873
+ "bbox": [
874
+ 169,
875
+ 333,
876
+ 826,
877
+ 431
878
+ ],
879
+ "page_idx": 5
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "Network training We integrated different encoders with our masked 3D feature prediction approach, including the ViT-based transformer used by (Pang et al., 2022), sparse-CNN-based encoder (Choy et al., 2019), and PointNeXt encoder (Qian et al., 2022) which is an advanced version of PointNet++. We implemented all pretraining models in PyTorch and used AdamW optimizer with $10^{-4}$ weight decay. We use PointBERT's masking strategy for ShapeNet pretraining. We set $K = 128$ in FPS, $k = 32$ -nearest points to form the point patch, and the best masking ratio is $60\\%$ empirically. The number of transformer blocks in the decoder is 4. The learning rates of the encoder and the decoder are set to $10^{-3}$ and $10^{-4}$ , respectively. Standard data augmentation such as rotation, scaling, and translation are employed. All models were trained with 300 epochs on eight 16GB Nvidia V100 GPUs. The total batch size is 64.",
884
+ "bbox": [
885
+ 169,
886
+ 443,
887
+ 826,
888
+ 582
889
+ ],
890
+ "page_idx": 5
891
+ },
892
+ {
893
+ "type": "text",
894
+ "text": "Downstream Tasks We choose shape classification and shape part segmentation tasks to validate the efficacy and generalizability of our pretrained networks.",
895
+ "bbox": [
896
+ 169,
897
+ 593,
898
+ 823,
899
+ 622
900
+ ],
901
+ "page_idx": 5
902
+ },
903
+ {
904
+ "type": "list",
905
+ "sub_type": "text",
906
+ "list_items": [
907
+ "- Shape classification: The experiments were carried out on two different datasets: ModelNet40 (Wu et al., 2015) and ScanObjectNN (Uy et al., 2019). ModelNet40 is a widely used synthetic dataset that comprises 40 classes and contains 9832 training objects and 2468 test objects. In contrast, ScanObjectNN is a real-world scanned dataset that includes approximately 15000 actual scanned objects from 15 classes. As the domain gap between ShapeNet and ScanObjectNN is larger than that between ShapeNet and ModelNet40, the evaluation on ScanObjectNN is a good measure of the generalizability of pretrained networks.",
908
+ "- Shape part segmentation ShapeNetPart Dataset (Yi et al., 2016) contains 16880 models from 16 shape categories, and each model has $2\\sim 6$ parts. Following the standard evaluation protocol (Qi et al., 2017b), 2048 points are sampled on each shape. For evaluation, we report per-class mean IoU (cls. mIoU) and mean IoU averaged over all test instances (ins. mIoU)."
909
+ ],
910
+ "bbox": [
911
+ 178,
912
+ 635,
913
+ 825,
914
+ 792
915
+ ],
916
+ "page_idx": 5
917
+ },
918
+ {
919
+ "type": "text",
920
+ "text": "The training-and-test split of the above tasks follows existing works. For these downstream tasks, we employ the task-specific decoders proposed by PointMAE (Pang et al., 2022) and reload the pretrained weights for the encoder. Training details are provided in the supplemental material.",
921
+ "bbox": [
922
+ 169,
923
+ 806,
924
+ 826,
925
+ 849
926
+ ],
927
+ "page_idx": 5
928
+ },
929
+ {
930
+ "type": "text",
931
+ "text": "4.2 EFFICACY OF 3D FEATURE PREDICTION",
932
+ "text_level": 1,
933
+ "bbox": [
934
+ 171,
935
+ 868,
936
+ 493,
937
+ 883
938
+ ],
939
+ "page_idx": 5
940
+ },
941
+ {
942
+ "type": "text",
943
+ "text": "The advantages in learning discriminative features by our masked feature prediction approach are verified by its superior performance in downstream tasks.",
944
+ "bbox": [
945
+ 169,
946
+ 895,
947
+ 823,
948
+ 925
949
+ ],
950
+ "page_idx": 5
951
+ },
952
+ {
953
+ "type": "header",
954
+ "text": "Published as a conference paper at ICLR 2024",
955
+ "bbox": [
956
+ 171,
957
+ 32,
958
+ 478,
959
+ 47
960
+ ],
961
+ "page_idx": 5
962
+ },
963
+ {
964
+ "type": "page_number",
965
+ "text": "6",
966
+ "bbox": [
967
+ 493,
968
+ 948,
969
+ 504,
970
+ 960
971
+ ],
972
+ "page_idx": 5
973
+ },
974
+ {
975
+ "type": "table",
976
+ "img_path": "images/bda9e2180cce8dcb7d5b48f33cea5a2fa12bedaa1010429f9b8a3bedd3da694c.jpg",
977
+ "table_caption": [],
978
+ "table_footnote": [],
979
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">ScanObjectNN</td><td colspan=\"2\">ShapeNetPart</td><td colspan=\"2\">ShapeNetPart(1% labels)</td></tr><tr><td>OBJ-BG</td><td>OBJ-ONLY</td><td>PB-T50-RS</td><td>ins. mIoU</td><td>cls. mIoU</td><td>ins. mIoU</td><td>cls. mIoU</td></tr><tr><td>PointViT† Yu et al. (2022)</td><td>79.9</td><td>80.6</td><td>77.2</td><td>85.1</td><td>83.4</td><td>77.6</td><td>72.2</td></tr><tr><td>PointBERT Yu et al. (2022)</td><td>87.4</td><td>88.1</td><td>83.1</td><td>85.6</td><td>84.1</td><td>79.2</td><td>73.9</td></tr><tr><td>MaskDiscr Liu et al. (2022)</td><td>89.7</td><td>89.3</td><td>84.3</td><td>86.0</td><td>84.4</td><td>78.8</td><td>72.3</td></tr><tr><td>MaskSurfel Zhang et al. (2022)</td><td>91.2</td><td>89.2</td><td>85.7</td><td>86.1</td><td>84.4</td><td>-</td><td>-</td></tr><tr><td>PointMAE Pang et al. (2022)</td><td>90.0</td><td>88.3</td><td>85.2</td><td>86.1</td><td>-</td><td>79.1</td><td>74.4</td></tr><tr><td>MaskFeat3D (PointViT)</td><td>91.7(91.6)</td><td>90.0(89.6)</td><td>87.7(87.5)</td><td>86.3(86.3)</td><td>84.9(84.8)</td><td>80.0(79.9)</td><td>75.1(75.0)</td></tr></table>",
980
+ "bbox": [
981
+ 174,
982
+ 99,
983
+ 823,
984
+ 196
985
+ ],
986
+ "page_idx": 6
987
+ },
988
+ {
989
+ "type": "table",
990
+ "img_path": "images/2e0aac85ce8fba0b00bff3d746c07bb4a62524120180a4b0ab399c656d0980b5.jpg",
991
+ "table_caption": [
992
+ "Table 1: Performance comparison of MAE-based approaches on downstream tasks. All the methods in the first section use the same transformer backbone architecture, PointViT. $\\dagger$ represents the from scratch results and all other methods represent the fine-tuning results using pretrained weights. The average result of 3 runs is given in brackets."
993
+ ],
994
+ "table_footnote": [],
995
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">ScanObjectNN</td><td colspan=\"2\">ShapeNetPart</td></tr><tr><td>OBJ-BG</td><td>OBJ-ONLY</td><td>PB-T50-RS</td><td>ins. mIoU</td><td>cls. mIoU</td></tr><tr><td>PointNet Qi et al. (2017a)</td><td>73.3</td><td>79.2</td><td>68.0</td><td>-</td><td>-</td></tr><tr><td>PointNet++ Qi et al. (2017b)</td><td>82.3</td><td>84.3</td><td>77.9</td><td>85.1</td><td>81.9</td></tr><tr><td>PointCNN Li et al. (2018)</td><td>86.1</td><td>85.5</td><td>78.5</td><td>86.1</td><td>84.6</td></tr><tr><td>DGCNN Wang et al. (2019)</td><td>82.8</td><td>86.2</td><td>78.1</td><td>85.2</td><td>82.3</td></tr><tr><td>MinkowskiNet Choy et al. (2019)</td><td>84.1</td><td>86.1</td><td>80.1</td><td>85.3</td><td>83.2</td></tr><tr><td>PointTransformer Zhao et al. (2021)</td><td>-</td><td>-</td><td>-</td><td>86.6</td><td>83.7</td></tr><tr><td>PointMLP Ma et al. (2022)</td><td>88.7</td><td>88.2</td><td>85.4</td><td>86.1</td><td>84.6</td></tr><tr><td>StratifiedTransformer Lai et al. (2022)</td><td>-</td><td>-</td><td>-</td><td>86.6</td><td>85.1</td></tr><tr><td>PointNeXt Qian et al. (2022)</td><td>91.9</td><td>91.0</td><td>88.1</td><td>87.1</td><td>84.7</td></tr><tr><td>MaskFeat3D (PointViT)</td><td>91.7(91.6)</td><td>90.0(89.6)</td><td>87.7(87.5)</td><td>86.3(86.3)</td><td>84.9(84.8)</td></tr><tr><td>MaskFeat3D (MinkowskiNet)</td><td>85.1(85.0)</td><td>87.0(86.7)</td><td>80.8(80.6)</td><td>85.6(85.5)</td><td>83.5(83.5)</td></tr><tr><td>MaskFeat3D (PointNeXt)</td><td>92.7(92.6)</td><td>92.0(91.9)</td><td>88.6(88.5)</td><td>87.4(87.4)</td><td>85.5(85.5)</td></tr></table>",
996
+ "bbox": [
997
+ 176,
998
+ 256,
999
+ 821,
1000
+ 429
1001
+ ],
1002
+ "page_idx": 6
1003
+ },
1004
+ {
1005
+ "type": "text",
1006
+ "text": "Table 2: Comparison with supervised methods. The average result of 3 runs is given in brackets.",
1007
+ "bbox": [
1008
+ 200,
1009
+ 433,
1010
+ 792,
1011
+ 448
1012
+ ],
1013
+ "page_idx": 6
1014
+ },
1015
+ {
1016
+ "type": "text",
1017
+ "text": "Comparison with MAE-based approaches We compare our approach with other MAE-based approaches that use the same encoder structure. Tab. 1 reports that: (1) the performance of all MAE-based methods surpasses their supervised baseline - PointViT; (2) our strategy of reconstructing point features instead of point positions yields significant improvements in ScannObjectNN classification, improving overall accuracy on the most challenging split, PB-T50-RS, from $85.7\\%$ (MaskSurfel) to $87.7\\%$ , and showing consistent improvements on other splits and ShapeNetPart segmentation.",
1018
+ "bbox": [
1019
+ 169,
1020
+ 462,
1021
+ 826,
1022
+ 547
1023
+ ],
1024
+ "page_idx": 6
1025
+ },
1026
+ {
1027
+ "type": "text",
1028
+ "text": "We also compare the performance of our approach with PointBERT (Yu et al., 2022), PointMAE (Pang et al., 2022), and MaskDiscr (Liu et al., 2022) on ShapeNetPart segmentation with less labeled data. In this experiment, we randomly select $1\\%$ labeled data from each category, and finetune the network with all selected data. The performance is reported in Tab. 1, which shows that using our pretrained network leads to much better performance than the baseline methods.",
1029
+ "bbox": [
1030
+ 169,
1031
+ 551,
1032
+ 826,
1033
+ 625
1034
+ ],
1035
+ "page_idx": 6
1036
+ },
1037
+ {
1038
+ "type": "text",
1039
+ "text": "Comparison with supervised approaches Compared with state-of-the-art supervised methods, our approach again achieves superior performance than most existing works as seen from Tab. 2, including PointNet++ (Qi et al., 2017b), PointCNN (Li et al., 2018), DGCNN (Wang et al., 2019), MinkowskiNet (Choy et al., 2019), PointTransformer (Zhao et al., 2021) and PointMLP (Ma et al., 2022). It is only inferior to the approaches that use advanced encoder structures such as stratified transformer (Lai et al., 2022) and PointNeXt (Qian et al., 2022).",
1040
+ "bbox": [
1041
+ 169,
1042
+ 633,
1043
+ 828,
1044
+ 720
1045
+ ],
1046
+ "page_idx": 6
1047
+ },
1048
+ {
1049
+ "type": "text",
1050
+ "text": "Encoder replacement To make a more fair comparison, we replaced the PointViT encoder with the PointNeXt's encoder, and retrained our pretraining network, denoted as MaskFeat3D (PointNeXt). From Tab. 2, we can see that our pretraining approach with this enhanced encoder can yield SOTA performance on all the downstream tasks, surpassing PointNeXt trained from scratch. We also used MinkowskiNet (Choy et al., 2019) as our pretraining encoder, the performance gain over MinkowskiNet trained from scratch is $+0.7\\%$ overall accuracy improvement on ScanObjectNN classification, and $+0.3\\%$ on ShapeNetPart segmentation. Please refer to the supplementary material for details.",
1051
+ "bbox": [
1052
+ 169,
1053
+ 729,
1054
+ 826,
1055
+ 843
1056
+ ],
1057
+ "page_idx": 6
1058
+ },
1059
+ {
1060
+ "type": "text",
1061
+ "text": "Few-shot Classification To perform few-shot classification on ModelNet40, we adopt the \"K-way N-shot\" settings as described in prior work (Wang et al., 2021a; Yu et al., 2022; Pang et al., 2022). Specifically, we randomly choose K out of the 40 available classes and sample N+20 3D shapes per class, with N shapes used for training and 20 for testing. We evaluate the performance of MaskFeat3D under four few-shot settings: 5-way 10-shot, 5-way 20-shot, 10-way 10-shot, and 10-way 20-shot. To",
1062
+ "bbox": [
1063
+ 169,
1064
+ 854,
1065
+ 828,
1066
+ 926
1067
+ ],
1068
+ "page_idx": 6
1069
+ },
1070
+ {
1071
+ "type": "header",
1072
+ "text": "Published as a conference paper at ICLR 2024",
1073
+ "bbox": [
1074
+ 173,
1075
+ 32,
1076
+ 478,
1077
+ 47
1078
+ ],
1079
+ "page_idx": 6
1080
+ },
1081
+ {
1082
+ "type": "page_number",
1083
+ "text": "7",
1084
+ "bbox": [
1085
+ 493,
1086
+ 946,
1087
+ 504,
1088
+ 959
1089
+ ],
1090
+ "page_idx": 6
1091
+ },
1092
+ {
1093
+ "type": "table",
1094
+ "img_path": "images/f1dfbd120226364edfad45596e5a27d1449d8e8f8f6e6cf87560449620a97d06.jpg",
1095
+ "table_caption": [],
1096
+ "table_footnote": [],
1097
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">5-way</td><td colspan=\"2\">10-way</td></tr><tr><td>10-shot</td><td>20-shot</td><td>10-shot</td><td>20-shot</td></tr><tr><td>DGCNN†</td><td>31.6 ± 2.8</td><td>40.8 ± 4.6</td><td>19.9 ± 2.1</td><td>16.9 ± 1.5</td></tr><tr><td>OcCo</td><td>90.6 ± 2.8</td><td>92.5 ± 1.9</td><td>82.9 ± 1.3</td><td>86.5 ± 2.2</td></tr><tr><td>CrossPoint</td><td>92.5 ± 3.0</td><td>94.9 ± 2.1</td><td>83.6 ± 5.3</td><td>87.9 ± 4.2</td></tr><tr><td>Transformer†</td><td>87.8 ± 5.2</td><td>93.3 ± 4.3</td><td>84.6 ± 5.5</td><td>89.4 ± 6.3</td></tr><tr><td>OcCo</td><td>94.0 ± 3.6</td><td>95.9 ± 2.3</td><td>89.4 ± 5.1</td><td>92.4 ± 4.6</td></tr><tr><td>PointBERT</td><td>94.6 ± 3.1</td><td>96.3 ± 2.7</td><td>91.0 ± 5.4</td><td>92.7 ± 5.1</td></tr><tr><td>MaskDiscr</td><td>95.0 ± 3.7</td><td>97.2 ± 1.7</td><td>91.4 ± 4.0</td><td>93.4 ± 3.5</td></tr><tr><td>PointMAE</td><td>96.3 ± 2.5</td><td>97.8 ± 1.8</td><td>92.6 ± 4.1</td><td>95.0 ± 3.0</td></tr><tr><td>MaskFeat3D</td><td>97.1 ± 2.1</td><td>98.4 ± 1.6</td><td>93.4 ± 3.8</td><td>95.7 ± 3.4</td></tr></table>",
1098
+ "bbox": [
1099
+ 174,
1100
+ 101,
1101
+ 531,
1102
+ 226
1103
+ ],
1104
+ "page_idx": 7
1105
+ },
1106
+ {
1107
+ "type": "table",
1108
+ "img_path": "images/5b17d3f04fea30a95c22fb94adc2e40493f2c3231dde9362e82c0f77690a07d4.jpg",
1109
+ "table_caption": [
1110
+ "Table 3: Few-shot classification on ModelNet40. We report the average accuracy $(\\%)$ and standard deviation $(\\%)$ of 10 independent experiments."
1111
+ ],
1112
+ "table_footnote": [],
1113
+ "table_body": "<table><tr><td>Method</td><td>Target Feature</td><td>ScanNN</td></tr><tr><td rowspan=\"4\">PointMAE</td><td>position only</td><td>85.2</td></tr><tr><td>position + normal*</td><td>85.7</td></tr><tr><td>position + surface variation*</td><td>85.9</td></tr><tr><td>position + normal + variation*</td><td>86.0</td></tr><tr><td rowspan=\"3\">MaskFeat3D</td><td>normal</td><td>86.5</td></tr><tr><td>surface variation</td><td>87.0</td></tr><tr><td>normal + variation</td><td>87.7</td></tr></table>",
1114
+ "bbox": [
1115
+ 537,
1116
+ 126,
1117
+ 823,
1118
+ 226
1119
+ ],
1120
+ "page_idx": 7
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "text": "Table 4: Ablation study on different features. * uses position-index matching Zhang et al. (2022) for feature loss computation.",
1125
+ "bbox": [
1126
+ 537,
1127
+ 236,
1128
+ 823,
1129
+ 273
1130
+ ],
1131
+ "page_idx": 7
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "text": "mitigate the effects of random sampling, we conduct 10 independent runs for each few-shot setting and report the mean accuracy and standard deviation. Additionally, more ModelNet40 results can be found in the supplementary material.",
1136
+ "bbox": [
1137
+ 169,
1138
+ 301,
1139
+ 823,
1140
+ 345
1141
+ ],
1142
+ "page_idx": 7
1143
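The "K-way N-shot" episode construction described above can be summarized by the following sketch; the dictionary input and the function name are hypothetical conveniences, not part of the benchmark code.

```python
import random

def sample_few_shot_episode(shapes_by_class, K=5, N=10, n_test=20):
    """Build one K-way N-shot episode: K random classes, with N training
    and n_test test shapes sampled per class."""
    classes = random.sample(list(shapes_by_class), K)
    train, test = [], []
    for c in classes:
        picks = random.sample(shapes_by_class[c], N + n_test)
        train += [(shape, c) for shape in picks[:N]]
        test += [(shape, c) for shape in picks[N:]]
    return train, test
```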
+ },
1144
+ {
1145
+ "type": "text",
1146
+ "text": "Overall, the improvements of our approach are consistent across different backbone encoders and datasets.",
1147
+ "bbox": [
1148
+ 169,
1149
+ 351,
1150
+ 823,
1151
+ 378
1152
+ ],
1153
+ "page_idx": 7
1154
+ },
1155
+ {
1156
+ "type": "text",
1157
+ "text": "4.3 ABLATION STUDY",
1158
+ "text_level": 1,
1159
+ "bbox": [
1160
+ 171,
1161
+ 397,
1162
+ 341,
1163
+ 411
1164
+ ],
1165
+ "page_idx": 7
1166
+ },
1167
+ {
1168
+ "type": "text",
1169
+ "text": "We proceed to present an ablation study to justify various design choices. For simplicity, we choose the shape classification task on ScanObjectNN, where the gaps under different configurations are salient and provide meaningful insights on the pros and cons of various design choices. Due to space constraints, additional ablation studies are available in the supplementary material.",
1170
+ "bbox": [
1171
+ 169,
1172
+ 424,
1173
+ 823,
1174
+ 481
1175
+ ],
1176
+ "page_idx": 7
1177
+ },
1178
+ {
1179
+ "type": "text",
1180
+ "text": "Decoder design The primary question that arises is whether it is essential to disregard point position recovery. PointMAE's decoder follows a standard ViT-like architecture, utilizing a fully connected (FC) layer to directly predict the masked point coordinates. We implemented this decoder to predict our target features. However, since their decoder design does not encode masked point position, it cannot solely predict target features without predicting point position. To address this, we follow the approach proposed in (Zhang et al., 2022) and employ position-index matching for feature loss computation. As shown in Tab. 4, even though incorporating point features as the predicting target can enhance performance, the overall performance still significantly lags behind our design. This experiment highlights the significance of both point feature prediction and disregarding point position recovery.",
1181
+ "bbox": [
1182
+ 169,
1183
+ 496,
1184
+ 826,
1185
+ 636
1186
+ ],
1187
+ "page_idx": 7
1188
+ },
1189
+ {
1190
+ "type": "text",
1191
+ "text": "Target feature choice In Tab. 4, the experiment shows that: (1) All combinations of point normal and surface variation can yield significant improvements over existing MAE approaches that recover point positions (cf. Tab. 1); (2) using both point normals and surface variations yields the best performance. As discussed in Sec. 3.2, this is due to the fact that they correspond to first- and second-order differential properties. They are relevant but complementary to each other. Therefore, reconstructing them together forces the encoder to learn more informative features than merely reconstructing one of them.",
1192
+ "bbox": [
1193
+ 169,
1194
+ 641,
1195
+ 826,
1196
+ 739
1197
+ ],
1198
+ "page_idx": 7
1199
+ },
1200
+ {
1201
+ "type": "text",
1202
+ "text": "Decoder depth Tab. 5-a varies the number of transformer blocks (decoder depth). A sufficient deep decoder is necessary for feature learning. Increasing the number of blocks from 2 to 4 provides $+1.5\\%$ improvement on ScanObjectNN classification task. The performance drops when increasing the depth further, due to the overfitting issue. Interestingly, we note that a 1-block decoder can strongly achieve $85.8\\%$ accuracy, which is still higher than the runner-up method (PointMAE).",
1203
+ "bbox": [
1204
+ 169,
1205
+ 744,
1206
+ 825,
1207
+ 815
1208
+ ],
1209
+ "page_idx": 7
1210
+ },
1211
+ {
1212
+ "type": "text",
1213
+ "text": "Data augmentation Tab. 5-b studies three traditional data augmentation methods: rotation, scaling, and translation. Since the standard scaling could change the surface normal and variation, we scale the shape by using the same factor on 3 different axis. The experiments show that rotation and scaling play a more important role.",
1214
+ "bbox": [
1215
+ 169,
1216
+ 820,
1217
+ 826,
1218
+ 878
1219
+ ],
1220
+ "page_idx": 7
1221
+ },
1222
+ {
1223
+ "type": "text",
1224
+ "text": "Masking ratio. Tab. 5-c varies the masking ratio of input point cloud, which is another important factor on our approach. When the masking ratio is too large, e.g., $90\\%$ , the remaining part contains too limited information, which makes the task too hard to complete. When masking ratio is too",
1225
+ "bbox": [
1226
+ 169,
1227
+ 881,
1228
+ 823,
1229
+ 925
1230
+ ],
1231
+ "page_idx": 7
1232
+ },
1233
+ {
1234
+ "type": "header",
1235
+ "text": "Published as a conference paper at ICLR 2024",
1236
+ "bbox": [
1237
+ 173,
1238
+ 32,
1239
+ 478,
1240
+ 47
1241
+ ],
1242
+ "page_idx": 7
1243
+ },
1244
+ {
1245
+ "type": "page_number",
1246
+ "text": "8",
1247
+ "bbox": [
1248
+ 493,
1249
+ 948,
1250
+ 504,
1251
+ 959
1252
+ ],
1253
+ "page_idx": 7
1254
+ },
1255
+ {
1256
+ "type": "table",
1257
+ "img_path": "images/c724d7bbd2cbf4be2e6f9dfe58d7c346933fa9ac8000c0745120fd128ac40913.jpg",
1258
+ "table_caption": [
1259
+ "(a) Decoder depth"
1260
+ ],
1261
+ "table_footnote": [],
1262
+ "table_body": "<table><tr><td># blocks</td><td>ScanNN</td></tr><tr><td>1</td><td>85.8</td></tr><tr><td>2</td><td>86.2</td></tr><tr><td>4</td><td>87.7</td></tr><tr><td>8</td><td>87.5</td></tr><tr><td>12</td><td>87.1</td></tr></table>",
1263
+ "bbox": [
1264
+ 173,
1265
+ 122,
1266
+ 287,
1267
+ 195
1268
+ ],
1269
+ "page_idx": 8
1270
+ },
1271
+ {
1272
+ "type": "table",
1273
+ "img_path": "images/68a569086cb1a76db2bcb7eb571aeafc5508330e36d96cbbc27de733718b0317.jpg",
1274
+ "table_caption": [
1275
+ "(b) Data augmentation"
1276
+ ],
1277
+ "table_footnote": [],
1278
+ "table_body": "<table><tr><td>rot</td><td>scale</td><td>trans</td><td>ScanNN</td></tr><tr><td>✓</td><td>-</td><td>-</td><td>87.0</td></tr><tr><td>-</td><td>✓</td><td>-</td><td>85.9</td></tr><tr><td>✓</td><td>✓</td><td>-</td><td>87.7</td></tr><tr><td>-</td><td>✓</td><td>✓</td><td>85.1</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>86.7</td></tr></table>",
1279
+ "bbox": [
1280
+ 290,
1281
+ 122,
1282
+ 454,
1283
+ 195
1284
+ ],
1285
+ "page_idx": 8
1286
+ },
1287
+ {
1288
+ "type": "table",
1289
+ "img_path": "images/16876ee033e1a9e13b69087c67e2dbeb0c3a6990df1548573e76f4733c57af20.jpg",
1290
+ "table_caption": [
1291
+ "(c) Mask ratio"
1292
+ ],
1293
+ "table_footnote": [],
1294
+ "table_body": "<table><tr><td>ratio</td><td>ScanNN</td></tr><tr><td>40%</td><td>86.8</td></tr><tr><td>60%</td><td>87.7</td></tr><tr><td>90%</td><td>86.5</td></tr></table>",
1295
+ "bbox": [
1296
+ 459,
1297
+ 122,
1298
+ 555,
1299
+ 175
1300
+ ],
1301
+ "page_idx": 8
1302
+ },
1303
+ {
1304
+ "type": "table",
1305
+ "img_path": "images/78382c86ac502715f071311caefb3f6fca0ca5b17a2fefdafb81d555ff6c8ff1.jpg",
1306
+ "table_caption": [
1307
+ "(d) Decoder attention"
1308
+ ],
1309
+ "table_footnote": [],
1310
+ "table_body": "<table><tr><td>attention type</td><td>ScanNN</td></tr><tr><td>cross only</td><td>85.7</td></tr><tr><td>cross+self</td><td>87.7</td></tr></table>",
1311
+ "bbox": [
1312
+ 558,
1313
+ 122,
1314
+ 694,
1315
+ 186
1316
+ ],
1317
+ "page_idx": 8
1318
+ },
1319
+ {
1320
+ "type": "table",
1321
+ "img_path": "images/15fe2e21fbf12077c53677e58266a59d7e5f04eefab9ed0f40e7e0dfcf8e3bd8.jpg",
1322
+ "table_caption": [
1323
+ "(e) Query point ratio"
1324
+ ],
1325
+ "table_footnote": [],
1326
+ "table_body": "<table><tr><td>query/mask</td><td>ScanNN</td></tr><tr><td>25%</td><td>85.7</td></tr><tr><td>50%</td><td>86.2</td></tr><tr><td>75%</td><td>86.6</td></tr><tr><td>100%</td><td>87.7</td></tr></table>",
1327
+ "bbox": [
1328
+ 697,
1329
+ 122,
1330
+ 823,
1331
+ 186
1332
+ ],
1333
+ "page_idx": 8
1334
+ },
1335
+ {
1336
+ "type": "text",
1337
+ "text": "small, e.g., $40\\%$ , the task becomes too simple and impedes the feature learning. In our experiments, masking ratio $= 60\\%$ shows the best performance.",
1338
+ "bbox": [
1339
+ 169,
1340
+ 228,
1341
+ 826,
1342
+ 257
1343
+ ],
1344
+ "page_idx": 8
1345
+ },
1346
+ {
1347
+ "type": "text",
1348
+ "text": "Decoder block design We tested whether the self-attention layer in our decoder is essential. By simply removing self-attention layers and using cross-attention layers only, we find that the performance has a large drop (-2.0), see Tab. 5-d.",
1349
+ "bbox": [
1350
+ 169,
1351
+ 268,
1352
+ 826,
1353
+ 311
1354
+ ],
1355
+ "page_idx": 8
1356
+ },
1357
+ {
1358
+ "type": "text",
1359
+ "text": "Number of query points Finally, we varied the number of query points used by our decoder to see how it affects the network performance. Tab. 5-e shows that more query points lead to better performance. Here, \"query/mask\" is the ratio of selected query points with respect to the total number of masked points.",
1360
+ "bbox": [
1361
+ 169,
1362
+ 316,
1363
+ 825,
1364
+ 372
1365
+ ],
1366
+ "page_idx": 8
1367
+ },
1368
+ {
1369
+ "type": "text",
1370
+ "text": "4.4 SCENE-LEVEL PRETRAINING EXTENSION",
1371
+ "text_level": 1,
1372
+ "bbox": [
1373
+ 171,
1374
+ 395,
1375
+ 503,
1376
+ 409
1377
+ ],
1378
+ "page_idx": 8
1379
+ },
1380
+ {
1381
+ "type": "text",
1382
+ "text": "In principle, masked point cloud autoencoders could be scaled to noisy, large-scale point clouds. Additionally, we conducted an extension experiment on real-world scene-level data to evaluate our approach. Specifically, we pretrained our model on the ScanNet (Dai et al., 2017) dataset and evaluated its performance on 3D object detection task using the ScanNet and SUN RGB-D (Song et al., 2015) dataset. The training details can be found in the supplementary material. In this experiment, we observed that surface",
1383
+ "bbox": [
1384
+ 169,
1385
+ 424,
1386
+ 447,
1387
+ 604
1388
+ ],
1389
+ "page_idx": 8
1390
+ },
1391
+ {
1392
+ "type": "table",
1393
+ "img_path": "images/2f523da5e5b00fa0cb4de4226f4e2c9e2111c6f75e32ade57e794412264cea0c.jpg",
1394
+ "table_caption": [
1395
+ "Table 5: Ablation studies of our design choices. Please refer to Sec. 4.3 for a detailed analysis."
1396
+ ],
1397
+ "table_footnote": [],
1398
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Backbone</td><td colspan=\"2\">ScanNet</td><td colspan=\"2\">SUN RGB-D</td></tr><tr><td>AP25</td><td>AP50</td><td>AP25</td><td>AP50</td></tr><tr><td>STRL</td><td>VoteNet</td><td>59.5</td><td>38.4</td><td>58.2</td><td>35.0</td></tr><tr><td>RandomRooms</td><td>VoteNet</td><td>61.3</td><td>36.2</td><td>59.2</td><td>35.4</td></tr><tr><td>PointContrast</td><td>VoteNet</td><td>59.2</td><td>38.0</td><td>57.5</td><td>34.8</td></tr><tr><td>DepthContrast</td><td>VoteNet</td><td>62.1</td><td>39.1</td><td>60.4</td><td>35.4</td></tr><tr><td>Point-M2AE</td><td>Point-M2AE</td><td>66.3</td><td>48.3</td><td>-</td><td>-</td></tr><tr><td>MaskFeat3D</td><td>VoteNet</td><td>63.3</td><td>41.0</td><td>61.0</td><td>36.5</td></tr><tr><td>MaskFeat3D</td><td>Point-M2AE</td><td>67.5</td><td>50.0</td><td>-</td><td>-</td></tr><tr><td>MaskFeat3D</td><td>CAGroup3D</td><td>75.6</td><td>62.3</td><td>67.2</td><td>51.0</td></tr></table>",
1399
+ "bbox": [
1400
+ 459,
1401
+ 426,
1402
+ 823,
1403
+ 571
1404
+ ],
1405
+ "page_idx": 8
1406
+ },
1407
+ {
1408
+ "type": "text",
1409
+ "text": "Table 6: 3D object detection results.",
1410
+ "bbox": [
1411
+ 524,
1412
+ 580,
1413
+ 756,
1414
+ 594
1415
+ ],
1416
+ "page_idx": 8
1417
+ },
1418
+ {
1419
+ "type": "text",
1420
+ "text": "normal has a minor influence on the pretraining, while surface variation remains a robust feature. Moreover, we discovered that color signal could be an effective target feature. Hence, we pretrained our model with surface variation and color as the target features, and then fine-tuned the pretrained encoder on the downstream tasks. As shown in Tab. 6, given that previous studies lack a unified network backbone, we selected two of the most common works, VoteNet and Point-M2AE, along with the latest work, CAGroup3D, as the network backbones respectively. And our model exhibits consistent improvements in all the settings, which further proves the generalizability of our approach on noisy, large-scale point clouds. Although the concrete scene-level experiments are not the main focus of this paper, the results indicate that this is a promising direction.",
1421
+ "bbox": [
1422
+ 169,
1423
+ 604,
1424
+ 826,
1425
+ 731
1426
+ ],
1427
+ "page_idx": 8
1428
+ },
1429
+ {
1430
+ "type": "text",
1431
+ "text": "5 CONCLUSION",
1432
+ "text_level": 1,
1433
+ "bbox": [
1434
+ 171,
1435
+ 750,
1436
+ 320,
1437
+ 763
1438
+ ],
1439
+ "page_idx": 8
1440
+ },
1441
+ {
1442
+ "type": "text",
1443
+ "text": "Our study reveals that restoration of masked point location is not essential for 3D MAE training. By predicting geometric features such as surface normals and surface variations at the masked points via our cross-attention-based decoder, the performance of 3D MAEs can be improved significantly, as evaluated through extensive experiments and downstream tasks. Moreover, the performance gains remain consistent when using different encoder backbones. We hope that our study can inspire future research in the development of robust MAE-based 3D backbones.",
1444
+ "bbox": [
1445
+ 169,
1446
+ 777,
1447
+ 823,
1448
+ 861
1449
+ ],
1450
+ "page_idx": 8
1451
+ },
1452
+ {
1453
+ "type": "text",
1454
+ "text": "Acknowledgement. Part of this work was done when Siming Yan was a research intern at Microsoft Research Asia. Additionally, we would like to acknowledge the gifts from Google, Adobe, Wormpex AI, and support from NSF IIS-2047677, HDR-1934932, CCF-2019844, and IARPA WRIVA program.",
1455
+ "bbox": [
1456
+ 169,
1457
+ 867,
1458
+ 825,
1459
+ 925
1460
+ ],
1461
+ "page_idx": 8
1462
+ },
1463
+ {
1464
+ "type": "header",
1465
+ "text": "Published as a conference paper at ICLR 2024",
1466
+ "bbox": [
1467
+ 171,
1468
+ 32,
1469
+ 478,
1470
+ 47
1471
+ ],
1472
+ "page_idx": 8
1473
+ },
1474
+ {
1475
+ "type": "page_number",
1476
+ "text": "9",
1477
+ "bbox": [
1478
+ 493,
1479
+ 948,
1480
+ 504,
1481
+ 959
1482
+ ],
1483
+ "page_idx": 8
1484
+ },
1485
+ {
1486
+ "type": "text",
1487
+ "text": "REFERENCES",
1488
+ "text_level": 1,
1489
+ "bbox": [
1490
+ 174,
1491
+ 102,
1492
+ 287,
1493
+ 117
1494
+ ],
1495
+ "page_idx": 9
1496
+ },
1497
+ {
1498
+ "type": "list",
1499
+ "sub_type": "ref_text",
1500
+ "list_items": [
1501
+ "Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. BEiT: BERT pre-training of image transformers. In ICLR, 2022.",
1502
+ "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020.",
1503
+ "Emanuele Bugliarello, Ryan Cotterell, Naoaki Okazaki, and Desmond Elliott. Multimodal pretraining unmasked: A meta-analysis and a unified framework of vision-and-language BERTs. Transactions of the Association for Computational Linguistics, 9, 2021.",
1504
+ "Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An information-rich 3D model repository. arxiv:1512.03012, 2015.",
1505
+ "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020.",
1506
+ "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021.",
1507
+ "Christopher B. Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In CVPR, pp. 3075-3084, 2019.",
1508
+ "Ulrich Clarenz, Martin Rumpf, and Alexandru Telea. Robust feature detection and local classification for surfaces based on moment analysis. IEEE Trans. Vis. Comput. Graphics, 10(5):516-524, 2004.",
1509
+ "Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas A. Funkhouser, and Matthias Nießner. ScanNet: Richly-annotated 3D reconstructions of indoor scenes. In CVPR, pp. 2432-2443, 2017.",
1510
+ "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In ACL, pp. 4171-4186, Stroudsburg, PA, USA, 2019.",
1511
+ "Benjamin Graham, Martin Engelcke, and Laurens van der Maaten. 3D semantic segmentation with submanifold sparse convolutional networks. In CVPR, 2018.",
1512
+ "Jean-Bastien Grill, Florian Strub, Florent Alché, Corentin Tallec, Pierre H. Richemond, Elena Buchatskaya, Carl Doersch, BernardoAvila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, Bilal Piot, Koray Kavukcuoglu, Rémi Munos, and Michal Valko. Bootstrap your own latent: A new approach to self-supervised learning. In NeurIPS, 2020.",
1513
+ "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020.",
1514
+ "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dolkar, and Ross B. Girshick. Masked autoencoders are scalable vision learners. In CVPR, pp. 15979-15988, 2022.",
1515
+ "Georg Hess, Johan Jaxing, Elias Svensson, David Hagerman, Christoffer Petersson, and Lennart Svensson. Masked autoencoders for self-supervised learning on automotive point clouds. arXiv:2207.00531, 2022.",
1516
+ "Hugues Hoppe, Tony DeRose, Tom Duchamp, John Alan McDonald, and Werner Stuetzle. Surface reconstruction from unorganized points. In SIGGRAPH, pp. 71-78. ACM, 1992.",
1517
+ "Ji Hou, Benjamin Graham, Matthias Nießner, and Saining Xie. Exploring data-efficient 3D scene understanding with contrastive scene contexts. In CVPR, 2021.",
1518
+ "Qi-Xing Huang, Simon Flóry, Natasha Gelfand, Michael Hofer, and Helmut Pottmann. Reassembling fractured objects by geometric matching. ACM Trans. Graph., 25(3):569-578, 2006.",
1519
+ "Hamid Laga, Yulan Guo, Hedi Tabia, Robert B Fisher, and Mohammed Bennamoun. 3D Shape analysis: fundamentals, theory, and applications. John Wiley & Sons, 2018."
1520
+ ],
1521
+ "bbox": [
1522
+ 171,
1523
+ 125,
1524
+ 828,
1525
+ 925
1526
+ ],
1527
+ "page_idx": 9
1528
+ },
1529
+ {
1530
+ "type": "header",
1531
+ "text": "Published as a conference paper at ICLR 2024",
1532
+ "bbox": [
1533
+ 171,
1534
+ 32,
1535
+ 478,
1536
+ 47
1537
+ ],
1538
+ "page_idx": 9
1539
+ },
1540
+ {
1541
+ "type": "page_number",
1542
+ "text": "10",
1543
+ "bbox": [
1544
+ 490,
1545
+ 946,
1546
+ 506,
1547
+ 959
1548
+ ],
1549
+ "page_idx": 9
1550
+ },
1551
+ {
1552
+ "type": "list",
1553
+ "sub_type": "ref_text",
1554
+ "list_items": [
1555
+ "Xin Lai, Jianhui Liu, Li Jiang, Liwei Wang, Hengshuang Zhao, Shu Liu, Xiaojuan Qi, and Jiaya Jia. Stratified transformer for 3d point cloud segmentation. In CVPR, pp. 8500-8509, 2022.",
1556
+ "Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. PointCNN: Convolution on X-transformed points. In NeurIPS, pp. 828-838, 2018.",
1557
+ "Haotian Liu, Mu Cai, and Yong Jae Lee. Masked discrimination for self-supervised learning on point clouds. In ECCV, 2022.",
1558
+ "Xiao Liu, Fanjin Zhang, Zhenyu Hou, Li Mian, Zhaoyu Wang, Jing Zhang, and Jie Tang. Self-supervised learning: Generative or contrastive. IEEE Transactions on Knowledge and Data Engineering, 2021.",
1559
+ "Xu Ma, Can Qin, Haoxuan You, Haoxi Ran, and Yun Fu. Rethinking network design and local geometry in point cloud: A simple residual MLP framework. In ICLR, 2022.",
1560
+ "Yatian Pang, Wenxiao Wang, Francis E. H. Tay, W. Liu, Yonghong Tian, and Liuliang Yuan. Masked autoencoders for point cloud self-supervised learning. In ECCV, 2022.",
1561
+ "Mark Pauly and Markus Gross. Spectral processing of point-sampled geometry. In SIGGRAPH, pp. 379-386, 2001.",
1562
+ "Mark Pauly, Markus Gross, and Leif P. Kobbelt. Efficient simplification of point-sampled surfaces. In Proceedings of the Conference on Visualization, pp. 163-170, 2002.",
1563
+ "Mark Pauly, Richard Keiser, and Markus Gross. Multi-scale Feature Extraction on Point-Sampled Surfaces. Computer Graphics Forum, 2003. ISSN 1467-8659.",
1564
+ "Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. PointNet: Deep learning on point sets for 3D classification and segmentation. In CVPR, pp. 652-660, 2017a.",
1565
+ "Charles R. Qi, Li Yi, Hao Su, and Leonidas J. Guibas. PointNet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, pp. 5105-5114, 2017b.",
1566
+ "Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Abed Al Kader Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. PointNeXt: Revisiting PointNet++ with improved training and scaling strategies. In NeurIPS, 2022.",
1567
+ "Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 567-576, 2015.",
1568
+ "Mikaela Angelina Uy, Quang-Hieu Pham, Binh-Son Hua, Duc Thanh Nguyen, and Sai-Kit Yeung. Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In ICCV, pp. 1588-1597. IEEE, 2019.",
1569
+ "Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. In ICCV, pp. 9782-9792, 2021a.",
1570
+ "Peng-Shuai Wang, Yang Liu, Yu-Xiao Guo, Chun-Yu Sun, and Xin Tong. O-CNN: Octree-based convolutional neural networks for 3D shape analysis. ACM Trans. Graph., 36(4), 2017.",
1571
+ "Peng-Shuai Wang, Yu-Qi Yang, Qian-Fang Zou, Zhirong Wu, Yang Liu, and Xin Tong. Unsupervised 3D learning for shape analysis via multiresolution instance discrimination. In AAAI, 2021b.",
1572
+ "Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph cnn for learning on point clouds. ACM Trans. Graph., 38(5), 2019.",
1573
+ "Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, pp. 14668-14678, 2022.",
1574
+ "Jiajun Wu, Chengkai Zhang, Tianfan Xue, William T. Freeman, and Joshua B. Tenenbaum. Learning a probabilistic latent space of object shapes via 3D generative-adversarial modeling. In NeurIPS, 2016."
1575
+ ],
1576
+ "bbox": [
1577
+ 171,
1578
+ 102,
1579
+ 826,
1580
+ 922
1581
+ ],
1582
+ "page_idx": 10
1583
+ },
1584
+ {
1585
+ "type": "header",
1586
+ "text": "Published as a conference paper at ICLR 2024",
1587
+ "bbox": [
1588
+ 171,
1589
+ 32,
1590
+ 478,
1591
+ 47
1592
+ ],
1593
+ "page_idx": 10
1594
+ },
1595
+ {
1596
+ "type": "page_number",
1597
+ "text": "11",
1598
+ "bbox": [
1599
+ 488,
1600
+ 946,
1601
+ 506,
1602
+ 959
1603
+ ],
1604
+ "page_idx": 10
1605
+ },
1606
+ {
1607
+ "type": "list",
1608
+ "sub_type": "ref_text",
1609
+ "list_items": [
1610
+ "Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3D ShapeNets: A deep representation for volumetric shapes. In CVPR, pp. 1912-1920. IEEE Computer Society, 2015.",
1611
+ "Zhirong Wu, Yuanjun Xiong, Stella X. Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In CVPR, 2018.",
1612
+ "Saining Xie, Jiatao Gu, Demi Guo, Charles R. Qi, Leonidas J Guibas, and Or Litany. PointContrast: Unsupervised pre-training for 3D point cloud understanding. ECCV, 2020.",
1613
+ "Siming Yan, Zhenpei Yang, Chongyang Ma, Haibin Huang, Etienne Vouga, and Qixing Huang. Hpnet: Deep primitive segmentation using hybrid representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2753-2762, 2021.",
1614
+ "Siming Yan, Zhenpei Yang, Haoxiang Li, Li Guan, Hao Kang, Gang Hua, and Qixing Huang. Implicit autoencoder for point cloud self-supervised representation learning. arXiv preprint arXiv:2201.00785, 2022.",
1615
+ "Siming Yan, Chen Song, Youkang Kong, and Qixing Huang. Multi-view representation is what you need for point-cloud pre-training. In The Twelfth International Conference on Learning Representations, 2023.",
1616
+ "Yaoqing Yang, Chen Feng, Yiru Shen, and Dong Tian. FoldingNet: Point cloud auto-encoder via deep grid deformation. In CVPR, 2018.",
1617
+ "Li Yi, Vladimir G. Kim, Duygu Ceylan, I-Chao Shen, Mengyan Yan, Hao Su, Cewu Lu, Qixing Huang, Alla Sheffer, and Leonidas Guibas. A scalable active framework for region annotation in 3d shape collections. ACM Trans. Graph., 35(6), 2016.",
1618
+ "Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-BERT: Pretraining 3D point cloud transformers with masked point modeling. In CVPR, pp. 19313-19322, June 2022.",
1619
+ "Yabin Zhang, Jiehong Lin, Chenhang He, Yongwei Chen, Kui Jia, and Lei Zhang. Masked surfel prediction for self-supervised point cloud learning. arXiv:2207.03111, 2022.",
1620
+ "Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3D features on any point-cloud. In ICCV, 2021.",
1621
+ "Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In ICCV, pp. 16259-16268, 2021.",
1622
+ "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. ICLR, 2022.",
1623
+ "Chengxu Zhuang, Siming Yan, Aran Nayebi, and Daniel Yamins. Self-supervised neural network models of higher visual cortex development. In 2019 Conference on Cognitive Computational Neuroscience, pp. 566-569. CCN, 2019.",
1624
+ "Chengxu Zhuang, Siming Yan, Aran Nayebi, Martin Schrimpf, Michael C Frank, James J DiCarlo, and Daniel LK Yamins. Unsupervised neural network models of the ventral visual stream. Proceedings of the National Academy of Sciences, 118(3):e2014196118, 2021."
1625
+ ],
1626
+ "bbox": [
1627
+ 171,
1628
+ 102,
1629
+ 826,
1630
+ 771
1631
+ ],
1632
+ "page_idx": 11
1633
+ },
1634
+ {
1635
+ "type": "header",
1636
+ "text": "Published as a conference paper at ICLR 2024",
1637
+ "bbox": [
1638
+ 171,
1639
+ 32,
1640
+ 478,
1641
+ 47
1642
+ ],
1643
+ "page_idx": 11
1644
+ },
1645
+ {
1646
+ "type": "page_number",
1647
+ "text": "12",
1648
+ "bbox": [
1649
+ 488,
1650
+ 946,
1651
+ 508,
1652
+ 959
1653
+ ],
1654
+ "page_idx": 11
1655
+ }
1656
+ ]
2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/a3c37e28-0a3e-4151-b905-1ec9b50296fa_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c447467ce8417232074142472f060dbc2dea13f4b883ed4eaffda5cf8d980232
3
+ size 1680979
2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/full.md ADDED
@@ -0,0 +1,304 @@
1
+ # 3D FEATURE PREDICTION FOR MASKED-AUTOENCODER-BASED POINT CLOUD PRETRAINING
2
+
3
+ Siming Yan\*, Yuqi Yang, Yuxiao Guo, Hao Pan, Peng-Shuai Wang, Xin Tong, Yang Liu, Qixing Huang
4
+
5
+ †The University of Texas at Austin, ‡Microsoft Research Asia, Peking University
6
+
7
+ {siming, huangqx}@cs.utexas.edu, {wangps}@hotmail.com
8
+
9
+ {t-yuqyan, Yuxiao.Guo, haopan, yangliu, xtong}@microsoft.com
10
+
11
+ # ABSTRACT
12
+
13
+ Masked autoencoders (MAE) have recently been introduced to 3D self-supervised pretraining for point clouds due to their great success in NLP and computer vision. Unlike MAEs used in the image domain, where the pretext task is to restore features at the masked pixels, such as colors, the existing 3D MAE works reconstruct the missing geometry only, i.e., the location of the masked points. In contrast to previous studies, we advocate that point location recovery is inessential and restoring intrinsic point features is much superior. To this end, we propose to ignore point position reconstruction and recover high-order features at masked points including surface normals and surface variations, through a novel attention-based decoder which is independent of the encoder design. We validate the effectiveness of our pretext task and decoder design using different encoder structures for 3D training and demonstrate the advantages of our pretrained networks on various point cloud analysis tasks. The code is available at https://github.com/SimingYan/MaskFeat3D.
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Self-supervised pretraining has recently gained much attention. It starts from a pretext task trained on large unlabeled data, where the learned representation is fine-tuned on downstream tasks. This approach has shown great success in 2D images (Chen et al., 2020; Grill et al., 2020; He et al., 2020; Bao et al., 2022; He et al., 2022; Zhou et al., 2022; Zhuang et al., 2021; 2019) and natural language processing (NLP) (Devlin et al., 2019; Brown et al., 2020). Recently, people started looking into self-supervised pretraining on point cloud data due to its importance in 3D analysis and robotics applications.
18
+
19
+ An important self-supervised pretraining paradigm - masked signal modeling (MSM), including BERT (Bugliarello et al., 2021), BEiT (Bao et al., 2022), and masked autoencoders (MAE) (He et al., 2022), has recently been adopted in 3D domains. MSM
20
+
21
+ ![](images/55c321c86e53d216a78bf278b07770649f082faae200f83c0ec98bc5a43b0908.jpg)
22
+ Figure 1: Comparison of the standard Point-MAE and our proposed method. Unlike the standard Point-MAE, which uses masked points as the prediction target, our method uses a novel attention-based decoder that takes the masked points as an additional input and infers the corresponding features.
23
+
24
+ has a simple setup: a randomly-masked input is fed to the encoder, and a decoder strives to recover the signal at the masked region. MSM is highly scalable and exhibits superior performance in many downstream vision and NLP tasks, outperforming its fully supervised counterparts. Additionally, it does not require extensive augmentation, which is essential and critical to another self-supervised pretraining paradigm, contrastive learning. In images, a mask refers to a randomly selected portion of the pixels, and the pixel colors or other pixel features in the masked region are to be reconstructed by the decoder.
27
+
28
+ For 3D point clouds, the PointBERT approach (Yu et al., 2022) masks point patches and recovers patch tokens that are pretrained by a point cloud Tokenizer. As reconstruction features are associated with patches of points, the learned features at the point level are less competitive. MAE-based pretraining schemes (Pang et al., 2022; Hess et al., 2022; Zhang et al., 2022; Liu et al., 2022) tackle this problem by point-wise pretext tasks. However, their decoders are designed to recover the positions of the masked points in Cartesian coordinates or occupancy formats (Fig. 1-left). These designs make an intrinsic difference from 2D MSMs, where there is no need to recover masked pixel locations. This key difference makes MSM pay more attention to capturing the irregular and possibly noisy point distribution and ignore the intrinsic surface features associated with points, which are essential for 3D point cloud analysis.
29
+
30
+ In the presented work, we propose to recover intrinsic point features, i.e., point normals, and surface variations (Pauly et al., 2002) at masked points, where point normals are first-order surface features and surface variations are related to local curvature properties. We clearly demonstrate that the recovery of high-order surface point features, not point locations, is the key to improving 3D MSM performance. Learning to reconstruct high-order geometric features forces the encoder to extract distinctive and representative features robustly that may not be captured by learning to reconstruct point positions alone. Our study justifies the importance of designing signal recovery for 3D MSMs. It aligns 3D MSM learning with MSM development in vision, where feature modeling plays a critical role (Wei et al., 2022).
31
+
32
+ To recover point signals, we design a practical attention-based decoder. This new decoder takes masked points as queries, and stacks several transformer blocks. In each block, self-attention is used to propagate context features over the masked points and cross-attention is applied to fabricate the point features with the encoder's output (As shown in Fig. 1-right and Fig. 2). This design is separable from the encoder design. Therefore, common 3D encoders, such as sparse CNNs, point-based networks, and transformer-based networks, can all be adopted to strengthen the pretraining capability. Another benefit of this decoder design is that the masked point positions are only accessible by the decoder, thus avoiding leakage of positional information in the early stage of the network, as suggested by (Pang et al., 2022).
33
+
34
+ We conducted extensive ablation studies to verify the efficacy of our masked feature design and decoder. Substantial improvements over previous approaches and the generalization ability of our pretraining approach are demonstrated on various downstream tasks, including 3D shape classification, 3D shape part segmentation, and 3D object detection. We hope that our study can stimulate future research on designing strong MAE-based 3D backbones.
35
+
36
+ We summarize the contributions of our paper as follows:
37
+
38
+ - We propose a novel masked autoencoding method for 3D self-supervised pretraining that predicts intrinsic point features at masked points instead of their positions.
39
+ - We introduce a unique attention-based decoder that can generate point features without relying on any particular encoder architecture.
40
+ - Our experiments demonstrate that restoring intrinsic point features is superior to point location recovery for point cloud MAEs, and we achieve state-of-the-art performance on various downstream tasks.
41
+
42
+ # 2 RELATED WORK
43
+
44
+ Self-supervised pretraining in 3D Self-supervised pretraining is an active research topic in machine learning (Liu et al., 2021). Early self-supervised pretraining for 3D used autoencoders (Yang et al., 2018; Yan et al., 2022) and generative adversarial networks (Wu et al., 2016) to learn shape-level features, mainly for shape classification and retrieval tasks. Other self-supervised pretext tasks, such as clustering and registration, have also been developed for 3D pretraining. Later, due to its great ability to learn features at both the instance and pixel levels in a self-supervised manner, contrastive learning (Wu et al., 2018; Grill et al., 2020; He et al., 2020; Brown et al., 2020; Chen & He, 2021; Yan et al., 2023) was introduced into 3D domains to extract distinctive instance
45
+
46
+ ![](images/7114f81273cd4cdfb37e6c4464f6ff3f9f24793ab477c9ad4c7d89fbb323d751.jpg)
47
+ Figure 2: The pretraining pipeline of our masked 3D feature prediction approach. Given a complete input point cloud, we first separate it into masked points and unmasked points (we use a cube mask here for better visualization). We take the unmasked points as the encoder input and output the block feature pairs. Then the decoder takes the block feature pairs and query points (i.e., masked points) as the input, and predicts the per-query-point features.
48
+
49
+ and point-wise features for various downstream tasks (Wang et al., 2021b; Xie et al., 2020; Hou et al., 2021; Zhang et al., 2021). However, contrastive learning requires data augmentation heavily to form positive or negative pairs for effective feature learning.
50
+
51
+ Masked signal modeling in 3D Masked signal modeling using transformer-based architectures for self-supervised learning (SSL) has shown great simplicity and superior performance. PointBERT (Yu et al., 2022) and PointMAE (Pang et al., 2022) are two such works that inherit from this idea. PointBERT partitions a point cloud into patches and trains a transformer-based autoencoder to recover masked patches' tokens. In contrast, PointMAE directly reconstructs point patches without costly tokenizer training, using Chamfer distance as the reconstruction loss. Other works like (Zhang et al., 2022; Liu et al., 2022) and (Hess et al., 2022) explore different strategies for point cloud reconstruction or classification with masking. As discussed in Sec. 1, the pretext tasks of most previous works focus only on masked point locations.
52
+
53
+ Signal recovery in masked autoencoders Masked autoencoders for vision pretraining typically use raw color information in masked pixels as the target signal (He et al., 2022). However, Wei et al. (Wei et al., 2022) have found that using alternative image features, such as HOG descriptors, tokenizer features, and features from other unsupervised and supervised pretrained networks, can improve network performance and efficiency. In contrast, existing 3D MAE methods have limited use of point features and struggle with predicting the location of masked points. Our approach focuses on feature recovery rather than position prediction, selecting representative 3D local features such as point normals and surface variation (Pauly et al., 2002) as target features to demonstrate their efficacy. Our study allows for leveraging more advanced 3D features in 3D masked autoencoders, while further exploration of other types of 3D features (Laga et al., 2018) is left for future work.
54
+
55
+ # 3 MASKED 3D FEATURE PREDICTION
56
+
57
+ In this section, we present our masked 3D feature prediction approach for self-supervised point cloud pretraining. Our network design follows the masked autoencoder paradigm: a 3D encoder takes a point cloud whose points are randomly masked as input, and a decoder is responsible for reconstructing the predefined features at the masked points. The network architecture is depicted in Fig. 2. In the following sections, we first introduce the masking strategy and 3D masked feature modeling in Sec. 3.1 and 3.2, and then present our encoder and decoder design in Sec. 3.3 and 3.4. Here, the key ingredients of our approach are the design of prediction targets and the decoder, which govern the quality of the learned features.
58
+
59
+ # 3.1 3D MASKING
60
+
61
+ We follow the masking strategy proposed by PointBERT (Yu et al., 2022) to mask out some portions of an input point cloud and feed it to the encoder. Denote the input point cloud as $\mathcal{P} \in \mathbb{R}^{N \times 3}$ , where $N$ is the number of points. We sample $K$ points using farthest point sampling (FPS). For each sample point, its $k$ -nearest neighbor points form a point patch. For a given mask ratio $m_r$ , $0 < m_r < 1$ , we randomly select $M$ patches and remove them from the input, where $M = \min(\lceil m_r \cdot K \rceil, K - 1)$ .
62
+
63
+ In the following, the masked points and the remaining points are denoted by $\mathcal{P}_M$ and $\mathcal{P}_U$ , respectively.
64
+
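+ To make the masking procedure above concrete, the following is a minimal NumPy sketch (not the authors' released code) of FPS-seeded patch construction and random patch masking; the helper names `farthest_point_sample` and `mask_point_cloud` are illustrative, and a point is treated as masked here if it belongs to any masked patch.
+
+ ```python
+ import numpy as np
+
+ def farthest_point_sample(points, k):
+     """Greedy farthest point sampling; returns indices of k seed points."""
+     n = points.shape[0]
+     idx = np.zeros(k, dtype=np.int64)
+     dist = np.full(n, np.inf)
+     idx[0] = np.random.randint(n)
+     for i in range(1, k):
+         dist = np.minimum(dist, np.linalg.norm(points - points[idx[i - 1]], axis=1))
+         idx[i] = int(dist.argmax())
+     return idx
+
+ def mask_point_cloud(points, K=128, knn=32, mask_ratio=0.6):
+     """Split a point cloud into masked (P_M) and unmasked (P_U) point sets."""
+     seeds = farthest_point_sample(points, K)
+     # each patch = the knn nearest points around one FPS seed
+     d = np.linalg.norm(points[None, :, :] - points[seeds][:, None, :], axis=-1)
+     patches = np.argsort(d, axis=1)[:, :knn]              # (K, knn) point indices
+     M = min(int(np.ceil(mask_ratio * K)), K - 1)          # number of masked patches
+     masked_patches = np.random.choice(K, M, replace=False)
+     masked_idx = np.unique(patches[masked_patches].reshape(-1))
+     unmasked_idx = np.setdiff1d(np.arange(points.shape[0]), masked_idx)
+     return points[masked_idx], points[unmasked_idx]
+
+ if __name__ == "__main__":
+     pc = np.random.rand(2048, 3).astype(np.float32)
+     p_m, p_u = mask_point_cloud(pc)
+     print(p_m.shape, p_u.shape)
+ ```
+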
65
+ # 3.2 TARGET FEATURE DESIGN
66
+
67
+ As argued in Sec. 1, we advocate against using point locations as the reconstruction target. We choose to reconstruct the surface normal and surface variation at each point, both of which reflect differential surface properties.
68
+
69
+ On the other hand, our decoder design (to be introduced in Sec. 3.4) takes query points as input and outputs predicted point-wise features. Therefore, the decoder implicitly carries positional information for learning meaningful features through the encoder.
70
+
71
+ Given a point cloud, both point normals and surface variations are defined using local principal component analysis (PCA). We first define a covariance matrix $C_r$ over a local surface region around $\mathbf{p}$:
72
+
73
+ $$
74
+ C_{r} := \frac{\int_{\mathbf{x} \in \mathcal{S} \cap \mathbb{S}_{r}(\mathbf{p})} (\mathbf{p} - \mathbf{x})(\mathbf{p} - \mathbf{x})^{T} \, d\mathbf{x}}{\int_{\mathbf{x} \in \mathcal{S} \cap \mathbb{S}_{r}(\mathbf{p})} 1 \cdot d\mathbf{x}}, \tag{1}
75
+ $$
76
+
77
+ ![](images/ca22febfd7507a00564c923c904ee15bcba2b6942297b669dc9fbe73105ff1fe.jpg)
78
+ Figure 3: Visualization of point features. The point normal is color-coded by the normal vector. The surface variation is color-coded such that white indicates a low value and red indicates a high value.
79
+
80
+ where $\mathcal{S} \cap \mathbb{S}_r(\mathbf{p})$ is the local surface region at $\mathbf{p}$, restricted by a sphere centered at $\mathbf{p}$ with radius $r$. We set $r = 0.1$ in our case. The ablation details are shown in the supplement.
83
+
84
+ The normal $n(\mathbf{p})$ at $\mathbf{p}$ is estimated as the smallest eigenvector of $C_r$ . The sign of each normal is computed by using the approach of (Hoppe et al., 1992).
85
+
86
+ Surface variation (Pauly et al., 2002) at $\mathbf{p}$ is denoted by $\sigma_r(\mathbf{p})$ , in the following form:
87
+
88
+ $$
89
+ \sigma_ {r} (\mathbf {p}) = \frac {\lambda_ {1}}{\lambda_ {1} + \lambda_ {2} + \lambda_ {3}}, \tag {2}
90
+ $$
91
+
92
+ where $\lambda_1 \leq \lambda_2 \leq \lambda_3$ are the eigenvalues of $C_r$ . Surface variation is a geometric feature that measures the local derivation at point $\mathbf{p}$ in a neighborhood of size $r$ on a given surface $S$ . Its original and modified versions have been used as a robust feature descriptor for a variety of shape analysis and processing tasks, such as saliency extraction (Pauly & Gross, 2001), curved feature extraction (Pauly et al., 2003), shape segmentation (Huang et al., 2006; Yan et al., 2021), and shape simplification (Pauly et al., 2002).
93
+
94
+ In the limit, i.e., when $r \to 0$ , $\sigma_r(\mathbf{p})$ is related to the mean curvature (Clarenz et al., 2004). By varying the radii of $\mathbb{S}_r$ , multiscale surface variation descriptors can be constructed. In our work, we chose only single-scale surface variation for simplicity.
95
+
96
+ Although both surface normal and surface variation are derived from local PCA, they are complementary to each other in the sense that surface normal carries first-order differential property while surface variation carries second-order differential property due to its relation to mean curvature. We visualize both features in Fig. 3 and show more examples in supplement. In Sec. 4.3, we show that reconstructing surface normal and surface variation leads to better learned features than reconstructing one of them.
97
+
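+ As a hedged illustration of how these target features can be computed, here is a small NumPy sketch (an approximation of Eqs. (1)-(2) on discrete samples, not the authors' code) that builds the covariance matrix from the points inside a radius-$r$ ball and reads off the normal and the surface variation; the normal orientation step of Hoppe et al. (1992) is omitted for brevity.
+
+ ```python
+ import numpy as np
+
+ def local_pca_features(points, radius=0.1):
+     """Per-point normals (N, 3) and surface variations (N,) from local PCA."""
+     n = points.shape[0]
+     normals = np.zeros((n, 3))
+     variations = np.zeros(n)
+     for i, p in enumerate(points):
+         nbr = points[np.linalg.norm(points - p, axis=1) < radius]
+         diff = p - nbr                               # (p - x) terms of Eq. (1)
+         C = diff.T @ diff / max(len(nbr), 1)         # discrete analogue of C_r
+         evals, evecs = np.linalg.eigh(C)             # eigenvalues in ascending order
+         normals[i] = evecs[:, 0]                     # smallest eigenvector ~ normal (unoriented)
+         variations[i] = evals[0] / max(evals.sum(), 1e-12)  # Eq. (2)
+     return normals, variations
+ ```
+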
98
+ Loss function Point normals and surface variations represent first- and second-order surface properties. Their value intervals are also bounded: surface normal has unit length; surface variation is non-negative and not greater than $\frac{1}{3}$ . Their value-bounded properties are suitable for easy minimizing the deviation from the prediction to their ground truths, compared to using unbounded features such as curvatures. We denote the point normals and surface variations of $\mathcal{P}_M$ by $\mathcal{N}_M \in \mathbb{R}^{M \times 3}$ and $\mathcal{V}_M \in \mathbb{R}^M$ , respectively. The loss function for pretraining the masked autoencoders is composed of two terms:
99
+
100
+ $$
101
+ L _ {n} = \left\| \mathcal {N} _ {M} - \widehat {\mathcal {N}} _ {M} \right\| _ {2} ^ {2}; \tag {3}
102
+ $$
103
+
104
+ $$
105
+ L _ {v} = \left\| \mathcal {V} _ {M} - \widehat {\mathcal {V}} _ {M} \right\| _ {1}; \tag {4}
106
+ $$
107
+
108
+ where $\widehat{\mathcal{N}}_M$ and $\widehat{\mathcal{V}}_M$ are the predicted versions of $\mathcal{N}_M$ and $\mathcal{V}_{M}$, respectively. The total loss function is $L = \lambda_{1}L_{n} + \lambda_{2}L_{v}$, where $\lambda_{1} = 1, \lambda_{2} = 1$ in our case.
109
+
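+ A minimal PyTorch sketch of this loss is given below (tensor and function names are illustrative; we average over the masked points, which only rescales Eqs. (3)-(4)).
+
+ ```python
+ import torch
+
+ def maskfeat3d_loss(pred_normals, gt_normals, pred_vars, gt_vars,
+                     lambda_n=1.0, lambda_v=1.0):
+     """L2 loss on predicted normals plus L1 loss on predicted surface variations."""
+     loss_n = ((pred_normals - gt_normals) ** 2).sum(dim=-1).mean()  # Eq. (3)
+     loss_v = (pred_vars - gt_vars).abs().mean()                     # Eq. (4)
+     return lambda_n * loss_n + lambda_v * loss_v
+ ```
+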
110
+ # 3.3 ENCODER DESIGN
111
+
112
+ Unlike most MAE-based approaches that are limited to ViT-based encoders, our approach is not restricted to any specific type of encoder. Common 3D encoders for point clouds are all supported, as long as the encoder outputs a set of learned features bound to spatial blocks, where spatial blocks could be point patches used for ViT-like transformer encoders (Yu et al., 2022; Pang et al., 2022; Liu et al., 2022; Zhang et al., 2022), set abstractions used by PointNet++-like encoders (Qi et al., 2017b; Qian et al., 2022), or coarse voxels used by sparse CNN-based encoders (Wang et al., 2017; Graham et al., 2018; Choy et al., 2019).
113
+
114
+ In the following, we briefly review these typical encoders and their adaptation for our pretraining.
115
+
116
+ ViT-based encoders These encoders first embed point patches via PointNet (Qi et al., 2017a), then send these patch tokens to a standard transformer that includes several multihead self-attention layers and feedforward layers. The transformer outputs the fabricated token features, corresponding to every input point patch. The token feature $\mathbf{f}_i$ and the patch center $\mathbf{c}_i$ form a block feature pair $B_{i} = \{\mathbf{f}_{i},\mathbf{c}_{i}\}$, which is needed by our decoder. Here we call $\mathbf{f}_i$ the block feature and $\mathbf{c}_i$ the block centroid.
117
+
118
+ PointNet++-like encoders In these encoders, the network features are aggregated through a number of set abstraction levels. We take the learned features and the centroids at the coarsest set abstractions as block feature pairs.
119
+
120
+ Sparse CNN-based encoders These encoders apply 3D convolution on sparse voxels from the finest level to the coarsest level. Multiple convolution layers and resblocks are commonly used. We interpolate the coarse voxel features at the centroids of the unmasked patches and use these interpolated features and the patch centroids to form our block feature pairs.
121
+
122
+ As suggested by (Pang et al., 2022), the early leaking of masked point information to the network could jeopardize feature learning. We adopt this suggestion: feed the unmasked points to the encoder only, and leave the masked points to the decoder.
123
+
124
+ # 3.4 DECODER DESIGN
125
+
126
+ Decoder structure We design an attention-based decoder to restore the target features at masked regions. The decoder takes the block feature pairs $\mathcal{B} \coloneqq \{B_i\}_{i=1}^b$ from the encoder and a query point set $\mathcal{Q}$ , i.e., the masked point set $\mathcal{P}_M$ . It is composed of a stack of $l$ transformer blocks, where $l = 4$ in our case (See Fig. 2). Each block contains a self-attention layer and a cross-attention layer. The self-attention layer takes the query points and their positional embeddings as input and outputs the per-query point features, denoted by $S^{in}$ . Then $S^{in}$ and the encoder block features $\mathcal{B}$ are passed into the cross-attention layer, where $S^{in}$ serves as attention query, the block features serve as attention key and value, and the block centroids are the positional embedding of the block features. The output per-point features from the last block go through an MLP head to predict the target features at the query points.
127
+
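+ The following is a rough PyTorch sketch of one such decoder block (the module names, pre-norm residual layout, and dimensions are our assumptions, not the authors' released implementation): query features and positional embeddings of the masked points exchange information via self-attention and then cross-attend into the encoder's block features, with the block centroids supplying the key/value positional embeddings.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DecoderBlock(nn.Module):
+     """One decoder block: self-attention over query points, then cross-attention
+     into the encoder block features, then a feed-forward layer (pre-norm residuals)."""
+     def __init__(self, dim=384, heads=6):
+         super().__init__()
+         self.self_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
+         self.cross_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
+         self.ffn = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))
+         self.norm1, self.norm2, self.norm3 = nn.LayerNorm(dim), nn.LayerNorm(dim), nn.LayerNorm(dim)
+
+     def forward(self, q_feat, q_pos, blk_feat, blk_pos):
+         # propagate context among the query (masked) points
+         x = q_feat + q_pos
+         h = self.norm1(x)
+         x = x + self.self_attn(h, h, h)[0]
+         # fabricate query features from the encoder block features (keys/values)
+         kv = blk_feat + blk_pos
+         x = x + self.cross_attn(self.norm2(x), kv, kv)[0]
+         return x + self.ffn(self.norm3(x))
+ ```
+
+ In the full decoder, $l = 4$ such blocks would be stacked, followed by an MLP head that maps each query point's output feature to its predicted normal and surface variation.
+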
128
+ Efficacy of self-attention layers At first glance, it is sufficient to use cross-attention layers only for predicting per-point features. The recent masked discrimination work (Liu et al., 2022) obeys this intuition for its decoder design, no information exchanged between different query points. Instead, we introduce the self-attention layer to propagate information over query points and use multiple attention blocks to strengthen the mutual relationship progressively. We found that our design significantly improves feature learning, as verified by our ablation study (See Sec. 4.3).
129
+
130
+ Support for various encoders In the above design, the decoder needs only the block feature pairs from the encoder, and thus can leverage various encoder structures, not limited to ViT-based transformer structures. This advantage is verified by our experiments (see Sec. 4).
131
+
132
+ Feature reconstruction versus position reconstruction Note that our decoder and loss design do not explicitly model point positions, which are zero-order surface properties complementary to surface normals and surface variations. Instead, the decoder predicts feature values at query points. Therefore, the zero-order positional information is already encoded implicitly. This explains why our approach is superior to baseline approaches that reconstruct point positions for feature learning (See Sec. 4.2).
135
+
136
+ Query point selection Due to the quadratic complexity of self-attention, the computational cost for a full query set could be much higher. In practice, we can randomly choose a point subset from $\mathcal{P}_M$ as the query set during training. By default, we use all masked points as queries.
137
+
138
+ # 4 EXPERIMENT ANALYSIS
139
+
140
+ We conducted a series of experiments and ablation studies to validate the efficacy and superiority of our masked 3D feature prediction approach, in short MaskFeat3D, for point cloud pretraining.
141
+
142
+ # 4.1 EXPERIMENT SETUP
143
+
144
+ Pretraining dataset We choose the ShapeNet (Chang et al., 2015) dataset for our pretraining, following the practice of PointBERT (Yu et al., 2022) and previous 3D MAE-based approaches (Pang et al., 2022; Zhang et al., 2022; Liu et al., 2022). ShapeNet contains 57748 synthetic 3D shapes from 55 categories. We sample 50000 points uniformly on each shape; for each point in the point cloud, we use its 128 nearest points among these samples as the local region for approximating the surface variation. During pretraining, $N = 2048$ points are randomly sampled to create the point cloud.
145
+
146
+ Network training We integrated different encoders with our masked 3D feature prediction approach, including the ViT-based transformer used by (Pang et al., 2022), the sparse-CNN-based encoder (Choy et al., 2019), and the PointNeXt encoder (Qian et al., 2022), which is an advanced version of PointNet++. We implemented all pretraining models in PyTorch and used the AdamW optimizer with $10^{-4}$ weight decay. We use PointBERT's masking strategy for ShapeNet pretraining. We set $K = 128$ in FPS, use the $k = 32$ nearest points to form each point patch, and find that a masking ratio of $60\%$ works best empirically. The number of transformer blocks in the decoder is 4. The learning rates of the encoder and the decoder are set to $10^{-3}$ and $10^{-4}$, respectively. Standard data augmentations such as rotation, scaling, and translation are employed. All models were trained for 300 epochs on eight 16GB Nvidia V100 GPUs. The total batch size is 64.
147
+
148
+ Downstream Tasks We choose shape classification and shape part segmentation tasks to validate the efficacy and generalizability of our pretrained networks.
149
+
150
+ - Shape classification: The experiments were carried out on two different datasets: ModelNet40 (Wu et al., 2015) and ScanObjectNN (Uy et al., 2019). ModelNet40 is a widely used synthetic dataset that comprises 40 classes and contains 9843 training objects and 2468 test objects. In contrast, ScanObjectNN is a real-world scanned dataset that includes approximately 15000 actual scanned objects from 15 classes. As the domain gap between ShapeNet and ScanObjectNN is larger than that between ShapeNet and ModelNet40, the evaluation on ScanObjectNN is a good measure of the generalizability of pretrained networks.
151
+ - Shape part segmentation: The ShapeNetPart dataset (Yi et al., 2016) contains 16880 models from 16 shape categories, and each model has $2\sim 6$ parts. Following the standard evaluation protocol (Qi et al., 2017b), 2048 points are sampled on each shape. For evaluation, we report per-class mean IoU (cls. mIoU) and mean IoU averaged over all test instances (ins. mIoU).
152
+
153
+ The training-and-test split of the above tasks follows existing works. For these downstream tasks, we employ the task-specific decoders proposed by PointMAE (Pang et al., 2022) and reload the pretrained weights for the encoder. Training details are provided in the supplemental material.
154
+
155
+ # 4.2 EFFICACY OF 3D FEATURE PREDICTION
156
+
157
+ The advantage of our masked feature prediction approach in learning discriminative features is verified by its superior performance on downstream tasks.
158
+
159
+ <table><tr><td rowspan="2">Method</td><td colspan="3">ScanObjectNN</td><td colspan="2">ShapeNetPart</td><td colspan="2">ShapeNetPart(1% labels)</td></tr><tr><td>OBJ-BG</td><td>OBJ-ONLY</td><td>PB-T50-RS</td><td>ins. mIoU</td><td>cls. mIoU</td><td>ins. mIoU</td><td>cls. mIoU</td></tr><tr><td>PointViT† Yu et al. (2022)</td><td>79.9</td><td>80.6</td><td>77.2</td><td>85.1</td><td>83.4</td><td>77.6</td><td>72.2</td></tr><tr><td>PointBERT Yu et al. (2022)</td><td>87.4</td><td>88.1</td><td>83.1</td><td>85.6</td><td>84.1</td><td>79.2</td><td>73.9</td></tr><tr><td>MaskDiscr Liu et al. (2022)</td><td>89.7</td><td>89.3</td><td>84.3</td><td>86.0</td><td>84.4</td><td>78.8</td><td>72.3</td></tr><tr><td>MaskSurfel Zhang et al. (2022)</td><td>91.2</td><td>89.2</td><td>85.7</td><td>86.1</td><td>84.4</td><td>-</td><td>-</td></tr><tr><td>PointMAE Pang et al. (2022)</td><td>90.0</td><td>88.3</td><td>85.2</td><td>86.1</td><td>-</td><td>79.1</td><td>74.4</td></tr><tr><td>MaskFeat3D (PointViT)</td><td>91.7(91.6)</td><td>90.0(89.6)</td><td>87.7(87.5)</td><td>86.3(86.3)</td><td>84.9(84.8)</td><td>80.0(79.9)</td><td>75.1(75.0)</td></tr></table>
160
+
161
+ Table 1: Performance comparison of MAE-based approaches on downstream tasks. All the methods in the first section use the same transformer backbone architecture, PointViT. $\dagger$ represents the from scratch results and all other methods represent the fine-tuning results using pretrained weights. The average result of 3 runs is given in brackets.
162
+
163
+ <table><tr><td rowspan="2">Method</td><td colspan="3">ScanObjectNN</td><td colspan="2">ShapeNetPart</td></tr><tr><td>OBJ-BG</td><td>OBJ-ONLY</td><td>PB-T50-RS</td><td>ins. mIoU</td><td>cls. mIoU</td></tr><tr><td>PointNet Qi et al. (2017a)</td><td>73.3</td><td>79.2</td><td>68.0</td><td>-</td><td>-</td></tr><tr><td>PointNet++ Qi et al. (2017b)</td><td>82.3</td><td>84.3</td><td>77.9</td><td>85.1</td><td>81.9</td></tr><tr><td>PointCNN Li et al. (2018)</td><td>86.1</td><td>85.5</td><td>78.5</td><td>86.1</td><td>84.6</td></tr><tr><td>DGCNN Wang et al. (2019)</td><td>82.8</td><td>86.2</td><td>78.1</td><td>85.2</td><td>82.3</td></tr><tr><td>MinkowskiNet Choy et al. (2019)</td><td>84.1</td><td>86.1</td><td>80.1</td><td>85.3</td><td>83.2</td></tr><tr><td>PointTransformer Zhao et al. (2021)</td><td>-</td><td>-</td><td>-</td><td>86.6</td><td>83.7</td></tr><tr><td>PointMLP Ma et al. (2022)</td><td>88.7</td><td>88.2</td><td>85.4</td><td>86.1</td><td>84.6</td></tr><tr><td>StratifiedTransformer Lai et al. (2022)</td><td>-</td><td>-</td><td>-</td><td>86.6</td><td>85.1</td></tr><tr><td>PointNeXt Qian et al. (2022)</td><td>91.9</td><td>91.0</td><td>88.1</td><td>87.1</td><td>84.7</td></tr><tr><td>MaskFeat3D (PointViT)</td><td>91.7(91.6)</td><td>90.0(89.6)</td><td>87.7(87.5)</td><td>86.3(86.3)</td><td>84.9(84.8)</td></tr><tr><td>MaskFeat3D (MinkowskiNet)</td><td>85.1(85.0)</td><td>87.0(86.7)</td><td>80.8(80.6)</td><td>85.6(85.5)</td><td>83.5(83.5)</td></tr><tr><td>MaskFeat3D (PointNeXt)</td><td>92.7(92.6)</td><td>92.0(91.9)</td><td>88.6(88.5)</td><td>87.4(87.4)</td><td>85.5(85.5)</td></tr></table>
164
+
165
+ Table 2: Comparison with supervised methods. The average result of 3 runs is given in brackets.
166
+
167
+ Comparison with MAE-based approaches We compare our approach with other MAE-based approaches that use the same encoder structure. Tab. 1 reports that: (1) the performance of all MAE-based methods surpasses their supervised baseline - PointViT; (2) our strategy of reconstructing point features instead of point positions yields significant improvements in ScanObjectNN classification, improving overall accuracy on the most challenging split, PB-T50-RS, from $85.7\%$ (MaskSurfel) to $87.7\%$, and showing consistent improvements on other splits and ShapeNetPart segmentation.
168
+
169
+ We also compare the performance of our approach with PointBERT (Yu et al., 2022), PointMAE (Pang et al., 2022), and MaskDiscr (Liu et al., 2022) on ShapeNetPart segmentation with less labeled data. In this experiment, we randomly select $1\%$ labeled data from each category, and finetune the network with all selected data. The performance is reported in Tab. 1, which shows that using our pretrained network leads to much better performance than the baseline methods.
170
+
171
+ Comparison with supervised approaches Compared with state-of-the-art supervised methods, our approach again achieves superior performance to most existing works, as seen from Tab. 2, including PointNet++ (Qi et al., 2017b), PointCNN (Li et al., 2018), DGCNN (Wang et al., 2019), MinkowskiNet (Choy et al., 2019), PointTransformer (Zhao et al., 2021) and PointMLP (Ma et al., 2022). It is only inferior to approaches that use advanced encoder structures, such as the stratified transformer (Lai et al., 2022) and PointNeXt (Qian et al., 2022).
172
+
173
+ Encoder replacement To make a fairer comparison, we replaced the PointViT encoder with PointNeXt's encoder and retrained our pretraining network, denoted as MaskFeat3D (PointNeXt). From Tab. 2, we can see that our pretraining approach with this enhanced encoder yields SOTA performance on all the downstream tasks, surpassing PointNeXt trained from scratch. We also used MinkowskiNet (Choy et al., 2019) as our pretraining encoder; the gain over MinkowskiNet trained from scratch is $+0.7\%$ overall accuracy on ScanObjectNN classification and $+0.3\%$ on ShapeNetPart segmentation. Please refer to the supplementary material for details.
174
+
175
+ Few-shot Classification To perform few-shot classification on ModelNet40, we adopt the "K-way N-shot" settings as described in prior work (Wang et al., 2021a; Yu et al., 2022; Pang et al., 2022). Specifically, we randomly choose K out of the 40 available classes and sample N+20 3D shapes per class, with N shapes used for training and 20 for testing. We evaluate the performance of MaskFeat3D under four few-shot settings: 5-way 10-shot, 5-way 20-shot, 10-way 10-shot, and 10-way 20-shot. To
176
+
177
+ <table><tr><td rowspan="2">Method</td><td colspan="2">5-way</td><td colspan="2">10-way</td></tr><tr><td>10-shot</td><td>20-shot</td><td>10-shot</td><td>20-shot</td></tr><tr><td>DGCNN†</td><td>31.6 ± 2.8</td><td>40.8 ± 4.6</td><td>19.9 ± 2.1</td><td>16.9 ± 1.5</td></tr><tr><td>OcCo</td><td>90.6 ± 2.8</td><td>92.5 ± 1.9</td><td>82.9 ± 1.3</td><td>86.5 ± 2.2</td></tr><tr><td>CrossPoint</td><td>92.5 ± 3.0</td><td>94.9 ± 2.1</td><td>83.6 ± 5.3</td><td>87.9 ± 4.2</td></tr><tr><td>Transformer†</td><td>87.8 ± 5.2</td><td>93.3 ± 4.3</td><td>84.6 ± 5.5</td><td>89.4 ± 6.3</td></tr><tr><td>OcCo</td><td>94.0 ± 3.6</td><td>95.9 ± 2.3</td><td>89.4 ± 5.1</td><td>92.4 ± 4.6</td></tr><tr><td>PointBERT</td><td>94.6 ± 3.1</td><td>96.3 ± 2.7</td><td>91.0 ± 5.4</td><td>92.7 ± 5.1</td></tr><tr><td>MaskDiscr</td><td>95.0 ± 3.7</td><td>97.2 ± 1.7</td><td>91.4 ± 4.0</td><td>93.4 ± 3.5</td></tr><tr><td>PointMAE</td><td>96.3 ± 2.5</td><td>97.8 ± 1.8</td><td>92.6 ± 4.1</td><td>95.0 ± 3.0</td></tr><tr><td>MaskFeat3D</td><td>97.1 ± 2.1</td><td>98.4 ± 1.6</td><td>93.4 ± 3.8</td><td>95.7 ± 3.4</td></tr></table>
178
+
179
+ Table 3: Few-shot classification on ModelNet40. We report the average accuracy $(\%)$ and standard deviation $(\%)$ of 10 independent experiments.
180
+
181
+ <table><tr><td>Method</td><td>Target Feature</td><td>ScanNN</td></tr><tr><td rowspan="4">PointMAE</td><td>position only</td><td>85.2</td></tr><tr><td>position + normal*</td><td>85.7</td></tr><tr><td>position + surface variation*</td><td>85.9</td></tr><tr><td>position + normal + variation*</td><td>86.0</td></tr><tr><td rowspan="3">MaskFeat3D</td><td>normal</td><td>86.5</td></tr><tr><td>surface variation</td><td>87.0</td></tr><tr><td>normal + variation</td><td>87.7</td></tr></table>
182
+
183
+ Table 4: Ablation study on different features. * uses position-index matching Zhang et al. (2022) for feature loss computation.
184
+
185
+ mitigate the effects of random sampling, we conduct 10 independent runs for each few-shot setting and report the mean accuracy and standard deviation. Additionally, more ModelNet40 results can be found in the supplementary material.
186
+
187
+ Overall, the improvements of our approach are consistent across different backbone encoders and datasets.
188
+
189
+ # 4.3 ABLATION STUDY
190
+
191
+ We proceed to present an ablation study to justify various design choices. For simplicity, we choose the shape classification task on ScanObjectNN, where the gaps under different configurations are salient and provide meaningful insights on the pros and cons of various design choices. Due to space constraints, additional ablation studies are available in the supplementary material.
192
+
193
+ Decoder design The primary question that arises is whether it is essential to disregard point position recovery. PointMAE's decoder follows a standard ViT-like architecture, utilizing a fully connected (FC) layer to directly predict the masked point coordinates. We implemented this decoder to predict our target features. However, since their decoder design does not encode masked point position, it cannot solely predict target features without predicting point position. To address this, we follow the approach proposed in (Zhang et al., 2022) and employ position-index matching for feature loss computation. As shown in Tab. 4, even though incorporating point features as the predicting target can enhance performance, the overall performance still significantly lags behind our design. This experiment highlights the significance of both point feature prediction and disregarding point position recovery.
194
+
195
+ Target feature choice In Tab. 4, the experiment shows that: (1) All combinations of point normal and surface variation can yield significant improvements over existing MAE approaches that recover point positions (cf. Tab. 1); (2) using both point normals and surface variations yields the best performance. As discussed in Sec. 3.2, this is due to the fact that they correspond to first- and second-order differential properties. They are relevant but complementary to each other. Therefore, reconstructing them together forces the encoder to learn more informative features than merely reconstructing one of them.
196
+
197
+ Decoder depth Tab. 5-a varies the number of transformer blocks (decoder depth). A sufficiently deep decoder is necessary for feature learning. Increasing the number of blocks from 2 to 4 provides a $+1.5\%$ improvement on the ScanObjectNN classification task. The performance drops when increasing the depth further, due to overfitting. Interestingly, we note that even a 1-block decoder achieves $85.8\%$ accuracy, which is still higher than the runner-up method (PointMAE).
198
+
199
+ Data augmentation Tab. 5-b studies three traditional data augmentation methods: rotation, scaling, and translation. Since standard anisotropic scaling could change the surface normals and variations, we scale the shape by the same factor along all three axes. The experiments show that rotation and scaling play a more important role.
200
+
201
+ Masking ratio. Tab. 5-c varies the masking ratio of the input point cloud, which is another important factor in our approach. When the masking ratio is too large, e.g., $90\%$, the remaining part contains too little information, which makes the task too hard to complete. When the masking ratio is too
202
+
203
+ (a) Decoder depth
204
+
205
+ <table><tr><td># blocks</td><td>ScanNN</td></tr><tr><td>1</td><td>85.8</td></tr><tr><td>2</td><td>86.2</td></tr><tr><td>4</td><td>87.7</td></tr><tr><td>8</td><td>87.5</td></tr><tr><td>12</td><td>87.1</td></tr></table>
206
+
207
+ (b) Data augmentation
208
+
209
+ <table><tr><td>rot</td><td>scale</td><td>trans</td><td>ScanNN</td></tr><tr><td>✓</td><td>-</td><td>-</td><td>87.0</td></tr><tr><td>-</td><td>✓</td><td>-</td><td>85.9</td></tr><tr><td>✓</td><td>✓</td><td>-</td><td>87.7</td></tr><tr><td>-</td><td>✓</td><td>✓</td><td>85.1</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>86.7</td></tr></table>
210
+
211
+ (c) Mask ratio
212
+
213
+ <table><tr><td>ratio</td><td>ScanNN</td></tr><tr><td>40%</td><td>86.8</td></tr><tr><td>60%</td><td>87.7</td></tr><tr><td>90%</td><td>86.5</td></tr></table>
214
+
215
+ (d) Decoder attention
216
+
217
+ <table><tr><td>attention type</td><td>ScanNN</td></tr><tr><td>cross only</td><td>85.7</td></tr><tr><td>cross+self</td><td>87.7</td></tr></table>
218
+
219
+ (e) Query point ratio
220
+
221
+ <table><tr><td>query/mask</td><td>ScanNN</td></tr><tr><td>25%</td><td>85.7</td></tr><tr><td>50%</td><td>86.2</td></tr><tr><td>75%</td><td>86.6</td></tr><tr><td>100%</td><td>87.7</td></tr></table>
222
+
223
+ small, e.g., $40\%$, the task becomes too simple and impedes feature learning. In our experiments, a masking ratio of $60\%$ shows the best performance.
224
+
225
+ Decoder block design We tested whether the self-attention layer in our decoder is essential. By simply removing the self-attention layers and using cross-attention layers only, we find that the performance drops significantly (-2.0); see Tab. 5-d.
226
+
227
+ Number of query points Finally, we varied the number of query points used by our decoder to see how it affects the network performance. Tab. 5-e shows that more query points lead to better performance. Here, "query/mask" is the ratio of selected query points with respect to the total number of masked points.
228
+
229
+ # 4.4 SCENE-LEVEL PRETRAINING EXTENSION
230
+
231
+ In principle, masked point cloud autoencoders could be scaled to noisy, large-scale point clouds. Additionally, we conducted an extension experiment on real-world scene-level data to evaluate our approach. Specifically, we pretrained our model on the ScanNet (Dai et al., 2017) dataset and evaluated its performance on 3D object detection task using the ScanNet and SUN RGB-D (Song et al., 2015) dataset. The training details can be found in the supplementary material. In this experiment, we observed that surface
232
+
233
+ Table 5: Ablation studies of our design choices. Please refer to Sec. 4.3 for a detailed analysis.
234
+
235
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Backbone</td><td colspan="2">ScanNet</td><td colspan="2">SUN RGB-D</td></tr><tr><td>AP25</td><td>AP50</td><td>AP25</td><td>AP50</td></tr><tr><td>STRL</td><td>VoteNet</td><td>59.5</td><td>38.4</td><td>58.2</td><td>35.0</td></tr><tr><td>RandomRooms</td><td>VoteNet</td><td>61.3</td><td>36.2</td><td>59.2</td><td>35.4</td></tr><tr><td>PointContrast</td><td>VoteNet</td><td>59.2</td><td>38.0</td><td>57.5</td><td>34.8</td></tr><tr><td>DepthContrast</td><td>VoteNet</td><td>62.1</td><td>39.1</td><td>60.4</td><td>35.4</td></tr><tr><td>Point-M2AE</td><td>Point-M2AE</td><td>66.3</td><td>48.3</td><td>-</td><td>-</td></tr><tr><td>MaskFeat3D</td><td>VoteNet</td><td>63.3</td><td>41.0</td><td>61.0</td><td>36.5</td></tr><tr><td>MaskFeat3D</td><td>Point-M2AE</td><td>67.5</td><td>50.0</td><td>-</td><td>-</td></tr><tr><td>MaskFeat3D</td><td>CAGroup3D</td><td>75.6</td><td>62.3</td><td>67.2</td><td>51.0</td></tr></table>
236
+
237
+ Table 6: 3D object detection results.
238
+
239
+ normal has a minor influence on the pretraining, while surface variation remains a robust feature. Moreover, we discovered that the color signal could be an effective target feature. Hence, we pretrained our model with surface variation and color as the target features, and then fine-tuned the pretrained encoder on the downstream tasks. As shown in Tab. 6, given that previous studies lack a unified network backbone, we selected two of the most common works, VoteNet and Point-M2AE, along with the latest work, CAGroup3D, as the network backbones. Our model exhibits consistent improvements in all the settings, which further demonstrates the generalizability of our approach on noisy, large-scale point clouds. Although these scene-level experiments are not the main focus of this paper, the results indicate that this is a promising direction.
240
+
241
+ # 5 CONCLUSION
242
+
243
+ Our study reveals that restoration of masked point location is not essential for 3D MAE training. By predicting geometric features such as surface normals and surface variations at the masked points via our cross-attention-based decoder, the performance of 3D MAEs can be improved significantly, as evaluated through extensive experiments and downstream tasks. Moreover, the performance gains remain consistent when using different encoder backbones. We hope that our study can inspire future research in the development of robust MAE-based 3D backbones.
244
+
245
+ Acknowledgement. Part of this work was done when Siming Yan was a research intern at Microsoft Research Asia. Additionally, we would like to acknowledge the gifts from Google, Adobe, Wormpex AI, and support from NSF IIS-2047677, HDR-1934932, CCF-2019844, and IARPA WRIVA program.
246
+
247
+ # REFERENCES
248
+
249
+ Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. BEiT: BERT pre-training of image transformers. In ICLR, 2022.
250
+ Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020.
251
+ Emanuele Bugliarello, Ryan Cotterell, Naoaki Okazaki, and Desmond Elliott. Multimodal pretraining unmasked: A meta-analysis and a unified framework of vision-and-language BERTs. Transactions of the Association for Computational Linguistics, 9, 2021.
252
+ Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An information-rich 3D model repository. arxiv:1512.03012, 2015.
253
+ Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020.
254
+ Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021.
255
+ Christopher B. Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In CVPR, pp. 3075-3084, 2019.
256
+ Ulrich Clarenz, Martin Rumpf, and Alexandru Telea. Robust feature detection and local classification for surfaces based on moment analysis. IEEE Trans. Vis. Comput. Graphics, 10(5):516-524, 2004.
257
+ Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas A. Funkhouser, and Matthias Nießner. ScanNet: Richly-annotated 3D reconstructions of indoor scenes. In CVPR, pp. 2432-2443, 2017.
258
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In ACL, pp. 4171-4186, Stroudsburg, PA, USA, 2019.
259
+ Benjamin Graham, Martin Engelcke, and Laurens van der Maaten. 3D semantic segmentation with submanifold sparse convolutional networks. In CVPR, 2018.
260
+ Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H. Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, Bilal Piot, Koray Kavukcuoglu, Rémi Munos, and Michal Valko. Bootstrap your own latent: A new approach to self-supervised learning. In NeurIPS, 2020.
261
+ Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020.
262
+ Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross B. Girshick. Masked autoencoders are scalable vision learners. In CVPR, pp. 15979-15988, 2022.
263
+ Georg Hess, Johan Jaxing, Elias Svensson, David Hagerman, Christoffer Petersson, and Lennart Svensson. Masked autoencoders for self-supervised learning on automotive point clouds. arXiv:2207.00531, 2022.
264
+ Hugues Hoppe, Tony DeRose, Tom Duchamp, John Alan McDonald, and Werner Stuetzle. Surface reconstruction from unorganized points. In SIGGRAPH, pp. 71-78. ACM, 1992.
265
+ Ji Hou, Benjamin Graham, Matthias Nießner, and Saining Xie. Exploring data-efficient 3D scene understanding with contrastive scene contexts. In CVPR, 2021.
266
+ Qi-Xing Huang, Simon Flóry, Natasha Gelfand, Michael Hofer, and Helmut Pottmann. Reassembling fractured objects by geometric matching. ACM Trans. Graph., 25(3):569-578, 2006.
267
+ Hamid Laga, Yulan Guo, Hedi Tabia, Robert B Fisher, and Mohammed Bennamoun. 3D Shape analysis: fundamentals, theory, and applications. John Wiley & Sons, 2018.
268
+
269
+ Xin Lai, Jianhui Liu, Li Jiang, Liwei Wang, Hengshuang Zhao, Shu Liu, Xiaojuan Qi, and Jiaya Jia. Stratified transformer for 3d point cloud segmentation. In CVPR, pp. 8500-8509, 2022.
270
+ Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. PointCNN: Convolution on X-transformed points. In NeurIPS, pp. 828-838, 2018.
271
+ Haotian Liu, Mu Cai, and Yong Jae Lee. Masked discrimination for self-supervised learning on point clouds. In ECCV, 2022.
272
+ Xiao Liu, Fanjin Zhang, Zhenyu Hou, Li Mian, Zhaoyu Wang, Jing Zhang, and Jie Tang. Self-supervised learning: Generative or contrastive. IEEE Transactions on Knowledge and Data Engineering, 2021.
273
+ Xu Ma, Can Qin, Haoxuan You, Haoxi Ran, and Yun Fu. Rethinking network design and local geometry in point cloud: A simple residual MLP framework. In ICLR, 2022.
274
+ Yatian Pang, Wenxiao Wang, Francis E. H. Tay, Wei Liu, Yonghong Tian, and Li Yuan. Masked autoencoders for point cloud self-supervised learning. In ECCV, 2022.
275
+ Mark Pauly and Markus Gross. Spectral processing of point-sampled geometry. In SIGGRAPH, pp. 379-386, 2001.
276
+ Mark Pauly, Markus Gross, and Leif P. Kobbelt. Efficient simplification of point-sampled surfaces. In Proceedings of the Conference on Visualization, pp. 163-170, 2002.
277
+ Mark Pauly, Richard Keiser, and Markus Gross. Multi-scale Feature Extraction on Point-Sampled Surfaces. Computer Graphics Forum, 2003. ISSN 1467-8659.
278
+ Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. PointNet: Deep learning on point sets for 3D classification and segmentation. In CVPR, pp. 652-660, 2017a.
279
+ Charles R. Qi, Li Yi, Hao Su, and Leonidas J. Guibas. PointNet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, pp. 5105-5114, 2017b.
280
+ Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Abed Al Kader Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. PointNeXt: Revisiting PointNet++ with improved training and scaling strategies. In NeurIPS, 2022.
281
+ Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 567-576, 2015.
282
+ Mikaela Angelina Uy, Quang-Hieu Pham, Binh-Son Hua, Duc Thanh Nguyen, and Sai-Kit Yeung. Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In ICCV, pp. 1588-1597. IEEE, 2019.
283
+ Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. In ICCV, pp. 9782-9792, 2021a.
284
+ Peng-Shuai Wang, Yang Liu, Yu-Xiao Guo, Chun-Yu Sun, and Xin Tong. O-CNN: Octree-based convolutional neural networks for 3D shape analysis. ACM Trans. Graph., 36(4), 2017.
285
+ Peng-Shuai Wang, Yu-Qi Yang, Qian-Fang Zou, Zhirong Wu, Yang Liu, and Xin Tong. Unsupervised 3D learning for shape analysis via multiresolution instance discrimination. In AAAI, 2021b.
286
+ Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph cnn for learning on point clouds. ACM Trans. Graph., 38(5), 2019.
287
+ Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, pp. 14668-14678, 2022.
288
+ Jiajun Wu, Chengkai Zhang, Tianfan Xue, William T. Freeman, and Joshua B. Tenenbaum. Learning a probabilistic latent space of object shapes via 3D generative-adversarial modeling. In NeurIPS, 2016.
289
+
290
+ Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and Jianxiong Xiao. 3D ShapeNets: A deep representation for volumetric shapes. In CVPR, pp. 1912-1920. IEEE Computer Society, 2015.
291
+ Zhirong Wu, Yuanjun Xiong, Stella X. Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In CVPR, 2018.
292
+ Saining Xie, Jiatao Gu, Demi Guo, Charles R. Qi, Leonidas J Guibas, and Or Litany. PointContrast: Unsupervised pre-training for 3D point cloud understanding. ECCV, 2020.
293
+ Siming Yan, Zhenpei Yang, Chongyang Ma, Haibin Huang, Etienne Vouga, and Qixing Huang. Hpnet: Deep primitive segmentation using hybrid representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2753-2762, 2021.
294
+ Siming Yan, Zhenpei Yang, Haoxiang Li, Li Guan, Hao Kang, Gang Hua, and Qixing Huang. Implicit autoencoder for point cloud self-supervised representation learning. arXiv preprint arXiv:2201.00785, 2022.
295
+ Siming Yan, Chen Song, Youkang Kong, and Qixing Huang. Multi-view representation is what you need for point-cloud pre-training. In The Twelfth International Conference on Learning Representations, 2023.
296
+ Yaoqing Yang, Chen Feng, Yiru Shen, and Dong Tian. FoldingNet: Point cloud auto-encoder via deep grid deformation. In CVPR, 2018.
297
+ Li Yi, Vladimir G. Kim, Duygu Ceylan, I-Chao Shen, Mengyan Yan, Hao Su, Cewu Lu, Qixing Huang, Alla Sheffer, and Leonidas Guibas. A scalable active framework for region annotation in 3d shape collections. ACM Trans. Graph., 35(6), 2016.
298
+ Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-BERT: Pretraining 3D point cloud transformers with masked point modeling. In CVPR, pp. 19313-19322, June 2022.
299
+ Yabin Zhang, Jiehong Lin, Chenhang He, Yongwei Chen, Kui Jia, and Lei Zhang. Masked surfel prediction for self-supervised point cloud learning. arXiv:2207.03111, 2022.
300
+ Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3D features on any point-cloud. In ICCV, 2021.
301
+ Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In ICCV, pp. 16259-16268, 2021.
302
+ Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. ICLR, 2022.
303
+ Chengxu Zhuang, Siming Yan, Aran Nayebi, and Daniel Yamins. Self-supervised neural network models of higher visual cortex development. In 2019 Conference on Cognitive Computational Neuroscience, pp. 566-569. CCN, 2019.
304
+ Chengxu Zhuang, Siming Yan, Aran Nayebi, Martin Schrimpf, Michael C Frank, James J DiCarlo, and Daniel LK Yamins. Unsupervised neural network models of the ventral visual stream. Proceedings of the National Academy of Sciences, 118(3):e2014196118, 2021.
2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db7d8a0b02b517bf1373662c1454d56cde9672016abb584168b3e86a4f6783cf
3
+ size 340968
2024/3D Feature Prediction for Masked-AutoEncoder-Based Point Cloud Pretraining/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/05dead7a-7267-4e56-9f6a-9da24e57926b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b6654ce651398099ca8e06116e6cd030050a8bd3eb16c0d932ecbac407a265d
3
+ size 29128272
2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/full.md ADDED
@@ -0,0 +1,481 @@
 
 
 
 
1
+ # 3D RECONSTRUCTION WITH GENERALIZABLE NEURAL FIELDS USING SCENE PRIORS
2
+
3
+ Yang Fu $^{1\dagger}$ Shalini De Mello $^{2}$ Xueting Li $^{2}$ Amey Kulkarni $^{2}$
4
+
5
+ Jan Kautz $^{2}$ Xiaolong Wang $^{1}$ Sifei Liu $^{2}$
6
+
7
+ <sup>1</sup>University of California, San Diego <sup>2</sup>NVIDIA
8
+
9
+ # ABSTRACT
10
+
11
+ High-fidelity 3D scene reconstruction has been substantially advanced by recent progress in neural fields. However, most existing methods train a separate network from scratch for each individual scene. This is not scalable, inefficient, and unable to yield good results given limited views. While learning-based multi-view stereo methods alleviate this issue to some extent, their multi-view setting makes it less flexible to scale up and to broad applications. Instead, we introduce training generalizable Neural Fields incorporating scene Priors (NFPs). The NFP network maps any single-view RGB-D image into signed distance and radiance values. A complete scene can be reconstructed by merging individual frames in the volumetric space WITHOUT a fusion module, which provides better flexibility. The scene priors can be trained on large-scale datasets, allowing for fast adaptation to the reconstruction of a new scene with fewer views. NFP not only demonstrates SOTA scene reconstruction performance and efficiency, but it also supports single-image novel-view synthesis, which is underexplored in neural fields. More qualitative results are available at: https://oasisyang.github.io/neural-prior.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ Reconstructing a large indoor scene has been a long-standing problem in computer vision. A common approach is to use the Truncated Signed Distance Function (TSDF) (Zhou et al., 2018; Dai et al., 2017b) with a depth sensor on personal devices. However, the discretized representation with TSDF limits its ability to model fine-grained details, e.g., thin surfaces in the scene. Recently, a continuous representation using neural fields and differentiable volume rendering (Guo et al., 2022; Yu et al., 2022; Azinovic et al., 2022; Wang et al., 2022b; Li et al., 2022) has achieved impressive and detailed 3D scene reconstruction.
16
+
17
+ Although these results are encouraging, all of these methods require training a distinct network for every scene, leading to extended training durations and demanding a substantial number of input views.
18
+
19
+ To tackle these limitations, several works learn a generalizable neural network so that the representation can be shared among different scenes (Wang et al., 2021b; Zhang et al., 2022; Chen et al., 2021; Long et al., 2022; Xu et al., 2022). While these efforts scale up training on large-scale scene datasets, introduce generalizable intermediate scene representation, and significantly cut down inference time, they all rely on intricate fusion networks to handle multi-view input images at each iteration. This adds complexity to the training process and limits flexibility in data preprocessing.
20
+
21
+ In this paper, we propose to perform 3D reconstruction by learning generalizable Neural Fields using scene Priors (NFPs). Such priors are largely built upon depth-map inputs (given posed RGB-D images). By leveraging the priors, our NFPs network allows for a simple and flexible design with single-view inputs during training, and it can efficiently adapt to each novel scene using fewer input views. Specifically, full scene reconstruction is achieved by directly merging the posed multi-view frames and their corresponding fields from NFPs, without the need for learnable fusion blocks.
22
+
23
+ A direct way to generalize per-scene NeRF optimization is to encode each single-view input image into an intermediate representation in the volumetric space. Yet, co-learning the encoder and the
24
+
25
+ ![](images/b88df4f26f68e979997007c96719bbd9cdae447f0d890fa5472844dc415f1c2c.jpg)
26
+ Figure 1: We propose Neural Fields scene Prior (NFP) to enable fast reconstruction of geometry and texture of indoor scenes. Our method first (a) learns a generalizable network as a scene prior that obtains a coarse scene reconstruction in a feed-forward manner. Next, we directly fuse the per-view results and (b) perform per-scene optimization in a more accurate and efficient way leading to high-quality surface reconstruction and realistic texture reconstruction.
27
+
28
+ NeRF presents significant challenges. Given that a single-view image captures only a thin segment of a surface, it becomes considerably harder to discern the geometry compared to understanding the texture. Thus, to train NFPs, we introduce a two-stage paradigm: (i) We train a geometric reconstruction network to map depth images to local SDFs; (ii) We adopt this pre-trained network as a geometric prior to support the training of a separate color reconstruction network, as a texture prior, in which the radiance function can be easily learned with volumetric rendering (Wang et al., 2021a; Yariv et al., 2021), given the SDF prediction.
29
+
30
+ Dense voxel grids are a popular choice in many NeRF-based rendering techniques (Yen-Chen et al., 2020; Chen et al., 2021; Liu et al., 2020; Huang et al., 2021; Takikawa et al., 2021; Sun et al., 2022b; Wang et al., 2022b). However, in the single-view input setting, they fall short for two main reasons. First, a single-view image inherently captures just a thin and confined segment of surfaces, filling only a minuscule fraction of the entire voxel space. Second, dense voxel grids employ uniform sampling, neglecting surface priors such as the available depth information. Instead, we resort to a surface representation: we build a set of projected points in 3D space as keypoints, from which a continuous surface can be decoded. The keypoints span a compact 2D surface representation, allowing dense sampling close to the surface, which significantly enhances scalability.
31
+
32
+ NFPs can easily facilitate further fine-tuning on large-scale indoor scenes. Given the pretrained geometry and texture network as the scene prior, the single-scene reconstruction can be performed by optimizing the aggregated surface representation and the decoders.
33
+
34
+ With coarse reconstruction from the generalized network and highly compact surface representation, our approach achieves competitive scene reconstruction and novel view synthesis performance with substantially fewer views and faster convergence speed. In summary, our contributions include:
35
+
36
+ - We propose NFPs, a generalizable scene prior that enables fast, large-scale scene reconstruction.
37
+ - NFPs facilitate (a) single-view, across-scene input, (b) direct fusion of local frames, and (c) efficient per-scene fine-tuning.
38
+ - We introduce a continuous surface representation, taking advantage of the depth input and avoiding redundancy in the uniform sampling of a volume.
39
+ - With the limited number of views, we demonstrate competitive performance on both the scene reconstruction and novel view synthesis tasks, with substantially superior efficiency than existing approaches.
40
+
41
+ # 2 RELATED WORK
42
+
43
+ Reconstructing and rendering large-scale indoor scenes is crucial for various applications. Depth sensors, on the other hand, are becoming increasingly common in commercial devices, such as Kinect (Zhang, 2012; Smisek et al., 2013), iPhone LiDAR (Nowacki & Woda, 2019), etc. Leveraging
44
+
45
+ ![](images/b538ffe34cb5a37473bb1f432ae260f11d6ac7c646be6b520df98aa32042178c.jpg)
46
+ Figure 2: Overview of NFP. Given the RGBD input, we first extract the geometric and texture pixel feature using two encoders (Sec. 3.1). Then, we construct the continuous surface representation upon the discrete surface feature (Sec. 3.2). Next, we introduce a two-stage paradigm to learn the generalizable geometric and texture prior, optimized via multiple objectives (Sec. 3.3).
47
+
48
+ depth information in implicit neural representations is trending. We discuss both these topics in detail, in the following.
49
+
50
+ Multi-view scene reconstruction. Reconstructing 3D scenes from images was dominated by multi-view stereo (MVS) (Schönberger et al., 2016; Schonberger & Frahm, 2016), which often follows the single-view depth estimation (e.g., via feature matching) and depth fusion process (Newcombe et al., 2011; Dai et al., 2017b; Merrell et al., 2007). Recent learning-based MVS methods (Cheng et al., 2020; Düçeker et al., 2020; Huang et al., 2018; Luo et al., 2019) substantially outperform the conventional pipelines. For instance, Yao et al. (2018); Luo et al. (2019) build the cost-volume based on 2D image features and use 3D CNNs for better depth estimation. Another line of works (Sun et al., 2021; Bi et al., 2017) fuse multi-view depth and reconstruct surface meshes using techniques such as TSDF fusion. Instead of fusing the depth, Wei et al. (2021), Wang et al. (2021b), Zhang et al. (2022), and Xu et al. (2022) directly aggregate multi-view inputs into a radiance field for coherent reconstruction. The multi-view setting enables learning generalizable implicit representation, however, their scalability is constrained as they always require multi-view RGB/RGB-D data during training. Our approach, for the first time, learns generalizable scene priors from single-view images with substantially improved scalability.
51
+
52
+ Neural Implicit Scene Representation. A growing number of approaches (Yariv et al., 2020; Wang et al., 2021a; Yariv et al., 2021; Oechsle et al., 2021; Niemeyer et al., 2020; Sun et al., 2022a) represent a scene by implicit neural representations. Although these methods achieve impressive reconstruction of objects and scenes with small-scale and rich textures, they hardly faithfully reconstruct large-scale scenes due to the shape-radiance ambiguity suggested in (Zhang et al., 2020; Wei et al., 2021). To address this issue, Guo et al. (2022) and Yu et al. (2022) attempt to build the NeRF upon a given geometric prior, i.e., sparse depth maps and pretrained depth estimation networks. However, these methods take a long time to optimize on an individual scene. As mentioned previously, generalizable NeRF representations with multi-view feature aggregation are studied (Chen et al., 2021; Wang et al., 2021b; Zhang et al., 2022; Johari et al., 2022; Xu et al., 2022). However, they still focus on reconstructing the scene's appearance, e.g., for novel view synthesis, but cannot guarantee high-quality surface reconstruction.
53
+
54
+ Depth-supervised reconstruction and rendering.
55
+
56
+ With the availability of advanced depth sensors, many approaches seek depth-enhanced supervision of NeRF (Azinović et al., 2022; Li et al., 2022; Zhu et al., 2022; Sucar et al., 2021; Yu et al., 2022; Williams et al., 2022; Xu et al., 2022; Deng et al., 2022) since depth information is more accessible. For instance, Azinović et al. (2022) enables detailed reconstruction of large indoor scenes by comparing the rendered and input RGB-D images. Unlike most methods that use depth as supervision, Xu et al. (2022), Williams et al. (2022) and Dong et al. (2023) build the neural field conditioned on the geometric prior. For example, Point-NeRF pretrains a monocular depth estimation network and generates a point cloud by lifting the depth prediction. Compared to ours, their geometric prior is less integrated into the main reconstruction stream since it is separately learned and detached. Furthermore, these methods only consider performing novel view synthesis (Xu et al., 2022; Deng et al., 2022), where the geometry is not optimized, or perform pure geometric (Yu et al., 2022; Li
57
+
58
+ et al., 2022; Williams et al., 2022; Azinović et al., 2022) reconstruction. In contrast, our approach makes the scene prior and the per-scene optimization a unified model that enables more faithful and efficient reconstruction for both color and geometry.
59
+
60
+ # 3 METHOD
61
+
62
+ Given a sequence of RGB-D images and their corresponding camera poses, our goal is to perform fast and high-quality scene reconstruction. To this end, we learn a generalizable neural scene prior, which encodes an RGB image and its depth map as continuous neural fields in 3D space and decodes them into signed distance and radiance values. As illustrated in Fig. 2, we first extract generalizable surface features from geometry and texture encoders (Sec. 3.1). Then, pixels with depth values are backprojected to the 3D space as keypoints, from which continuous fields can be built with the proposed surface representation (Sec. 3.2). Motivated by previous works (Wang et al., 2021a; Yariv et al., 2021), we utilize two separate MLPs to decode the geometry and texture representations, which are further rendered into RGB and depth values (Sec. 3.3). To obtain high-quality surface reconstruction, we further propose to optimize the neural representation on top of the learned geometric and texture prior for a specific scene (Sec. 3.4).
63
+
64
+ # 3.1 CONSTRUCTING SURFACE FEATURE
65
+
66
+ Given an RGB-D image $\{\mathrm{I},\mathrm{D}\}$ , we first project the depth map into 3D point clouds in the world coordinate system using its camera pose $\{\mathrm{R},\mathrm{t}\}$ and intrinsic matrix K. We sub-sample $M$ points via Farthest Point Sampling (FPS), denoted as $\{p_m\}$ , $m\in [0,M - 1]$ , which are used as keypoints representing the discrete form of surfaces. We extract generalizable point-wise geometry and texture features, as described below, which are further splatted onto these keypoints. Both encoders are updated when training the NFP.
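+ As a minimal illustration of this construction, the sketch below (ours, not the authors' released code) back-projects a depth map with the intrinsics and pose and then sub-samples keypoints with FPS; the tensor shapes and the value of $M$ are illustrative assumptions.
+
+ ```python
+ import torch
+
+ def backproject_depth(depth, K, R, t):
+     """Lift an (H, W) depth map to world-space points of shape (H*W, 3)."""
+     H, W = depth.shape
+     v, u = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
+     pix = torch.stack([u, v, torch.ones_like(u)], dim=-1).float()       # (H, W, 3) homogeneous pixels
+     cam = (torch.linalg.inv(K) @ pix.reshape(-1, 3).T) * depth.reshape(1, -1)
+     world = R @ cam + t.reshape(3, 1)                                   # camera -> world frame
+     return world.T                                                      # (H*W, 3)
+
+ def farthest_point_sampling(points, M):
+     """Greedy FPS: indices of M well-spread keypoints."""
+     N = points.shape[0]
+     idx = torch.zeros(M, dtype=torch.long)
+     dist = torch.full((N,), float("inf"))
+     idx[0] = torch.randint(0, N, (1,)).item()
+     for i in range(1, M):
+         dist = torch.minimum(dist, ((points - points[idx[i - 1]]) ** 2).sum(-1))
+         idx[i] = torch.argmax(dist)
+     return idx
+
+ depth = torch.rand(48, 64) * 3.0                       # dummy depth map in meters
+ K = torch.tensor([[60., 0., 32.], [0., 60., 24.], [0., 0., 1.]])
+ R, t = torch.eye(3), torch.zeros(3)
+ points = backproject_depth(depth, K, R, t)
+ keypoints = points[farthest_point_sampling(points, M=256)]   # (256, 3) surface keypoints
+ ```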
67
+
68
+ Geometry encoder. For each surface point, we apply the K-nearest neighbor (KNN) algorithm to find $K - 1$ points and construct a local region with $K$ points. Thus, we obtain a collection of $M$ local regions, $\{p_m, \{p_k\}_{k \in \Psi_m}\}$, $\forall m \in [0, M - 1]$, where $\Psi_m$ is the neighbor index set of point $p_m$ and $|\Psi_m| = K - 1$. Then, we utilize a stack of PointConv (Wu et al., 2019) layers to extract the geometry feature of each local region, $f_{\mathrm{geo}}(p_m) = \mathrm{PointConv}(\{p_m, \{p_k\}_{k \in \Psi_m}\})$.
69
+
70
+ Texture encoder. In addition, we extract RGB features for the keypoints via a 2D convolutional neural network. In particular, we feed an RGB image $I$ into a UNet (Ronneberger et al., 2015) with ResNet34 (He et al., 2016) as the backbone, which outputs a dense feature map. Then, we splat the pixel-wise features $f_{\mathrm{tex}}(p_m)$ onto the keypoints, according to the projection location of the surface point $p_m$ on the image plane. Thus, each surface point is represented by both a geometry feature and a texture feature, denoted by $f(\mathrm{p_m}) = [f_{\mathrm{geo}}(\mathrm{p_m}), f_{\mathrm{tex}}(\mathrm{p_m})]$.
71
+
72
+ # 3.2 CONTINUOUS SURFACE IMPLICIT REPRESENTATION
73
+
74
+ Given the lifted keypoints and their projected geometry and texture features, in this section, we introduce how to construct continuous implicit fields conditioned on such discrete representations. We follow a spatial interpolation strategy: for any query point $\mathbf{x}$ (e.g., in a typical volume rendering process, it can be a sampled point along any ray), we first find the $K$ nearest surface points $\{p_v\}_{v \in V}$ where $V$ is a set of indices of the neighboring surface points. Then, the query point's feature can be obtained via aggregation of its neighboring surface points. In particular, we apply distance-based spatial interpolation as
75
+
76
+ $$
77
+ f(\mathrm{x}) = \frac{\sum_{v \in V} \omega_{v} f(\mathrm{p}_{v})}{\sum_{v \in V} \omega_{v}}; \quad \omega_{v} = \exp(-\|\mathrm{x} - \mathrm{p}_{v}\|), \tag{1}
78
+ $$
79
+
80
+ where $f(\mathbf{x})$ represents either the geometry $f_{\mathrm{geo}}(\mathbf{x})$ or the texture $f_{\mathrm{tex}}(\mathbf{x})$ feature, and $p_v$ is the position of the $v$ -th neighbouring keypoint. With distance-based spatial interpolation, we establish continuous implicit fields for any point from the discrete keypoints.
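+ A small sketch of Eq. 1 follows (a hedged illustration, not the reference implementation): each query point gathers its nearest keypoints and averages their features with weights $\exp(-\|\mathrm{x}-\mathrm{p}_v\|)$; the neighborhood size and feature dimension below are placeholder values.
+
+ ```python
+ import torch
+
+ def interpolate_features(query, keypts, feats, k=8):
+     """Eq. 1: query (Q, 3), keypts (M, 3), feats (M, C) -> interpolated features (Q, C)."""
+     dist, idx = torch.topk(torch.cdist(query, keypts), k, dim=-1, largest=False)
+     w = torch.exp(-dist)                               # (Q, k) distance-based weights
+     neighbors = feats[idx]                             # (Q, k, C) neighboring keypoint features
+     return (w.unsqueeze(-1) * neighbors).sum(dim=1) / w.sum(dim=-1, keepdim=True)
+
+ query = torch.rand(1024, 3)
+ keypts, feats = torch.rand(256, 3), torch.rand(256, 32)
+ f_x = interpolate_features(query, keypts, feats)       # (1024, 32) features for the query points
+ ```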
81
+
82
+ The continuous representation suffers from two drawbacks: First, when a point is far away from the surface, $f(\mathbf{x})$ is no longer a valid representation, but will still contribute to decoding and rendering.
83
+
84
+ Second, the distance $\omega_{v}$ is agnostic to the tangent direction and hence is likely to blur the boundaries. To mitigate the first problem, we incorporate an additional MLP layer that takes into account both the original surface feature $f(\mathrm{p}_v)$ and its relative distance to the query point $\mathbf{x} - \mathbf{p}_v$ , and outputs a distance-aware surface feature $f(\mathrm{p}_v^x) = \mathbf{MLP}(f(\mathrm{p}_v),\mathrm{x} - \mathrm{p}_v)$ . Subsequently, this refined surface feature $f(\mathrm{p}_v^x)$ replaces the original surface feature in Eq. 1 to obtain the feature of query point $\mathbf{x}$ . In addition, we ensure that the sampled points lie near the surface via importance sampling. We resolve the second issue via providing the predicted normal to the decoders as an input. We refer to Sec. 3.3 and 3.4 for details.
85
+
86
+ # 3.3 GENERALIZABLE NEURAL SCENE PRIOR
87
+
88
+ To reconstruct both geometry and texture, i.e., a textured mesh, a direct way is to decode the geometry and texture surface representation (Sec. 3.2) into signed distance and radiance values, render them into RGB and depth pixels (Guo et al., 2022; Yu et al., 2022), and then supervise them by the ground-truth RGB-D images.
89
+
90
+ Unlike the multi-view setting that covers a significant portion of the volumetric space, the single-view input only encompasses a small fraction of it. From our experiments, we found that the joint training approach struggles to generate accurate geometry.
91
+
92
+ Hence, we first learn a geometric network that maps any depth input to its corresponding SDF (Sec. 3.3.1). Once a coarse surface is established, learning the radiance function initialized by it becomes much easier - we pose it in the second stage where a generalizable texture network is introduced similarly (Sec. 3.3.2).
93
+
94
+ # 3.3.1 GENERALIZABLE GEOMETRIC PRIOR
95
+
96
+ We represent scene geometry as a signed distance function, which in our case is conditioned on the geometric surface representation $f_{\mathrm{geo}}(x)$ to allow for generalization across different scenes. Specifically, along each back-projected ray with camera center $\mathbf{o}$ and ray direction $\mathbf{r}$, we sample $N$ points as $\mathbf{x}_i = \mathbf{o} + d_i\mathbf{r}$, $\forall i \in [0, N-1]$. For each sampled point $\mathbf{x}_i$, its geometry feature $f_{\mathrm{geo}}(\mathbf{x}_i)$ can be computed by Eq. 1. Then, the geometry decoder $\phi_{\mathrm{G}}$, taking the point position and its geometry feature as inputs, maps each sampled point to a signed distance, defined as $\mathbf{s}(\mathbf{x}_i) = \phi_{\mathrm{G}}(f_{\mathrm{geo}}(\mathbf{x}_i), \mathbf{x}_i)$. Note that we also apply positional encoding $\gamma(\cdot)$ to the point position as suggested in Mildenhall et al. (2020); we omit it for brevity.
97
+
98
+ Following the formulation of NeuS (Wang et al., 2021a), the estimated depth value $\hat{d}$ is the expected value of the sampled depths $d_{i}$ along the ray:
99
+
100
+ $$
101
+ \hat{d} = \sum_{i}^{N} T_{i} \alpha_{i} d_{i}; \quad T_{i} = \prod_{j=1}^{i-1} \left(1 - \alpha_{j}\right) \tag{2}
102
+ $$
103
+
104
+ $$
105
+ \alpha_{i} = \max\left(\frac{\sigma_{s}(\mathbf{s}(\mathbf{x}_{i})) - \sigma_{s}(\mathbf{s}(\mathbf{x}_{i+1}))}{\sigma_{s}(\mathbf{s}(\mathbf{x}_{i}))}, 0\right),
106
+ $$
107
+
108
+ where $T_{i}$ represents the accumulated transmittance at point $\mathbf{x}_i$ , $\alpha_{i}$ is the opacity value and $\sigma_{s}$ is a Sigmoid function modulated by a learnable parameter $s$ .
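+ The following toy sketch illustrates how the SDF samples along one ray are converted into $\alpha_i$, $T_i$, and a rendered depth according to Eq. 2; the fixed value of $s$ and the toy SDF are assumptions made only for illustration.
+
+ ```python
+ import torch
+
+ def render_depth(sdf, depths, s=64.0):
+     """sdf, depths: (N,) samples sorted along one ray -> rendered (expected) depth."""
+     sig = torch.sigmoid(s * sdf)                                       # sigma_s(s(x_i))
+     alpha = torch.clamp((sig[:-1] - sig[1:]) / (sig[:-1] + 1e-8), min=0.0)
+     trans = torch.cumprod(torch.cat([torch.ones(1), 1.0 - alpha + 1e-8])[:-1], dim=0)
+     weights = trans * alpha                                            # T_i * alpha_i
+     return (weights * depths[:-1]).sum()
+
+ depths = torch.linspace(0.1, 4.0, 64)
+ sdf = 2.0 - depths                       # toy SDF along the ray: surface at depth 2.0
+ print(render_depth(sdf, depths))         # close to 2.0
+ ```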
109
+
110
+ Geometry objectives. To optimize the generalizable geometric representation, we apply a pixel-wise rendering loss on the depth map,
111
+
112
+ $$
113
+ \mathcal{L}_{\text{depth}} = |\hat{d} - \mathrm{D}(x, y)|. \tag{3}
114
+ $$
115
+
116
+ Inspired by (Azinović et al., 2022; Li et al., 2022), we approximate the ground-truth SDF by the distance to the observed depth value along the ray direction, $b(\mathrm{x}_i) = \mathrm{D}(x,y) - d_i$. For points that fall in the near-surface region $|b(\mathrm{x}_i)| \leq \tau$, where $\tau$ is a truncation threshold, we apply the following approximated SDF loss
117
+
118
+ $$
119
+ \mathcal{L}_{\text{near}} = |\mathbf{s}(\mathrm{x}_{i}) - b(\mathrm{x}_{i})| \tag{4}
120
+ $$
121
+
122
+ We also adopt a free-space loss (Ortiz et al., 2022) to penalize negative predictions and overly large positive predictions in the free-space region:
123
+
124
+ $$
125
+ \mathcal{L}_{\text{free}} = \max\left(0,\; e^{-\epsilon \mathbf{s}(\mathrm{x}_{i})} - 1,\; \mathbf{s}(\mathrm{x}_{i}) - b(\mathrm{x}_{i})\right), \tag{5}
126
+ $$
127
+
128
+ where $\epsilon$ is the penalty factor. Then, our approximated SDF loss is
129
+
130
+ $$
131
+ \mathcal{L}_{\mathrm{sdf}} = \begin{cases} \mathcal{L}_{\text{near}} & \text{if } |b(\mathrm{x}_{i})| \leq \tau \\ \mathcal{L}_{\text{free}} & \text{otherwise} \end{cases} \tag{6}
132
+ $$
133
+
134
+ The approximated SDF values provide more explicit and direct supervision than the depth rendering loss (Eq. 3).
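+ A hedged sketch of these geometry objectives (Eqs. 3-6) is given below; the truncation $\tau$ and penalty $\epsilon$ are illustrative values rather than the paper's exact hyper-parameters.
+
+ ```python
+ import torch
+
+ def sdf_losses(pred_sdf, sample_depth, gt_depth, tau=0.05, eps=5.0):
+     """pred_sdf, sample_depth: (N,) values along a ray; gt_depth: scalar observed depth."""
+     b = gt_depth - sample_depth                                 # approximated ground-truth SDF
+     near = b.abs() <= tau                                       # near-surface mask (Eq. 6)
+     l_near = (pred_sdf - b).abs()                               # Eq. 4
+     l_free = torch.clamp(torch.maximum(torch.exp(-eps * pred_sdf) - 1.0,
+                                        pred_sdf - b), min=0.0)  # Eq. 5
+     return torch.where(near, l_near, l_free).mean()
+
+ depth_loss = lambda d_hat, d_gt: (d_hat - d_gt).abs()           # Eq. 3, per pixel
+
+ sample_depth = torch.linspace(0.1, 4.0, 64)
+ loss = sdf_losses(2.0 - sample_depth, sample_depth, gt_depth=torch.tensor(2.0))
+ ```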
135
+
136
+ Surface regularization. To avoid artifacts and invalid predictions, we further use the Eikonal regularization term (Yariv et al., 2021; Ortiz et al., 2022; Wang et al., 2021a), which aims to encourage valid SDF values via the following,
137
+
138
+ $$
139
+ \mathcal{L}_{\mathrm{eik}} = \left(\left\|\nabla_{\mathrm{x}_{i}} \mathbf{s}(\mathrm{x}_{i})\right\|_{2} - 1\right)^{2}, \tag{7}
140
+ $$
141
+
142
+ where $\nabla_{\mathbf{x}_i}\mathbf{s}(\mathbf{x}_i)$ is the gradient of the predicted SDF w.r.t. the sampled point $\mathbf{x}_i$.
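+ In practice, this gradient can be obtained with automatic differentiation; the sketch below shows one way to compute the Eikonal term of Eq. 7 for a stand-in SDF decoder (the small MLP is only a placeholder for $\phi_{\mathrm{G}}$).
+
+ ```python
+ import torch
+
+ def eikonal_loss(sdf_net, x):
+     x = x.detach().requires_grad_(True)
+     grad = torch.autograd.grad(sdf_net(x).sum(), x, create_graph=True)[0]   # d s / d x
+     return ((grad.norm(dim=-1) - 1.0) ** 2).mean()                          # Eq. 7
+
+ sdf_net = torch.nn.Sequential(torch.nn.Linear(3, 64), torch.nn.Softplus(),
+                               torch.nn.Linear(64, 1))
+ loss = eikonal_loss(sdf_net, torch.rand(128, 3))
+ loss.backward()                          # gradients flow into the decoder weights
+ ```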
143
+
144
+ Therefore, we update the geometry encoder and decoder with the generalizable geometry loss as follows,
145
+
146
+ $$
147
+ \mathcal{L}_{\text{geo}} = \lambda_{\text{depth}} \mathcal{L}_{\text{depth}} + \lambda_{\text{sdf}} \mathcal{L}_{\text{sdf}} + \lambda_{\text{eik}} \mathcal{L}_{\text{eik}} \tag{8}
148
+ $$
149
+
150
+ # 3.3.2 GENERALIZABLE TEXTURE PRIOR
151
+
152
+ In the second stage, we build the generalizable texture network on top of the pretrained geometry network presented in Sec. 3.3.1, which provides the SDF prediction as an initialization. Specifically, we learn pixel-wise RGB features, as described in Sec. 3.1, and project them onto the corresponding keypoints. Following the spatial interpolation method in Sec. 3.2, we query the texture feature of any sampled point in 3D space. As aforementioned, the spatial interpolation in Eq. 1 is not aware of the surface tangent directions. For instance, a point at the intersection of two perpendicular planes will be interpolated with keypoints coming from both planes, so representations at boundary regions can be blurred. To address this, we further concatenate the surface normal $\nabla_{\mathbf{x}_i}\mathbf{s}(\mathbf{x}_i)$ predicted in the first stage with the input to compensate for the missing information.
153
+
154
+ With a separate texture decoder $\phi_{\mathrm{tex}}$ , the color of point $\mathbf{x}_i$ is estimated, conditioned on the texture feature $f_{\mathrm{tex}}(\mathbf{x}_i)$ and the surface normal $\nabla_{\mathbf{x}_i}\mathbf{s}(\mathbf{x}_i)$ ,
155
+
156
+ $$
157
+ \mathbf{c}(\mathrm{x}_{i}) = \phi_{\mathrm{tex}}\left(f_{\mathrm{tex}}(\mathrm{x}_{i}), \mathbf{r}, \nabla_{\mathrm{x}_{i}} \mathbf{s}(\mathrm{x}_{i})\right), \tag{9}
158
+ $$
159
+
160
+ where $\mathbf{r}$ is the ray direction. Here we omit the positional encoding of the point position and ray direction for conciseness. Therefore, the predicted pixel color can be expressed as $\hat{\mathbf{c}} = \sum_{i}^{N}T_{i}\alpha_{i}\mathbf{c}_{i}$, where $T_{i}$ and $\alpha_{i}$ are defined as in Eq. 2. We supervise the network by minimizing the L2 loss between the rendered pixel RGB values and their ground-truth values
161
+
162
+ $$
163
+ \mathcal{L}_{\mathrm{rgb}} = \left\| \hat{\mathbf{c}} - \mathrm{I}(x, y) \right\|_{2}^{2}. \tag{10}
164
+ $$
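+ A short sketch of this color rendering and supervision step (Eqs. 9-10) follows; the per-sample weights are assumed to come from the SDF-based renderer of Eq. 2, and the tensor sizes are illustrative.
+
+ ```python
+ import torch
+
+ weights = torch.rand(63)
+ weights = weights / weights.sum()                # stand-in for T_i * alpha_i along one ray
+ colors = torch.rand(63, 3, requires_grad=True)   # c(x_i) predicted by the texture decoder
+ gt_rgb = torch.tensor([0.4, 0.5, 0.6])           # ground-truth pixel color I(x, y)
+
+ c_hat = (weights.unsqueeze(-1) * colors).sum(dim=0)   # rendered pixel color
+ l_rgb = ((c_hat - gt_rgb) ** 2).sum()                 # Eq. 10
+ l_rgb.backward()
+ ```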
165
+
166
+ Meanwhile, we jointly learn the geometry network including the PointConv encoder and geometry decoder introduced in Sec. 3.2, via the same $\mathcal{L}_{\mathrm{geo}}$ . Thus, the total loss function for generalizable texture representation learning is
167
+
168
+ $$
169
+ \mathcal{L}_{\text{tex}} = \lambda_{\text{depth}} \mathcal{L}_{\text{depth}} + \lambda_{\text{sdf}} \mathcal{L}_{\text{sdf}} + \lambda_{\text{eik}} \mathcal{L}_{\text{eik}} + \lambda_{\mathrm{rgb}} \mathcal{L}_{\mathrm{rgb}}. \tag{11}
174
+ $$
175
+
176
+ During volumetric rendering, to restrict the sampled points to be concentrated near the surface, we perform importance sampling based on: (i) the predicted surface, as presented in Wang et al. (2021a), and (ii) the input depth map. More details are in the supplementary material.
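+ As a concrete illustration of the depth-guided part of this sampling, the sketch below draws part of the samples uniformly along the ray and concentrates the rest in a narrow band around the input depth; the band width and sample counts are illustrative choices, not the paper's exact scheme.
+
+ ```python
+ import torch
+
+ def sample_depths(gt_depth, near=0.1, far=5.0, n_uniform=32, n_surface=32, band=0.05):
+     """Mix uniform samples with samples concentrated around the observed depth."""
+     coarse = torch.linspace(near, far, n_uniform)
+     fine = gt_depth + (torch.rand(n_surface) * 2.0 - 1.0) * band   # within +/- band of the depth
+     return torch.sort(torch.cat([coarse, fine])).values            # sorted front-to-back
+
+ t_vals = sample_depths(gt_depth=torch.tensor(2.3))                 # (64,) sample depths for one ray
+ ```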
177
+
178
+ # 3.4 PRIOR-GUIDED PER-SCENE OPTIMIZATION
179
+
180
+ To facilitate large-scale, high-quality scene reconstruction, we can further finetune the pretrained generalizable geometric and texture priors on individual scenes, given multi-view frames. Specifically, we first directly fuse the geometry and texture features of multi-view frames via the scene prior networks. No further learnable modules are required, in contrast to (Chen et al., 2021; Zhang et al., 2022; Li et al., 2022). Then, we design a prior-guided pruning and sampling module, which lets optimization happen near surfaces. In particular, we initialize a grid in the volumetric space via the
181
+
182
+ ![](images/a0a1a9826f87f04652b4ca1a7a0b0f039a9ffa1c064da8dade6128d94c5cc406.jpg)
183
+ Figure 3: Qualitative comparisons for mesh reconstruction on ScanNet. Our method achieves the most complete and fine-detailed reconstruction. The prior reconstruction results and the ground-truth are provided as reference. Better viewed when zoomed in.
184
+
185
+ learned neural scene prior: we estimate the SDF value of each grid from its corresponding feature and remove the grids whose SDF values are larger than a threshold. We note that the generalizable scene prior can be combined with various optimization strategies (Xu et al., 2022; Yu et al., 2022; Wang et al., 2022b). More details can be found in the supplementary materials.
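+ A hedged sketch of this prior-guided pruning is shown below; the grid resolution, the threshold, and the toy sphere SDF standing in for the pretrained geometry prior are all illustrative assumptions.
+
+ ```python
+ import torch
+
+ def prior_sdf(x):                      # stand-in for the pretrained geometry prior
+     return x.norm(dim=-1) - 0.5        # toy SDF: a sphere of radius 0.5
+
+ res, thresh = 64, 0.05
+ lin = torch.linspace(-1.0, 1.0, res)
+ centers = torch.stack(torch.meshgrid(lin, lin, lin, indexing="ij"), dim=-1).reshape(-1, 3)
+
+ with torch.no_grad():
+     keep = prior_sdf(centers).abs() < thresh     # keep only grids close to the predicted surface
+ active_centers = centers[keep]                   # only these grids are sampled and optimized further
+ print(keep.float().mean())                       # fraction of the volume that remains active
+ ```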
186
+
187
+ During the finetuning, we update the scene-prior features and the weights of the MLP decoders to fit the captured images of a specific scene. Besides the objective functions described in Eq. 11, we also introduce a smoothness regularization term to minimize the difference between the gradients of nearby points
188
+
189
+ $$
190
+ \mathcal{L}_{\text{smooth}} = \left\| \nabla_{\mathrm{x}_{i}} \mathbf{s}(\mathrm{x}_{i}) - \nabla_{\mathrm{x}_{i}+\sigma} \mathbf{s}(\mathrm{x}_{i} + \sigma) \right\|_{2}, \tag{12}
191
+ $$
192
+
193
+ where $\sigma$ is a small perturbation value around point $\mathbf{x}_i$ . Thus, the total loss function for per-scene optimization is
194
+
195
+ $$
196
+ \mathcal{L}_{\text{scene}} = \lambda_{\text{depth}} \mathcal{L}_{\text{depth}} + \lambda_{\text{sdf}} \mathcal{L}_{\text{sdf}} + \lambda_{\text{eik}} \mathcal{L}_{\text{eik}} + \lambda_{\mathrm{rgb}} \mathcal{L}_{\mathrm{rgb}} + \lambda_{\mathrm{smooth}} \mathcal{L}_{\mathrm{smooth}}. \tag{13}
201
+ $$
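+ The smoothness term of Eq. 12 can be implemented by comparing autograd gradients at a point and at a perturbed copy, as in the following sketch (the small MLP again stands in for the geometry decoder).
+
+ ```python
+ import torch
+
+ def sdf_grad(sdf_net, x):
+     x = x.detach().requires_grad_(True)
+     return torch.autograd.grad(sdf_net(x).sum(), x, create_graph=True)[0]
+
+ def smoothness_loss(sdf_net, x, sigma=0.01):
+     g1 = sdf_grad(sdf_net, x)
+     g2 = sdf_grad(sdf_net, x + sigma * torch.randn_like(x))   # perturbed nearby points
+     return (g1 - g2).norm(dim=-1).mean()                      # Eq. 12
+
+ sdf_net = torch.nn.Sequential(torch.nn.Linear(3, 64), torch.nn.Softplus(),
+                               torch.nn.Linear(64, 1))
+ print(smoothness_loss(sdf_net, torch.rand(128, 3)))
+ ```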
202
+
203
+ # 4 EXPERIMENTS
204
+
205
+ In this work, we introduce a generalizable network that can be applied to both surface reconstruction and novel view synthesis from RGB-D images in an offline manner. To the best of our knowledge, there is no prior work that targets both tasks. To make fair comparisons, we compare our work with the state-of-the-art (SOTA) approaches for each task, respectively.
206
+
207
+ # 4.1 BASELINES, DATASETS AND METRICS
208
+
209
+ Baselines. To evaluate surface reconstruction, we consider the following two groups of methods. First, we compare our method with RGB-based neural implicit surface reconstruction approaches: ManhattanSDF (Guo et al., 2022) and MonoSDF (Yu et al., 2022), which involve an additional network to extract the geometric prior during training. Second, we consider several RGB-D surface reconstruction approaches that share similar settings with ours: Neural-RGBD (Azinović et al., 2022) and Go-surf (Wang et al., 2022b). In addition, to have a fair comparison, we finetune ManhattanSDF and MonoSDF with ground-truth depth maps as two additional baselines, denoted ManhattanSDF* and MonoSDF*. We follow the setting in (Guo et al., 2022; Azinović et al., 2022) and evaluate the quality of the mesh reconstruction in different scenes. We note that all the above approaches perform per-scene optimization.
210
+
211
+ To evaluate the performance in novel view synthesis, we compare our method with the latest NeRF-based methods for novel view synthesis, including NeRF (Mildenhall et al., 2020), NSVF (Liu et al., 2020), NerfingMVS (Wei et al., 2021), IBRNet (Wang et al., 2021b) and NeRFusion (Zhang et al., 2022). As most existing works are optimized with RGB data only, we further evaluate Go-surf for novel view synthesis from RGB-D images as another baseline. We adopt the evaluation setting in NerfingMVS, where we evaluate our method on 8 scenes, and for each scene, we pick 40 images covering a local region and hold out 1/8 of these as the test set for novel view synthesis.
212
+
213
+ Datasets. We mainly perform experiments on ScanNetV2 (Dai et al., 2017a) for both surface reconstruction and novel view synthesis tasks. Specifically, we first train the generalizable neural scene prior on the ScanNetV2 training set and then evaluate its performance on two testing splits proposed by Guo et al. (2022) and Wei et al. (2021) for surface reconstruction and novel view synthesis, respectively. The ground truth of ScanNetV2, produced by BundleFusion (Dai et al., 2017b), is
214
+
215
+ <table><tr><td>Method</td><td>depth</td><td>opt. (min)</td><td>Acc↓</td><td>Comp↓</td><td>Prec↑</td><td>Recall↑</td><td>F-score↑</td></tr><tr><td>ManhattanSDF (Guo et al., 2022)</td><td>SfM</td><td>640</td><td>0.072</td><td>0.068</td><td>0.621</td><td>0.586</td><td>0.602</td></tr><tr><td>MonoSDF (Yu et al., 2022)</td><td>network</td><td>720</td><td>0.039</td><td>0.044</td><td>0.775</td><td>0.722</td><td>0.747</td></tr><tr><td>NeuRIS (Wang et al., 2022a)</td><td>network</td><td>480</td><td>0.051</td><td>0.048</td><td>0.720</td><td>0.674</td><td>0.696</td></tr><tr><td>FastMono (Dong et al., 2023)</td><td>network</td><td>30</td><td>0.042</td><td>0.056</td><td>0.751</td><td>0.678</td><td>0.710</td></tr><tr><td>HelixSurf (Liang et al., 2023)</td><td>network</td><td>30</td><td>0.038</td><td>0.044</td><td>0.786</td><td>0.727</td><td>0.755</td></tr><tr><td>ManhattanSDF* (Guo et al., 2022)</td><td>GT.</td><td>640</td><td>0.027</td><td>0.032</td><td>0.915</td><td>0.883</td><td>0.907</td></tr><tr><td>MonoSDF* (Yu et al., 2022)</td><td>GT.</td><td>720</td><td>0.033</td><td>0.026</td><td>0.942</td><td>0.912</td><td>0.926</td></tr><tr><td>Neural-RGBD (Azinović et al., 2022)</td><td>GT.</td><td>240</td><td>0.055</td><td>0.022</td><td>0.932</td><td>0.918</td><td>0.925</td></tr><tr><td>Go-surf (Wang et al., 2022b)</td><td>GT.</td><td>35</td><td>0.052</td><td>0.018</td><td>0.946</td><td>0.956</td><td>0.950</td></tr><tr><td>Ours-prior (w/o per-scene opt.)</td><td>-</td><td>-</td><td>0.084</td><td>0.057</td><td>0.695</td><td>0.764</td><td>0.737</td></tr><tr><td>Ours (w per-scene opt.)</td><td>GT.</td><td>15</td><td>0.049</td><td>0.017</td><td>0.947</td><td>0.962</td><td>0.954</td></tr></table>
216
+
217
+ Table 1: Quantitative comparisons for mesh reconstruction on ScanNet. We compare with a number of baselines. “*” is our re-implementation with dense ground-truth depth map. “opt.” stands for the optimization time for per-scene fine-tuning.
218
+
219
+ <table><tr><td>Method</td><td>#frame</td><td>Acc ↓</td><td>Comp ↓</td><td>C-ℓ1 ↓</td><td>NC ↑</td><td>F-score↑</td></tr><tr><td>BundleFusion (Dai et al., 2017b)</td><td>1,000</td><td>0.0191</td><td>0.0581</td><td>0.0386</td><td>0.9027</td><td>0.8439</td></tr><tr><td>COLMAP (Schönberger et al., 2016)</td><td>1,000</td><td>0.0271</td><td>0.0322</td><td>0.0296</td><td>0.9134</td><td>0.8744</td></tr><tr><td>ConvOccNets (Peng et al., 2020)</td><td>1,000</td><td>0.0498</td><td>0.0524</td><td>0.0511</td><td>0.8607</td><td>0.6822</td></tr><tr><td>SIREN (Sitzmann et al., 2020)</td><td>1,000</td><td>0.0229</td><td>0.0412</td><td>0.0320</td><td>0.9049</td><td>0.8515</td></tr><tr><td>Neural RGBD (Azinović et al., 2022)</td><td>1,000</td><td>0.0151</td><td>0.0197</td><td>0.0174</td><td>0.9316</td><td>0.9635</td></tr><tr><td>Go-surf (Wang et al., 2022b)</td><td>1,000</td><td>0.0158</td><td>0.0195</td><td>0.0177</td><td>0.9317</td><td>0.9591</td></tr><tr><td>Ours</td><td>1,000</td><td>0.0172</td><td>0.0192</td><td>0.0177</td><td>0.9311</td><td>0.9529</td></tr><tr><td>Go-surf (Wang et al., 2022b)</td><td>30</td><td>0.0246</td><td>0.0442</td><td>0.0336</td><td>0.9117</td><td>0.9042</td></tr><tr><td>Ours</td><td>30</td><td>0.0177</td><td>0.0292</td><td>0.0234</td><td>0.9207</td><td>0.9311</td></tr></table>
220
+
221
+ Table 2: Quantitative evaluation of the reconstruction quality on 10 synthetic scenes. Our method shows competitive results when reconstructing with only 30 frames per room, as shown in the lower part of the table.
222
+
223
+ known to be noisy, making accurate evaluations against it challenging. To further validate our method, we also conduct experiments on 10 synthetic scenes proposed by Azinović et al. (2022).
224
+
225
+ Evaluation Metrics. For 3D reconstruction, we evaluate our method in terms of mesh reconstruction quality used in Guo et al. (2022). Meanwhile, we measure the PSNR, SSIM, and LPIPS for novel view synthesis quality.
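+ For reference, the sketch below shows a common way to compute these mesh reconstruction metrics from sampled point clouds; the 5 cm threshold is an assumption following standard ScanNet evaluation practice, not a value quoted from this paper.
+
+ ```python
+ import torch
+
+ def mesh_metrics(pred, gt, thresh=0.05):
+     """pred, gt: (N, 3) points sampled from the predicted and ground-truth meshes."""
+     d_pred_gt = torch.cdist(pred, gt).min(dim=1).values   # predicted -> GT distances
+     d_gt_pred = torch.cdist(gt, pred).min(dim=1).values   # GT -> predicted distances
+     acc, comp = d_pred_gt.mean(), d_gt_pred.mean()        # accuracy / completeness
+     prec = (d_pred_gt < thresh).float().mean()
+     recall = (d_gt_pred < thresh).float().mean()
+     fscore = 2 * prec * recall / (prec + recall + 1e-8)
+     return acc, comp, prec, recall, fscore
+
+ print(mesh_metrics(torch.rand(2000, 3), torch.rand(2000, 3)))
+ ```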
226
+
227
+ # 4.2 COMPARISONS WITH THE STATE-OF-THE-ART METHODS
228
+
229
+ Surface reconstruction. Table 1 provides a quantitative comparison of our methods against SOTA approaches for surface reconstruction (Guo et al., 2022; Yu et al., 2022; Wang et al., 2022a; Liang et al., 2023). Within our methods, the feed-forward NFPs are denoted as Ours-prior, while the per-scene optimized networks are labeled as Ours. We list the RGB- and RGB-D-based approaches in the top and middle rows, whereas ours are placed in the bottom section. While we include ManhattanSDF (Guo et al., 2022) and MonoSDF (Yu et al., 2022), which are supervised by predicted or sparse depth information, in the top row, to ensure fair comparisons we re-implement them by replacing the original supervision with ground-truth depth, as in the middle row (denoted by $*$). Generally, using ground-truth depth consistently enhances the reconstruction performance.
230
+
231
+ Comparison with NFPs on ScanNet. In contrast to all the other approaches, which require time-consuming per-scene optimization, the NFPs can extract the geometric structure through a single forward pass. The results in Table 1 demonstrate that, even without per-scene optimization, the NFPs network not only achieves performance on par with RGB-based approaches but also operates hundreds of times faster. Note that, in contrast to all the other approaches in Table 1, which use around 400 frames to optimize the scene-specific neural fields, Ours-prior takes only around 40 frames per scene as input to achieve comparable mesh reconstruction results without per-scene optimization.
232
+
233
+ Comparison with optimized NFPs on ScanNet. We further perform per-scene optimization on top of the NFPs network. Compared with methods using additional supervision or ground-truth depth maps, our method demonstrates more accurate results on the majority of the metrics. More importantly, our method is also much faster than the SOTA approaches. Some qualitative results are shown in Fig. 3 and more results can be found in the supplementary materials.
234
+
235
+ Comparison on synthetic scenes. Table 2 compares our approach with the most recent works on neural surface reconstruction from RGB-D images. The results demonstrate that our method achieves
236
+
237
+ ![](images/8f22cb39d45b6bbda31d37915fc86924a52fa5d3e3cf179490d0cedbb5e0677a.jpg)
238
+ Figure 4: Qualitative comparison for novel view synthesis on ScanNet. We compare our method with baselines which achieves the competitive geometry reconstruction performance. Our approach produces more realistic rendering results than two other baselines.
239
+
240
+ comparable performance with most existing works, even when optimizing with a limited number of frames, such as 1,000 vs 30.
241
+
242
+ Results on novel view synthesis. To validate the learned radiance representation, we further conduct experiments on novel view synthesis. The quantitative and qualitative results are shown in Table 3 and Fig. 4, respectively. Table 3 shows that the proposed method achieves comparable, if not better, results compared to SOTA novel view synthesis methods (Wang et al., 2021b; Zhang et al., 2022; Liu et al., 2020). We note that our method outperforms Go-surf in this instance, even when both methods achieve
243
+
244
+ <table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td></tr><tr><td>NeRF (Mildenhall et al., 2020)</td><td>24.04</td><td>0.860</td><td>0.334</td></tr><tr><td>NSVF (Liu et al., 2020)</td><td>26.01</td><td>0.881</td><td>-</td></tr><tr><td>NeRFingMVS (Wei et al., 2021)</td><td>26.37</td><td>0.903</td><td>0.245</td></tr><tr><td>IBRNet (Wang et al., 2021b)</td><td>25.14</td><td>0.871</td><td>0.266</td></tr><tr><td>NeRFusion (Zhang et al., 2022)</td><td>26.49</td><td>0.915</td><td>0.209</td></tr><tr><td>Go-surf (Wang et al., 2022b)</td><td>25.47</td><td>0.894</td><td>0.420</td></tr><tr><td>Ours</td><td>26.88</td><td>0.909</td><td>0.244</td></tr></table>
245
+
246
+ comparable geometric reconstruction performance. This suggests that our learned prior representation offers distinct advantages for novel view synthesis. In addition, as shown in Fig. 4, both NerfingMVS (Wei et al., 2021) and Go-surf (Wang et al., 2022b) fail on scenes with complex geometry and large camera motion. The generalized representation enables the volumetric rendering to focus on more informative regions during optimization and improves its performance for rendering RGB images of novel views.
247
+
248
+ # 4.3 ABLATION STUDIES
249
+
250
+ We further perform ablation studies to evaluate the effectiveness and efficiency of the neural prior network. Effectiveness of the generalized representation. Table 4 shows the results with and without the generalized representation. For the model without the generalized representation, we randomly initialize the parameters of the feature grids and decoders while keeping the other components unchanged. We observe that integrating the geometry prior and/or color prior consistently improves the performance on 3D reconstruction and novel view synthesis.
251
+
252
+ Fast optimization. Our approach achieves high-quality reconstruction in approximately 1.5K iterations within 15 minutes. As illustrated in Fig. 5, our method reaches a high F-score at a very early training stage, while ManhattanSDF* (Guo et al., 2022) and MonoSDF* (Yu et al., 2022) take many more iterations to reach similar performance.
253
+
254
+ # 5 CONCLUSION
255
+
256
+ Table 3: Quantitative comparisons for novel view synthesis on ScanNet. The best two results of different metrics are highlighted.
257
+
258
+ <table><tr><td>Geo. prior</td><td>Acc↓</td><td>Comp↓</td><td>F-score↑</td></tr><tr><td rowspan="2">✓</td><td>0.079</td><td>0.031</td><td>0.851</td></tr><tr><td>0.046</td><td>0.030</td><td>0.862</td></tr><tr><td>Color prior</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS↓</td></tr><tr><td rowspan="2">✓</td><td>25.87</td><td>0.899</td><td>0.415</td></tr><tr><td>26.88</td><td>0.909</td><td>0.246</td></tr></table>
259
+
260
+ Table 4: Ablation studies on geometric and texture prior. We report both mesh reconstruction metrics and novel view synthesis metrics.
261
+
262
+ ![](images/302589125f3d64e5397110cd70af0ac8ddf68364f86c1ed3ddae74403796c46f.jpg)
263
+ Figure 5: Ablation studies on the number of training iterations for per-scene optimization.
264
+
265
+ In this work, we present a generalizable scene prior that enables fast, large-scale scene reconstruction of geometry and texture. Our model follows a single-view RGB-D input setting and allows direct, non-learnable fusion of images. We design a two-stage paradigm to learn the generalizable geometric and texture networks. Large-scale, high-fidelity scene reconstruction can be obtained with efficient fine-tuning on top of the pretrained scene priors, even with limited views. We demonstrate that our approach achieves state-of-the-art indoor scene reconstruction quality, with fine geometric details and realistic texture.
266
+
267
+ Acknowledgement. This work was supported, in part, by NSF CAREER Award IIS-2240014, the Qualcomm Innovation Fellowship, and an Amazon Research Award.
268
+
269
+ # REFERENCES
270
+
271
+ Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6290–6301, 2022. 1, 3, 4, 5, 7, 8, 15, 16, 18, 19
272
+ Sai Bi, Nima Khademi Kalantari, and Ravi Ramamoorthi. Patch-based optimization for image-based texture mapping. ACM Trans. Graph., 36(4):106-1, 2017. 3
273
+ Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 14124-14133, 2021. 1, 2, 3, 6, 14
274
+ Rui Chen, Songfang Han, Jing Xu, and Hao Su. Point-based multi-view stereo network. In ICCV, pp. 1538-1547, 2019. 18
275
+ Shuo Cheng, Zexiang Xu, Shilin Zhu, Zhuwen Li, Li Erran Li, Ravi Ramamoorthi, and Hao Su. Deep stereo using adaptive thin volume representation with uncertainty awareness. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2524-2534, 2020. 3
276
+ Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, pp. 5828-5839, 2017a. 7, 14, 15, 16, 18, 19
277
+ Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG), 36(4):1, 2017b. 1, 3, 7, 8
278
+ Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised nerf: Fewer views and faster training for free. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12882-12891, 2022. 3
279
+ Wei Dong, Christopher Choy, Charles Loop, Or Litany, Yuke Zhu, and Anima Anandkumar. Fast monocular scene reconstruction with global-sparse local-dense grids. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4263-4272, 2023. 3, 8
280
+ Arda Düçeker, Silvano Galliani, Christoph Vogel, Pablo Speciale, Mihai Dusmanu, and Marc Pollefeys. DeepVideoMVS: Multi-View Stereo on Video with Recurrent Spatio-Temporal Fusion. arXiv preprint arXiv:2012.02177, 2020.3
281
+ Arda Duzceker, Silvano Galliani, Christoph Vogel, Pablo Speciale, Mihai Dusmanu, and Marc Pollefeys. Deepvideomvs: Multi-view stereo on video with recurrent spatio-temporal fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15324-15333, 2021. 18
282
+ Haoyu Guo, Sida Peng, Haotong Lin, Qianqian Wang, Guofeng Zhang, Hujun Bao, and Xiaowei Zhou. Neural 3d scene reconstruction with the manhattan-world assumption. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5511-5520, 2022. 1, 3, 5, 7, 8, 9, 15, 16
283
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016. 4, 14
284
+ Yuxin Hou, Juho Kannala, and Arno Solin. Multi-view stereo by temporal nonparametric fusion. In ICCV, pp. 2651-2660, 2019. 18
285
+ Jiahui Huang, Shi-Sheng Huang, Haoxuan Song, and Shi-Min Hu. Di-fusion: Online implicit 3d reconstruction with deep priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8932-8941, 2021. 2
286
+
287
+ Po-Han Huang, Kevin Matzen, Johannes Kopf, Narendra Ahuja, and Jia-Bin Huang. Deepmvs: Learning multi-view stereopsis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2821-2830, 2018. 3
288
+ Mohammad Mahdi Johari, Yann Lepoittevin, and François Fleuret. Geonerf: Generalizing nerf with geometry priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18365-18375, 2022. 3
289
+ Kejie Li, Yansong Tang, Victor Adrian Prisacariu, and Philip HS Torr. Bnv-fusion: Dense 3d reconstruction using bi-level neural volume fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6166-6175, 2022. 1, 3, 5, 6, 14
290
+ Zhihao Liang, Zhangjin Huang, Changxing Ding, and Kui Jia. Helixsurf: A robust and efficient neural implicit surface learning of indoor scenes with iterative intertwined regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13165-13174, 2023. 8
291
+ Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In NeurIPS, 2020. 2, 7, 9
292
+ Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. Sparseneus: Fast generalizable neural surface reconstruction from sparse views. arXiv preprint arXiv:2206.05737, 2022.1
293
+ Keyang Luo, Tao Guan, Lili Ju, Haipeng Huang, and Yawei Luo. P-mvsnet: Learning patchwise matching confidence aggregation for multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10452-10461, 2019. 3
294
+ Paul Merrell, Amir Akbarzadeh, Liang Wang, Philippos Mordohai, Jan-Michael Frahm, Ruigang Yang, David Nister, and Marc Pollefeys. Real-time visibility-based fusion of depth maps. In 2007 IEEE 11th International Conference on Computer Vision, pp. 1-8. IEEE, 2007. 3
295
+ Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In ECCV, pp. 405-421. Springer, 2020. 5, 7, 9
296
+ Zak Murez, Tarrence van As, James Bartolozzi, Ayan Sinha, Vijay Badrinarayanan, and Andrew Rabinovich. Atlas: End-to-end 3d scene reconstruction from posed images. In ECCV, 2020. 18
297
+ Richard A Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J Davison, Pushmeet Kohi, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. Kinectfusion: Real-time dense surface mapping and tracking. In 2011 10th IEEE international symposium on mixed and augmented reality, pp. 127-136. IEEE, 2011. 3
298
+ Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In CVPR, pp. 3504-3515, 2020. 3
299
+ Pawel Nowacki and Marek Woda. Capabilities of arcore and arkit platforms for ar/vr applications. In International Conference on Dependability and Complex Systems, pp. 358-370. Springer, 2019. 2
300
+ Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5589-5599, 2021. 3
301
+ Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot perception. arXiv preprint arXiv:2204.02296, 2022. 5, 6
302
+ Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 19
303
+
304
+ Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision, pp. 523-540. Springer, 2020. 8
305
+ Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pp. 234-241. Springer, 2015. 4, 14
306
+ Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, pp. 4104-4113, 2016. 3
307
+ Johannes L Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise view selection for unstructured multi-view stereo. In ECCV, pp. 501-518. Springer, 2016. 3, 8
308
+ Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in Neural Information Processing Systems, 33:7462-7473, 2020. 8
309
+ Jan Smisek, Michal Jancosek, and Tomas Pajdla. 3d with Kinect. In *Consumer depth cameras for computer vision*, pp. 3-25. Springer, 2013. 2
310
+ Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6229-6238, 2021. 3
311
+ Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In CVPR, 2022a. 3
312
+ Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5459-5469, 2022b. 2
313
+ Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. Neuralrecon: Real-time coherent 3d reconstruction from monocular video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15598-15607, 2021. 3, 18
314
+ Towaki Takikawa, Joey Litalien, Kangxue Yin, Karsten Kreis, Charles Loop, Derek Nowrouzezahrai, Alec Jacobson, Morgan McGuire, and Sanja Fidler. Neural geometric level of detail: Real-time rendering with implicit 3d shapes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11358-11367, 2021. 2
315
+ Matthew Tancik, Ethan Weber, Evonne Ng, Ruilong Li, Brent Yi, Justin Kerr, Terrance Wang, Alexander Kristoffersen, Jake Austin, Kamyar Salahi, Abhik Ahuja, David McAllister, and Angjoo Kanazawa. Nerfstudio: A modular framework for neural radiance field development. arXiv preprint arXiv:2302.04264, 2023. 19
316
+ Jiepeng Wang, Peng Wang, Xiaoxiao Long, Christian Theobalt, Taku Komura, Lingjie Liu, and Wenping Wang. Neuris: Neural reconstruction of indoor scenes using normal priors. In European Conference on Computer Vision, pp. 139-155. Springer, 2022a. 8
317
+ Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Go-surf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. arXiv preprint arXiv:2206.14735, 2022b. 1, 2, 7, 8, 9, 15, 16
318
+ Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021a. 2, 3, 4, 5, 6, 14
319
+ Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4690-4699, 2021b. 1, 3, 7, 9
320
+
321
+ Yi Wei, Shaohui Liu, Yongming Rao, Wang Zhao, Jiwen Lu, and Jie Zhou. Nerfingmvs: Guided optimization of neural radiance fields for indoor multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5610-5619, 2021. 3, 7, 9, 16
322
+ Francis Williams, Zan Gojcic, Sameh Khamis, Denis Zorin, Joan Bruna, Sanja Fidler, and Or Litany. Neural fields as learnable kernels for 3d reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18500-18510, 2022. 3, 4
323
+ Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9621-9630, 2019. 4, 14
324
+ Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5438-5448, 2022. 1, 3, 7
325
+ Yao Yao, Zixin Luo, Shiwei Li, Tian Fang, and Long Quan. Mvsnet: Depth inference for unstructured multi-view stereo. In Proceedings of the European conference on computer vision (ECCV), pp. 767-783, 2018. 3
326
+ Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Advances in Neural Information Processing Systems, 33:2492-2502, 2020. 3
327
+ Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems, 34:4805-4815, 2021. 2, 3, 4, 6, 14
328
+ Lin Yen-Chen, Pete Florence, Jonathan T Barron, Alberto Rodriguez, Phillip Isola, and Tsung-Yi Lin. iNeRF: Inverting Neural Radiance Fields for Pose Estimation. arXiv preprint arXiv:2012.05877, 2020. 2
329
+ Zehao Yu and Shenghua Gao. Fast-mvsnet: Sparse-to-dense multi-view stereo with learned propagation and gauss-newton refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1949-1958, 2020. 18
330
+ Zehao Yu, Songyou Peng, Michael Niemeyer, Torsten Sattler, and Andreas Geiger. Monosdf: Exploring monocular geometric cues for neural implicit surface reconstruction. arXiv preprint arXiv:2206.00665, 2022. 1, 3, 5, 7, 8, 9, 15, 16
331
+ Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 3
332
+ Xiaoshuai Zhang, Sai Bi, Kalyan Sunkavalli, Hao Su, and Zexiang Xu. Nerfusion: Fusing radiance fields for large-scale scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5449-5458, 2022. 1, 3, 6, 7, 9, 14
333
+ Zhengyou Zhang. Microsoft Kinect sensor and its effect. IEEE multimedia, 19(2):4-10, 2012. 2
334
+ Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3d: A modern library for 3d data processing. arXiv preprint arXiv:1801.09847, 2018. 1
335
+ Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12786-12796, 2022. 3
336
+ Zi-Xin Zou, Shi-Sheng Huang, Yan-Pei Cao, Tai-Jiang Mu, Ying Shan, and Hongbo Fu. Mononeuralfusion: Online monocular neural 3d reconstruction with geometric priors. arXiv preprint arXiv:2209.15153, 2022. 16, 18
337
+
338
+ # APPENDIX
339
+
340
+ In this supplementary document, we provide additional implementation details (Sec. A), comparisons with RGB-D surface reconstruction and MVS-based methods on ScanNet (Sec. B.1, Sec. B.2, and Sec. B.3), single-view novel view synthesis (Sec. B.4), and qualitative results (Sec. B.5 and Sec. B.6). We specifically discuss the importance of the sampling methods w.r.t. the two-stage generalizable prior training, which plays an important role in the surface representation, as described in Sec. 3.2 of the main paper. We provide additional experiments including (i) quantitative results of the neural scene prior, i.e., without per-scene optimization, (ii) quantitative comparisons with state-of-the-art RGB-D surface reconstruction approaches, (iii) quantitative comparisons with MVS-based approaches, (iv) single-view novel view synthesis, and (v) qualitative results on ScanNet and self-collected data. Videos of full-size room reconstructions are included in the supplementary material.
341
+
342
+ # A IMPLEMENTATION DETAILS
343
+
344
+ # A.1 GENERALIZABLE NEURAL SCENE PRIOR
345
+
346
+ The generalizable neural scene prior is trained on the training split of ScanNet Dai et al. (2017a). In this section, we discuss the details of each component, including the geometry encoder, the texture encoder, the generalizable geometric prior module, and the generalizable texture prior.
347
+
348
+ Geometry and Texture Encoder. For the geometry encoder, we first sample 512 keypoints from all the points projected from 2D pixels via Farthest Point Sampling (FPS) for each frame. For each surface point, we apply the K-Nearest-Neighbor algorithm to select 16 adjacent points. We then adopt two PointConv Wu et al. (2019) layers to extract the geometry features, with the number of output channels set to 64. To extract the RGB features, we use a U-Net Ronneberger et al. (2015) with ResNet34 He et al. (2016) as the backbone network. We further use an additional convolutional layer to output a per-point feature of dimension 32. All the encoder modules are jointly trained with the whole pipeline.
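+
+ The keypoint selection described above can be sketched as follows. This is a minimal illustration of FPS and K-Nearest-Neighbor grouping in plain PyTorch, assuming the back-projected point cloud of one frame is given as an (N, 3) tensor; the function names are ours, not the released code.
+
+ ```python
+ import torch
+
+ def farthest_point_sampling(points: torch.Tensor, num_samples: int) -> torch.Tensor:
+     """Greedy FPS over an (N, 3) point cloud; returns indices of the sampled keypoints."""
+     n = points.shape[0]
+     selected = torch.zeros(num_samples, dtype=torch.long)
+     dists = torch.full((n,), float("inf"))
+     selected[0] = torch.randint(n, (1,)).item()
+     for i in range(1, num_samples):
+         # Update each point's distance to its nearest already-selected keypoint.
+         dists = torch.minimum(dists, (points - points[selected[i - 1]]).pow(2).sum(-1))
+         selected[i] = torch.argmax(dists)
+     return selected
+
+ def knn_groups(points: torch.Tensor, keypoints: torch.Tensor, k: int = 16) -> torch.Tensor:
+     """For every keypoint, return the indices of its k nearest neighbours in the full cloud."""
+     d = torch.cdist(keypoints, points)       # (M, N) pairwise distances
+     return d.topk(k, largest=False).indices  # (M, k) neighbour indices
+
+ # Example: 512 keypoints with 16 neighbours each, as in the paper.
+ cloud = torch.rand(20000, 3)  # back-projected points of one frame (placeholder)
+ key_idx = farthest_point_sampling(cloud, 512)
+ groups = knn_groups(cloud, cloud[key_idx], k=16)  # input groups for the PointConv layers
+ ```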
349
+
350
+ Generalizable Geometric Prior. Given an RGB-D image and its corresponding camera pose, we first randomly sample 256 rays from regions where the depth values are valid, i.e., non-zero. Then, for each ray, we define a small truncation region near the ground-truth depth in which 32 points are sampled uniformly. We then use two MLPs to map the geometry features to SDF values. The hyperparameters $\lambda_{\mathrm{depth}}$ , $\lambda_{\mathrm{sdf}}$ and $\lambda_{\mathrm{eik}}$ are set to 1.0, 1.0 and 0.5, respectively.
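+
+ A minimal sketch of how the three weighted terms could be combined is given below; the exact form of each individual term follows the losses referenced in the main paper, and the tensor names are illustrative only.
+
+ ```python
+ import torch
+
+ def geometric_prior_loss(pred_sdf, target_sdf, pred_depth, gt_depth, grad_norm,
+                          lambda_depth=1.0, lambda_sdf=1.0, lambda_eik=0.5):
+     """Weighted sum of the geometric-prior terms (a sketch, not the released implementation).
+
+     pred_sdf:   (R, S) predicted SDF at the samples inside the truncation region
+     target_sdf: (R, S) approximate SDF target derived from the sensor depth
+     pred_depth: (R,)   depth estimated per ray
+     gt_depth:   (R,)   sensor depth per ray (only rays with valid depth are sampled)
+     grad_norm:  (R, S) norm of the SDF gradient at the samples
+     """
+     loss_depth = (pred_depth - gt_depth).abs().mean()  # depth supervision
+     loss_sdf = (pred_sdf - target_sdf).abs().mean()     # approximate SDF supervision
+     loss_eik = (grad_norm - 1.0).pow(2).mean()          # eikonal regularization
+     return lambda_depth * loss_depth + lambda_sdf * loss_sdf + lambda_eik * loss_eik
+ ```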
351
+
352
+ Generalizable Texture Prior. Initialized with the geometric prior, we learn the texture prior via the volumetric rendering loss Wang et al. (2021a); Yariv et al. (2021). Different from the sampling strategy used in geometric prior learning, we restrict the importance sampling to samples concentrated on the surface, as described in Sec. 3.4 of the main paper. In particular, we first sample 2048 rays from each RGB-D image and uniformly sample 64 points per ray within the predefined near-far range. Then, following Wang et al. (2021a), we sample 48 points that are close to the predicted surface. This sampling strategy is employed during both training and inference. Additionally, during training, for rays with non-zero depth values, we further sample 16 points within the truncation region around the ray's depth. Therefore, 128 points are sampled along each ray. For each point, we utilize 2 MLPs in the texture decoder to estimate its RGB value. The hyperparameters $\lambda_{\mathrm{depth}}$ , $\lambda_{\mathrm{sdf}}$ , $\lambda_{\mathrm{eik}}$ and $\lambda_{\mathrm{rgb}}$ are set to 1.0, 1.0, 0.5 and 10.0, respectively.
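+
+ The per-ray sample budget (64 uniform + 48 surface-guided + 16 truncation = 128 points) can be sketched as follows; the truncation width and the exact surface-sampling rule here are placeholders rather than the paper's values.
+
+ ```python
+ import torch
+
+ def sample_points_along_ray(near, far, gt_depth, surface_depth, truncation=0.05,
+                             n_uniform=64, n_surface=48, n_trunc=16):
+     """Compose the 128 per-ray samples used for texture-prior training (illustrative sketch)."""
+     # 1) Coarse uniform samples in the predefined near-far range.
+     t_uniform = torch.linspace(near, far, n_uniform)
+     # 2) Importance samples concentrated around the currently predicted surface.
+     t_surface = surface_depth + truncation * (torch.rand(n_surface) * 2.0 - 1.0)
+     # 3) Training only: extra samples in the truncation region around the sensor depth.
+     t_trunc = gt_depth + truncation * (torch.rand(n_trunc) * 2.0 - 1.0)
+     return torch.sort(torch.cat([t_uniform, t_surface, t_trunc])).values  # (128,)
+
+ # Example usage with scalar ray bounds and depths (placeholders).
+ t_samples = sample_points_along_ray(near=0.1, far=5.0, gt_depth=2.3, surface_depth=2.28)
+ ```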
353
+
354
+ Scene prior extraction and fusion. To leverage multiple RGB-D frames, we use the scene prior networks to directly aggregate the keypoints, along with their geometry and texture features, from these frames in the volumetric space. The colored surface reconstruction can then be decoded from the fused representation following the same procedure as in Sec. 3.1 and 3.2. No further learnable modules are required, in contrast to Chen et al. (2021); Zhang et al. (2022); Li et al. (2022).
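+
+ A minimal sketch of this fusion step is shown below, assuming each frame provides its keypoints in camera coordinates, their concatenated features, and a camera-to-world pose; since fusion is a simple concatenation in world space, no learnable module is involved.
+
+ ```python
+ import torch
+
+ def fuse_scene_prior(frames):
+     """Aggregate per-frame keypoints and features into a single scene-level set (a sketch).
+
+     Each frame dict is assumed to hold:
+       'xyz_cam': (M, 3) keypoint coordinates in the camera frame
+       'feat':    (M, C) concatenated geometry and texture features
+       'pose':    (4, 4) camera-to-world transform
+     """
+     world_pts, feats = [], []
+     for f in frames:
+         R, t = f["pose"][:3, :3], f["pose"][:3, 3]
+         world_pts.append(f["xyz_cam"] @ R.T + t)  # keypoints transformed into world space
+         feats.append(f["feat"])
+     return torch.cat(world_pts, dim=0), torch.cat(feats, dim=0)
+ ```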
355
+
356
+ Prior-guided pruning and sampling. To optimize a single scene, we discard the encoders and treat the volume feature representation as learnable, optimizing it together with the decoders. To further speed up the optimization, we accelerate the feature query of the sampled points: instead of optimizing the unstructured keypoints, for which feature extraction can be inefficient, we introduce prior-guided voxel pruning to combine the advantages of voxel-grid sampling and the surface representation. Specifically, we initialize uniform grids in the volumetric space and then query
357
+
358
+ each grid feature. Instead of optimizing a large number of uniform grids, we remove redundant grids adaptively based on the geometric prior using Algorithm 1 described below. To concentrate the sampled ray points near the surface, we apply an importance sampling strategy, similar to that used in training the generalizable texture prior, to mask out samples far away from the surface. Starting from a large threshold at the early training stage, we decrease it gradually as training proceeds so that more unnecessary grids are pruned. A similar procedure is also applied to the coarsely sampled points to remove uninformative samples and concentrate more points around the surface region. Notably, compared to the voxel-based approach Wang et al. (2022b) with more than 4,000,000 uniform grids to be optimized, the number of learnable keypoints in our case is around 40,000, a $100\mathrm{x}$ reduction in computational complexity.
359
+
360
+ Algorithm 1: Prior-guided voxel pruning
361
+ ```latex
362
+ Input: Grid feature $\{f_i\}_{i = 1:N}$
363
+ Grid position $\{\mathbf{x}_i\}_{i = 1:N}$
364
+ Positional encoding $\gamma (\cdot)$
365
+ Geometry decoder $\mathbf{s}(\cdot)$
366
+ Number of grids $N$
367
+ Number of iterations $T$
368
+ Output: Grid features after pruning $\{f_j\}_{j = 1:M}$
369
+ Initialization : $\tau_0 = 0.16$
370
+ for $t = 1:T$ do
371
+ $\tau = \max (0.005,0.8^{\frac{20t}{T}}\cdot \tau_{0})$
372
+ for $i = 1:N$ do
373
+ $s_i \gets \mathbf{s}(f_i, \gamma(\mathbf{x}_i))$; if $|s_i| \geq \tau$ then prune the $i$-th grid; end
374
+ end
375
+ ```
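+
+ For reference, a compact PyTorch sketch of one pruning pass of Algorithm 1 is given below; `sdf_decoder` and `pos_enc` stand in for the geometry decoder $\mathbf{s}(\cdot)$ and the positional encoding $\gamma(\cdot)$, and the interface is an assumption rather than the released code.
+
+ ```python
+ import torch
+
+ def prune_grids(grid_feat, grid_xyz, sdf_decoder, pos_enc, t, T, tau0=0.16):
+     """One pruning pass of Algorithm 1 (illustrative sketch)."""
+     # The threshold decays over training, so more grids are pruned as the surface sharpens.
+     tau = max(0.005, 0.8 ** (20.0 * t / T) * tau0)
+     with torch.no_grad():
+         sdf = sdf_decoder(grid_feat, pos_enc(grid_xyz)).squeeze(-1)  # (N,) predicted SDF
+     keep = sdf.abs() < tau  # keep only grids close to the predicted surface
+     return grid_feat[keep], grid_xyz[keep]
+ ```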
376
+
377
+ # B ADDITIONAL EXPERIMENTS
378
+
379
+ # B.1 COMPARISON WITH RGB-D SURFACE RECONSTRUCTION ON SCANNET
380
+
381
+ Computational Resources. The geometric and texture prior networks are trained on 8 NVIDIA V100 GPUs for 2 days until convergence. The per-scene optimization is performed and evaluated on a single NVIDIA V100 GPU. All baselines reported in our paper are tested using the same computational resources.
382
+
383
+ Clarification of Table 2 in the main paper. Table 2 of the main paper compares our method with ManhattanSDF (Guo et al., 2022) and MonoSDF (Yu et al., 2022) under depth supervision. For fairness, we keep every component of each method unchanged and only add a depth loss. Unlike some RGB-D surface reconstruction methods, we do not optimize the camera poses during training. With these modifications, both ManhattanSDF and MonoSDF have an architecture quite similar to NeuralRGBD (Azinović et al., 2022). Given these circumstances, we believe that comparing our approach with ManhattanSDF and MonoSDF on ScanNet is fair and meaningful.
384
+
385
+ Comparison with Go-surf (Wang et al., 2022b) and NeuralRGBD (Azinović et al., 2022). We compare our method with Go-surf and NeuralRGBD in Table 5. For a fair comparison, instead of optimizing the camera poses and the neural scene representation jointly, we fix the original camera poses provided by ScanNet Dai et al. (2017a). Following the same setting as in the main paper, we report the performance of the different models trained with dense and sparse training views. As shown in Table 5, our approach achieves better performance across all metrics. More importantly, although Go-surf reaches similar performance in a comparable optimization time, it cannot produce any reasonable results without per-scene optimization, whereas our neural scene prior can.
386
+
387
+ <table><tr><td>Method</td><td># frames</td><td>opt. time (min)</td><td>Prec↑</td><td>Recall↑</td><td>F-score↑</td></tr><tr><td>Neural-RGBD Azinović et al. (2022)</td><td>400</td><td>240</td><td>0.932</td><td>0.918</td><td>0.925</td></tr><tr><td>Go-surf Wang et al. (2022b)</td><td>400</td><td>35</td><td>0.946</td><td>0.956</td><td>0.950</td></tr><tr><td>Ours</td><td>400</td><td>15</td><td>0.947</td><td>0.962</td><td>0.954</td></tr><tr><td>Neural-RGBD Azinović et al. (2022)</td><td>40</td><td>240</td><td>0.837</td><td>0.855</td><td>0.846</td></tr><tr><td>Go-surf Wang et al. (2022b)</td><td>40</td><td>35</td><td>0.842</td><td>0.861</td><td>0.851</td></tr><tr><td>Ours</td><td>40</td><td>15</td><td>0.858</td><td>0.866</td><td>0.862</td></tr></table>
388
+
389
+ Table 5: Quantitative comparisons for mesh reconstruction on ScanNet.
390
+
391
+ <table><tr><td>Method</td><td>per-scene optim</td><td>opt. (min)</td><td>Acc↓</td><td>Comp↓</td><td>Prec↑</td><td>Recall↑</td><td>F-score↑</td></tr><tr><td>Manhattan SDF (Guo et al., 2022)</td><td>✓</td><td>640</td><td>0.072</td><td>0.068</td><td>0.621</td><td>0.586</td><td>0.602</td></tr><tr><td>MonoSDF (Yu et al., 2022)</td><td>✓</td><td>720</td><td>0.039</td><td>0.044</td><td>0.775</td><td>0.722</td><td>0.747</td></tr><tr><td>Ours-prior</td><td>X</td><td>≤5</td><td>0.084</td><td>0.057</td><td>0.695</td><td>0.764</td><td>0.737</td></tr></table>
392
+
393
+ Table 6: Quantitative comparisons of the neural scene prior on ScanNet. Both ManhattanSDF and MonoSDF require several hours of per-scene optimization, while the proposed neural scene prior achieves comparable performance without any optimization.
394
+
395
+ # B.2 MODEL EFFICIENCY
396
+
397
+ We take Go-surf Wang et al. (2022b), so far one of the most efficient offline scene reconstruction approaches, as the reference. Whereas Go-surf requires an average run-time of 35 minutes per scene, our Neural Scene Prior takes only 5 minutes (note that the Neural Scene Prior is a feed-forward network). The full pipeline, which includes the per-scene optimization stage, has an average run-time of 15 minutes, which is still clearly more efficient. More importantly, our model adopts a surface representation that facilitates scaling up to larger scenes, compared to the dense voxels used in Go-surf. A comprehensive comparison of running times can be found in Table 1 of the main paper.
398
+
399
+ # B.3 COMPARISON WITH MVS-BASED METHODS
400
+
401
+ We show quantitative comparisons of our method with state-of-the-art surface reconstruction approaches in Table 7. Different from Table 1 in the main paper, we mainly compare with MVS-based methods here. For a fair comparison, we follow the evaluation script used in Zou et al. (2022) for computing the 3D metrics on the ScanNet testing set. The top part of Table 7 includes offline methods, the middle part contains online methods with a fusion strategy, and the bottom part shows methods that are fine-tuned on individual scenes. Compared to most MVS-based works that use a fusion strategy, our method achieves much better results in terms of F-score and normal consistency. Moreover, our method outperforms MonoNeuralFusion Zou et al. (2022), which also performs fine-tuning on individual scenes, by a large margin.
402
+
403
+ # B.4 NOVEL VIEW SYNTHESIS
404
+
405
+ Novel View Synthesis. We show more qualitative results for novel view synthesis on ScanNet Dai et al. (2017a) in Fig. 6, following the same setting described in the main paper. Both NerfingMVS (Wei et al., 2021) and Go-surf (Wang et al., 2022b) fail on scenes with complex geometry and large camera motion (bottom two rows). The generalized representation enables the volumetric rendering to focus on more informative regions during optimization and improves the quality of the rendered novel-view RGB images.
406
+
407
+ Single-view Novel View Synthesis. We demonstrate that NFP enables high-quality novel view synthesis from single-view input (Fig. 7, mid), which has been rarely explored especially at the scene level, and potentially enables interesting applications, e.g., on mobile devices.
408
+
409
+ # B.5 QUALITATIVE RESULTS OF MESH RECONSTRUCTION
410
+
411
+ We show qualitative comparisons of our method with other baselines in Fig. 8. The reconstructed meshes of our approach are consistently more coherent and detailed than those of the other methods. In addition, we show qualitative results of textured meshes for different scenes obtained via the neural scene prior in Fig. 9. More video demos of textured mesh reconstruction can be found in the supplementary video.
412
+
413
+ ![](images/86bcec7976a3f39f4ce2a21de8acb0b34c9358f4bae68bb30958591aabbf3469.jpg)
414
+ Figure 6: Qualitative comparison for novel view synthesis on ScanNet.
415
+
416
+ ![](images/667726046cb2508157641d5c78a29bd98aadb4d9d4907773ad19a44f40a7a2ce.jpg)
417
+ Figure 7: Qualitative results for single-view novel view synthesis. The left column shows the training source view, and the appearance reconstructions of the novel views are shown in the second column. The ground-truth images are listed in the last column as reference. Better viewed when zoomed in.
418
+
419
+ <table><tr><td></td><td>Acc ↓</td><td>Comp ↓</td><td>Chamfer ↓</td><td>Precision ↑</td><td>Recall ↑</td><td>F-score ↑</td><td>NC ↑</td></tr><tr><td>FastMVSNet Yu &amp; Gao (2020)</td><td>0.052</td><td>0.103</td><td>0.077</td><td>0.652</td><td>0.538</td><td>0.588</td><td>0.701</td></tr><tr><td>PointMVSNet Chen et al. (2019)</td><td>0.048</td><td>0.115</td><td>0.082</td><td>0.677</td><td>0.536</td><td>0.595</td><td>0.695</td></tr><tr><td>Atlas Murez et al. (2020)</td><td>0.072</td><td>0.078</td><td>0.075</td><td>0.675</td><td>0.609</td><td>0.638</td><td>0.819</td></tr><tr><td>GPMVS Hou et al. (2019)</td><td>0.058</td><td>0.078</td><td>0.068</td><td>0.621</td><td>0.543</td><td>0.578</td><td>0.715</td></tr><tr><td>DeepVideoMVS Duzceker et al. (2021)</td><td>0.066</td><td>0.082</td><td>0.074</td><td>0.590</td><td>0.535</td><td>0.560</td><td>0.765</td></tr><tr><td>TransformerFusion Azinović et al. (2022)</td><td>0.055</td><td>0.083</td><td>0.069</td><td>0.728</td><td>0.600</td><td>0.655</td><td>-</td></tr><tr><td>NeuralRecon Sun et al. (2021)</td><td>0.038</td><td>0.123</td><td>0.080</td><td>0.769</td><td>0.506</td><td>0.608</td><td>0.816</td></tr><tr><td>MonoNeuralFusion Zou et al. (2022)</td><td>0.039</td><td>0.094</td><td>0.067</td><td>0.775</td><td>0.604</td><td>0.677</td><td>0.842</td></tr><tr><td>Ours</td><td>0.086</td><td>0.068</td><td>0.077</td><td>0.917</td><td>0.889</td><td>0.875</td><td>0.878</td></tr></table>
420
+
421
+ Table 7: Quantitative comparisons of mesh reconstruction on ScanNet.
422
+
423
+ # B.6 MESH RECONSTRUCTION ON THE LARGE-SCALE SCENE
424
+
425
+ Our results demonstrate that the proposed neural scene prior generalizes well to large-scale scenes, as shown in Fig. 10. In contrast to the previous four scenes, we selected a larger room from ScanNet Dai et al. (2017a) and applied our pre-trained model directly. The left part of Fig. 10 displays the mesh reconstruction obtained from the neural scene prior. Remarkably, our approach successfully recovers the geometric structure of the entire room from very sparse views (60 frames), without requiring any optimization. Furthermore, by optimizing the prior on this scene for only 20 minutes on a single NVIDIA V100 GPU, we achieve high-quality mesh reconstruction.
426
+
427
+ # B.7 MESH RECONSTRUCTION ON THE SELF-CAPTURED SCENE
428
+
429
+ To further demonstrate the robustness of the neural scene prior, we evaluate the pretrained model on a self-captured living room; the reconstructed meshes with and without texture are shown in Fig. 11. Impressively, even without per-scene optimization, the proposed neural scene prior reconstructs a plausible textured mesh.
430
+
431
+ # C LIMITATION
432
+
433
+ The proposed neural scene prior can extract geometric and texture priors for arbitrary scenes, but it requires sparse RGB-D images as input. To adapt the neural scene prior to RGB-only images, one possibility would be to first create a sparse point cloud using Structure from Motion (SfM) on the RGB images. However, as of submission time, we have not experimented with this particular setup. Exploring this direction in future work could yield interesting findings.
434
+
435
+ # D REPRODUCIBILITY STATEMENT
436
+
437
+ All experiments in this paper are reproducible. We are committed to releasing the source code upon acceptance.
438
+
439
+ # E USE OF EXISTING ASSETS.
440
+
441
+ As mentioned in the NeurIPS 2023 checklist, we describe the existing assets used in our paper and their corresponding licenses.
442
+
443
+ # F PERSONAL DATA AND HUMAN SUBJECTS
444
+
445
+ The datasets do not include facial or other personally identifiable human information.
446
+
447
+ # G ETHICAL CONCERNS.
448
+
449
+ The datasets used are standard benchmarks proposed in previous works. Nevertheless, models trained with supervised learning on these datasets may still contain potential biases.
450
+
451
+ ![](images/cc06b43e517a4af08e12a09527a85fe3aa281939d48743ed67eab98ed95a7caa.jpg)
452
+ Figure 8: Qualitative comparisons of mesh reconstruction on ScanNet. Selected local regions are highlighted by the orange bounding box. Better viewed when zoomed in.
453
+
454
+ Datasets. Most of the experiments are conducted on the ScanNet dataset Dai et al. (2017a) and on 10 synthetic scenes collected by Azinović et al. (2022), both of which are released on their official websites and publicly available for non-commercial use.
455
+
456
+ Code. Our code is built upon PyTorch Paszke et al. (2019), and we leverage the released code of nerfstudio Tancik et al. (2023) under the Apache License.
457
+
458
+ ![](images/be9eafcfff27b3c8c3a2d864ad7227878792b6eb0635b325a50e75a3c7fe350b.jpg)
459
+
460
+ ![](images/7c3c64c6e615c5b05b92bc65492e8fb483eb6d9638c371dcad0430de9f93ff45.jpg)
461
+
462
+ ![](images/044b0eee5cd3a43e8cd78c22e9a3b97a38118a5b3704978fc18b9652ef631586.jpg)
463
+
464
+ ![](images/e7c01f369fa084a7b4570329a0dce786f18587da4ab999648f129ebf6282c75c.jpg)
465
+ Figure 9: Qualitative results of Neural Scene Prior on ScanNet.
466
+
467
+ ![](images/c3617e0b22042893e272034622102ab22ef9de474e8b586270113625022a005a.jpg)
468
+
469
+ ![](images/49883f908500c27a3e3bcd57203eeb0c58a016460f01d8028badc39476fe6167.jpg)
470
+
471
+ ![](images/817e9d37eeccafe8762e82d5c7d60995f1aceeaa57c58a6dd39ea506378eac33.jpg)
472
+ Scene Prior
473
+
474
+ ![](images/f1e1ec664f4bf9e80125463d4554f52f1036cd0b1b1aefb29ec08f50db01d2e1.jpg)
475
+ Per-scene Optimization
476
+
477
+ ![](images/6827c693faf6b77116185e1a88d53d1add831d0b26a4a8a33f09462811d7cf56.jpg)
478
+ Figure 11: Mesh reconstruction results on the self-collected scene without any optimization.
479
+
480
+ ![](images/f9d343e7656228d53a91823d04d1c46ea52d2d69f0acd2006b6dbadf38184579.jpg)
481
+ Figure 10: Mesh reconstruction results on the large-scale scene.
2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9376e5d5289c9599ea3e9b0526b665e11867bcace9e5f03b8793978549aae26c
3
+ size 1270179
2024/3D Reconstruction with Generalizable Neural Fields using Scene Priors/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2024/3D-Aware Hypothesis & Verification for Generalizable Relative Object Pose Estimation/9e8e6d05-c99a-48b4-8cad-02bd3c61a78d_content_list.json ADDED
@@ -0,0 +1,1860 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "3D-AWARE HYPOTHESIS & VERIFICATION FOR GENERALIZABLE RELATIVE OBJECT POSE ESTIMATION",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 173,
8
+ 98,
9
+ 823,
10
+ 148
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Chen Zhao",
17
+ "bbox": [
18
+ 181,
19
+ 170,
20
+ 264,
21
+ 183
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "EPFL-CVLab",
28
+ "bbox": [
29
+ 181,
30
+ 184,
31
+ 277,
32
+ 196
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "chen.zhao@epfl.ch",
39
+ "bbox": [
40
+ 183,
41
+ 199,
42
+ 334,
43
+ 212
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Tong Zhang *",
50
+ "bbox": [
51
+ 375,
52
+ 170,
53
+ 473,
54
+ 184
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "EPFL-IVRL",
61
+ "bbox": [
62
+ 375,
63
+ 184,
64
+ 460,
65
+ 196
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "tong.zhang@epfl.ch",
72
+ "bbox": [
73
+ 375,
74
+ 199,
75
+ 535,
76
+ 212
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Mathieu Salzmann",
83
+ "bbox": [
84
+ 576,
85
+ 170,
86
+ 710,
87
+ 183
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "EPFL-CVLab, ClearSpace SA",
94
+ "bbox": [
95
+ 576,
96
+ 184,
97
+ 777,
98
+ 198
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "mathieu.salzmann@epfl.ch",
105
+ "bbox": [
106
+ 576,
107
+ 199,
108
+ 790,
109
+ 212
110
+ ],
111
+ "page_idx": 0
112
+ },
113
+ {
114
+ "type": "text",
115
+ "text": "ABSTRACT",
116
+ "text_level": 1,
117
+ "bbox": [
118
+ 450,
119
+ 250,
120
+ 545,
121
+ 263
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "text",
127
+ "text": "Prior methods that tackle the problem of generalizable object pose estimation highly rely on having dense views of the unseen object. By contrast, we address the scenario where only a single reference view of the object is available. Our goal then is to estimate the relative object pose between this reference view and a query image that depicts the object in a different pose. In this scenario, robust generalization is imperative due to the presence of unseen objects during testing and the large-scale object pose variation between the reference and the query. To this end, we present a new hypothesis-and-verification framework, in which we generate and evaluate multiple pose hypotheses, ultimately selecting the most reliable one as the relative object pose. To measure reliability, we introduce a 3D-aware verification that explicitly applies 3D transformations to the 3D object representations learned from the two input images. Our comprehensive experiments on the Objaverse, LINEMOD, and CO3D datasets evidence the superior accuracy of our approach in relative pose estimation and its robustness in large-scale pose variations, when dealing with unseen objects. Our project website is at: https://sailor-z.github.io/projects/ICLR2024_3DAHV.html.",
128
+ "bbox": [
129
+ 228,
130
+ 280,
131
+ 767,
132
+ 503
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "text",
138
+ "text": "1 INTRODUCTION",
139
+ "text_level": 1,
140
+ "bbox": [
141
+ 173,
142
+ 529,
143
+ 336,
144
+ 544
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Object pose estimation is crucial in many computer vision and robotics tasks, such as VR/AR (Azuma, 1997), scene understanding (Geiger et al., 2012; Chen et al., 2017; Xu et al., 2018; Marchand et al., 2015), and robotic manipulation (Collet et al., 2011; Zhu et al., 2014; Tremblay et al., 2018; Pitteri et al., 2019). Much effort has been made toward estimating object pose parameters either by direct regression (Xiang et al., 2017; Wang et al., 2019a; Hu et al., 2020) or by establishing correspondences (Peng et al., 2019; Wang et al., 2021; Su et al., 2022) which act as input to a PnP algorithm (Lepetit et al., 2009). These methods have achieved promising results in the closed-set scenario, where the training and testing data contain the same object instances. However, this assumption restricts their applicability to the real world, where unseen objects from new categories often exist. Therefore, there has been growing interest in generalizable object pose estimation, aiming to develop models that generalize to unseen objects in the testing phase.",
151
+ "bbox": [
152
+ 169,
153
+ 560,
154
+ 826,
155
+ 715
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "In this context, some approaches (Zhao et al., 2022b; Shugurov et al., 2022) follow a template-matching strategy, matching a query object image with reference images generated by rendering the 3D textured object mesh from various viewpoints. To address the scenario where the object mesh is unavailable, as illustrated in Fig. 1(a), some methods take real dense-view images as references. The object pose in the query image is estimated either by utilizing a template-matching mechanism (Liu et al., 2022) or by building 2D-3D correspondences (Sun et al., 2022). A computationally expensive 3D reconstruction (Schonberger & Frahm, 2016) is involved to either calibrate the reference images or reconstruct the 3D object point cloud. In any event, the requirement of dense-view references precludes the use of these methods for individual or sparse images, e.g., downloaded from the Internet. Intuitively, with sufficiently diverse training data, one could think of learning to regress the object pose parameters directly from a single query image. However, without access to a canonical object frame, the predicted object pose would be ill-defined as it represents the relative transformation between the camera frame and the object frame.",
162
+ "bbox": [
163
+ 169,
164
+ 720,
165
+ 826,
166
+ 902
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "header",
172
+ "text": "Published as a conference paper at ICLR 2024",
173
+ "bbox": [
174
+ 171,
175
+ 32,
176
+ 478,
177
+ 47
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "page_footnote",
183
+ "text": "*Corresponding author.",
184
+ "bbox": [
185
+ 191,
186
+ 910,
187
+ 334,
188
+ 924
189
+ ],
190
+ "page_idx": 0
191
+ },
192
+ {
193
+ "type": "page_number",
194
+ "text": "1",
195
+ "bbox": [
196
+ 493,
197
+ 948,
198
+ 504,
199
+ 959
200
+ ],
201
+ "page_idx": 0
202
+ },
203
+ {
204
+ "type": "image",
205
+ "img_path": "images/7f55ab2f77d2f079539b8fd2333c205179c2e9d330031e52ea976568b81b03d0.jpg",
206
+ "image_caption": [
207
+ "(a) Previous work"
208
+ ],
209
+ "image_footnote": [],
210
+ "bbox": [
211
+ 204,
212
+ 108,
213
+ 496,
214
+ 255
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "image",
220
+ "img_path": "images/2a84b839f1bcae3e324f4dbaec72a795e3f6030e36c45562c162ede1407671b8.jpg",
221
+ "image_caption": [
222
+ "Figure 1: Difference between previous work and our method. Previous approaches (a) estimate the pose of an unseen object building upon either template matching or 2D-3D correspondences, which requires dense views of the object as references. By contrast, our method (b) takes only one reference as input and predicts the relative object pose between the reference and query. The object pose in the query can be derived when the pose of the reference is available."
223
+ ],
224
+ "image_footnote": [],
225
+ "bbox": [
226
+ 501,
227
+ 108,
228
+ 656,
229
+ 255
230
+ ],
231
+ "page_idx": 1
232
+ },
233
+ {
234
+ "type": "image",
235
+ "img_path": "images/336559e28503140e89e6e6b3ec37c6318ffbfe04eb83a6cc6476940ff5e4946a.jpg",
236
+ "image_caption": [
237
+ "(b) Our method"
238
+ ],
239
+ "image_footnote": [],
240
+ "bbox": [
241
+ 656,
242
+ 108,
243
+ 792,
244
+ 253
245
+ ],
246
+ "page_idx": 1
247
+ },
248
+ {
249
+ "type": "text",
250
+ "text": "To bypass this issue, we assume the availability of a single reference image that contains the novel object. As shown in Fig. 1(b), we take this reference to be the canonical view and estimate the relative object pose between the reference view and the query view, which is thus well-defined. If the object pose in the reference is provided, the object pose in the query can be derived. In this scenario, one plausible solution is to compute the relative object pose based on pixel-level correspondences (Lowe, 2004; Rublee et al., 2011). However, the two views may depict a large-scale object pose variation, and our experiments will evidence that even the state-of-the-art feature-matching approaches (Sarlin et al., 2020; Sun et al., 2021; Goodwin et al., 2022) cannot generate reliable correspondences in this case, which thus results in inaccurate relative object pose estimates. As an alternative, Zhang et al. (2022); Lin et al. (2023) predict the likelihood of pose parameters leveraging an energy-based model, which, however, lacks the ability to capture 3D information when learning 2D feature embeddings.",
251
+ "bbox": [
252
+ 169,
253
+ 386,
254
+ 823,
255
+ 554
256
+ ],
257
+ "page_idx": 1
258
+ },
259
+ {
260
+ "type": "text",
261
+ "text": "By contrast, we adopt a hypothesis-and-verification paradigm, drawing inspiration from its remarkable success in robust estimation (Fischler & Bolles, 1981). We randomly sample pose parameter hypotheses and verify the reliability of these hypotheses. The relative object pose is determined as the most reliable hypothesis. Since relative pose denotes a 3D transformation, achieving robust verification from two 2D images is non-trivial. Our innovation lies in a 3D-aware verification mechanism. Specifically, we develop a 3D reasoning module over 2D feature maps, which infers 3D structural features represented as 3D volumes. This lets us explicitly apply the pose hypothesis as a 3D transformation to the reference volume. Intuitively, the transformed reference volume should be aligned with the query one if the sampled hypothesis is correct. We thus propose to verify the hypothesis by comparing the feature similarities of the reference and the query. To boost robustness, we aggregate the 3D features into orthogonal 2D plane embeddings and compare these embeddings to obtain a similarity score that indicates the reliability of the hypothesis.",
262
+ "bbox": [
263
+ 169,
264
+ 560,
265
+ 823,
266
+ 728
267
+ ],
268
+ "page_idx": 1
269
+ },
270
+ {
271
+ "type": "text",
272
+ "text": "Our method achieves state-of-the-art performance on an existing benchmark of Lin et al. (2023). Moreover, we extend the experiments to a new benchmark for generalizable relative object pose estimation, which we refer to as GROP. Our benchmark contains over 10,000 testing image pairs, exploiting objects from Objaverse (Deitke et al., 2023) and LINEMOD (Hinterstoisser et al., 2012) datasets, thus encompassing both synthetic and real images with diverse object poses. In the context of previously unseen objects, our method outperforms the feature-matching and energy-based techniques by a large margin in terms of both relative object pose estimation accuracy and robustness. We summarize our contributions as follows:",
273
+ "bbox": [
274
+ 169,
275
+ 734,
276
+ 823,
277
+ 845
278
+ ],
279
+ "page_idx": 1
280
+ },
281
+ {
282
+ "type": "list",
283
+ "sub_type": "text",
284
+ "list_items": [
285
+ "- We highlight the importance of relative pose estimation for novel objects in scenarios where only one reference image is available for each object.",
286
+ "- We present a new hypothesis-and-verification paradigm where verification is made aware of 3D by acting on a learnable 3D object representation."
287
+ ],
288
+ "bbox": [
289
+ 215,
290
+ 859,
291
+ 823,
292
+ 924
293
+ ],
294
+ "page_idx": 1
295
+ },
296
+ {
297
+ "type": "header",
298
+ "text": "Published as a conference paper at ICLR 2024",
299
+ "bbox": [
300
+ 173,
301
+ 32,
302
+ 478,
303
+ 47
304
+ ],
305
+ "page_idx": 1
306
+ },
307
+ {
308
+ "type": "page_number",
309
+ "text": "2",
310
+ "bbox": [
311
+ 493,
312
+ 948,
313
+ 503,
314
+ 959
315
+ ],
316
+ "page_idx": 1
317
+ },
318
+ {
319
+ "type": "text",
320
+ "text": "- We develop a new benchmark called GROP, where the evaluation of relative object pose estimation is conducted on both synthetic and real images with diverse object poses.",
321
+ "bbox": [
322
+ 215,
323
+ 103,
324
+ 823,
325
+ 133
326
+ ],
327
+ "page_idx": 2
328
+ },
329
+ {
330
+ "type": "text",
331
+ "text": "2 RELATED WORK",
332
+ "text_level": 1,
333
+ "bbox": [
334
+ 171,
335
+ 154,
336
+ 346,
337
+ 169
338
+ ],
339
+ "page_idx": 2
340
+ },
341
+ {
342
+ "type": "text",
343
+ "text": "Instance-Specific Object Pose Estimation. The advancements in deep learning have revolutionized the field of object pose estimation. Most existing studies have focused on instance-level object pose estimation (Xiang et al., 2017; Peng et al., 2019; Wang et al., 2021; Su et al., 2022; Wang et al., 2019a), aiming to determine the pose of specific object instances. These methods have achieved remarkable performance in the closed-set setting, which means that the training data and testing data contain the same object instances. However, such an instance-level assumption restricts the applications in the real world where previously unseen objects widely exist. The studies of Zhao et al. (2022b); Liu et al. (2022) have revealed the limited generalization ability of the instance-level approaches when confronted with unseen objects. Some approaches (Wang et al., 2019b; Chen et al., 2020a; Lin et al., 2022) relaxed the instance-level constraint and introduced category-level object pose estimation. More concretely, the testing and training datasets consist of different object instances but the same object categories. As different instances belonging to the same category depict similar visual patterns, the category-level object pose estimation methods are capable of generalizing well to new instances. However, these approaches still face challenges in generalizing to objects from novel categories, since the object appearance could vary significantly.",
344
+ "bbox": [
345
+ 169,
346
+ 186,
347
+ 826,
348
+ 398
349
+ ],
350
+ "page_idx": 2
351
+ },
352
+ {
353
+ "type": "text",
354
+ "text": "Generalizable Object Pose Estimation. Recently, some effort has been made toward generalizable object pose estimation. The testing data may include objects from categories that have not been encountered during training. The objective is to estimate the pose of these unseen objects without retraining the network. In such a context, the existing approaches can be categorized into two groups, i.e., template-matching methods (Sundermeyer et al., 2020; Labbe et al., 2022; Zhao et al., 2022b; Liu et al., 2022; Shugurov et al., 2022) and feature-matching methods (Sun et al., 2022; He et al., 2022b). Given a query image of the object, the template-matching methods retrieve the most similar reference image from a pre-generated database. The object pose is taken as that in the retrieved reference. The database is created by either rendering the 3D object model or capturing images from various viewpoints. The feature-matching methods reconstruct the 3D object point cloud by performing SFM (Schonberger & Frahm, 2016) over a sequence of images. The 2D-3D matches are then built over the query image and the reconstructed point cloud, from which the object pose is estimated by using the PnP algorithm. Notably, these two groups both require dense-view reference images to be available. Therefore, they cannot be applied in scenarios where only sparse images are accessible.",
355
+ "bbox": [
356
+ 169,
357
+ 402,
358
+ 826,
359
+ 611
360
+ ],
361
+ "page_idx": 2
362
+ },
363
+ {
364
+ "type": "text",
365
+ "text": "Relative Object Pose Estimation. Some existing methods could nonetheless be applied for relative object pose estimation, even though they were designed for a different purpose. For example, one could use traditional (Lowe, 2004) or learning-based (Sarlin et al., 2020; Sun et al., 2021; Goodwin et al., 2022) methods to build pixel-pixel correspondences and compute the relative pose by using multi-view geometry (Hartley & Zisserman, 2003). However, as only two views (one query and one reference) are available, large-scale object pose variations are inevitable, posing challenges to the correspondence-based approaches. Moreover, RelPose (Zhang et al., 2022) and RelPose++ (Lin et al., 2023) build upon an energy-based model, which combines the pose parameters with the two-view images as the input and predicts the likelihood of the relative camera pose. However, RelPose and RelPose++ exhibit a limitation in their ability to reason about 3D information, which we found crucial for inferring the 3D transformation between 2D images. By contrast, we propose to explicitly utilize 3D information in a new hypothesis-and-verification paradigm, achieving considerably better performance in our experiments.",
366
+ "bbox": [
367
+ 169,
368
+ 618,
369
+ 826,
370
+ 800
371
+ ],
372
+ "page_idx": 2
373
+ },
374
+ {
375
+ "type": "text",
376
+ "text": "3 METHOD",
377
+ "text_level": 1,
378
+ "bbox": [
379
+ 171,
380
+ 821,
381
+ 284,
382
+ 835
383
+ ],
384
+ "page_idx": 2
385
+ },
386
+ {
387
+ "type": "text",
388
+ "text": "3.1 PROBLEM FORMULATION",
389
+ "text_level": 1,
390
+ "bbox": [
391
+ 171,
392
+ 854,
393
+ 393,
394
+ 868
395
+ ],
396
+ "page_idx": 2
397
+ },
398
+ {
399
+ "type": "text",
400
+ "text": "We train a network on RGB images depicting specific object instances from a set $\\mathcal{O}_{train}$ . During testing, we aim for the network to generalize to new objects in the set $\\mathcal{O}_{test}$ , with $\\mathcal{O}_{test} \\cap \\mathcal{O}_{train} = \\emptyset$ . In contrast to some previous methods which assume that $\\mathcal{O}_{train}$ and $\\mathcal{O}_{test}$ contain the same cate",
401
+ "bbox": [
402
+ 169,
403
+ 881,
404
+ 823,
405
+ 925
406
+ ],
407
+ "page_idx": 2
408
+ },
409
+ {
410
+ "type": "header",
411
+ "text": "Published as a conference paper at ICLR 2024",
412
+ "bbox": [
413
+ 171,
414
+ 32,
415
+ 478,
416
+ 47
417
+ ],
418
+ "page_idx": 2
419
+ },
420
+ {
421
+ "type": "page_number",
422
+ "text": "3",
423
+ "bbox": [
424
+ 493,
425
+ 948,
426
+ 504,
427
+ 959
428
+ ],
429
+ "page_idx": 2
430
+ },
431
+ {
432
+ "type": "image",
433
+ "img_path": "images/854663faca01379ac343d2b81be7a1913b6d067c3a5c6fb07ed3bf935507f6b7.jpg",
434
+ "image_caption": [
435
+ "Figure 2: Overview of our framework. Our method estimates the relative pose of previously unseen objects given two images, building upon a new hypothesis-and-verification paradigm. A hypothesis $\\Delta \\mathbf{P}$ is randomly sampled and its accuracy is measured as a score $s$ . To explicitly integrate 3D information, we perform the verification over a 3D object representation indicated as a learnable 3D volume. The sampled hypothesis is coupled with the learned representation via a 3D transformation over the reference 3D volume. We learn the 3D volumes from the 2D feature maps extracted from the RGB images by introducing a 3D reasoning module. To improve robustness, we randomly mask out some blocks colored in white during training."
436
+ ],
437
+ "image_footnote": [],
438
+ "bbox": [
439
+ 187,
440
+ 101,
441
+ 810,
442
+ 241
443
+ ],
444
+ "page_idx": 3
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "gories, i.e., $\\mathcal{C}_{train} = \\mathcal{C}_{test}$ , we work on generalizable object pose estimation. The testing objects in $\\mathcal{O}_{test}$ may belong to previously unseen categories, i.e., $\\mathcal{C}_{test} \\neq \\mathcal{C}_{train}$ . In such a context, we propose to estimate the relative pose $\\Delta \\mathbf{P}$ of the object depicted in two images $\\mathbf{I}_q$ and $\\mathbf{I}_r$ . As the 3D object translation can be derived by utilizing 2D detection (Saito et al., 2022; Wang et al., 2023; Kirillov et al., 2023), we focus on the estimation of 3D object rotation $\\Delta \\mathbf{R} \\in SO(3)$ , which is more challenging. As illustrated in Fig. 2, our method builds upon a hypothesis-and-verification mechanism (Fischler & Bolles, 1981). Concretely, we randomly sample an orientation hypothesis $\\Delta \\mathbf{R}_i$ , utilizing the 6D continuous representation of Zhou et al. (2019). We then verify the correctness of $\\Delta \\mathbf{R}_i$ using a verification score $s_i = f(\\mathbf{I}_q, \\mathbf{I}_r | \\Delta \\mathbf{R}_i, \\Theta)$ , where $f$ indicates a network with learnable parameters $\\Theta$ . The expected $\\Delta \\mathbf{R}^*$ is determined as the hypothesis with the highest verification score, i.e.,",
449
+ "bbox": [
450
+ 169,
451
+ 391,
452
+ 823,
453
+ 542
454
+ ],
455
+ "page_idx": 3
456
+ },
457
+ {
458
+ "type": "equation",
459
+ "text": "\n$$\n\\Delta \\mathbf {R} ^ {*} = \\underset {\\Delta \\mathbf {R} _ {i} \\in S O (3)} {\\arg \\max } f \\left(\\mathbf {I} _ {q}, \\mathbf {I} _ {r} \\mid \\Delta \\mathbf {R} _ {i}, \\Theta\\right). \\tag {1}\n$$\n",
460
+ "text_format": "latex",
461
+ "bbox": [
462
+ 367,
463
+ 549,
464
+ 823,
465
+ 575
466
+ ],
467
+ "page_idx": 3
468
+ },
469
+ {
470
+ "type": "text",
471
+ "text": "To facilitate the verification, we develop a 3D transforming layer over a learnable 3D object representation. The details will be introduced in this section.",
472
+ "bbox": [
473
+ 169,
474
+ 582,
475
+ 823,
476
+ 609
477
+ ],
478
+ "page_idx": 3
479
+ },
480
+ {
481
+ "type": "text",
482
+ "text": "3.2 3D OBJECT REPRESENTATION LEARNING",
483
+ "text_level": 1,
484
+ "bbox": [
485
+ 171,
486
+ 627,
487
+ 503,
488
+ 641
489
+ ],
490
+ "page_idx": 3
491
+ },
492
+ {
493
+ "type": "text",
494
+ "text": "Predicting 3D transformations from 2D images is inherently challenging, as it necessitates the capability of 3D reasoning. Furthermore, the requirement of generalization ability to unseen objects makes the problem even harder. Existing methods (Zhang et al., 2022; Lin et al., 2023) tackle this challenge by deriving 3D information from global feature embeddings, which are obtained through global pooling over 2D feature maps. However, this design exhibits two key drawbacks: First, the low-level structural features which are crucial for reasoning about 3D transformations are lost; Second, the global pooling process incorporates high-level semantic information (Zhao et al., 2022b), which is coupled with the object category. Therefore, these approaches encounter difficulties in accurately estimating the relative pose of previously unseen objects.",
495
+ "bbox": [
496
+ 169,
497
+ 652,
498
+ 823,
499
+ 777
500
+ ],
501
+ "page_idx": 3
502
+ },
503
+ {
504
+ "type": "text",
505
+ "text": "To address this, we introduce a 3D object representation learning module that is capable of reasoning about 3D information from 2D structural features. Concretely, the process begins by feeding the query and reference images into a pretrained encoder (Ranftl et al., 2020), yielding two 2D feature maps $\\mathbf{F}^q$ , $\\mathbf{F}^r \\in \\mathbb{R}^{C \\times H_f \\times W_f}$ . As no global pooling layer is involved, $\\mathbf{F}^q$ and $\\mathbf{F}^r$ contain more structural information than the global feature embeddings of (Zhang et al., 2022; Lin et al., 2023). Subsequently, $\\mathbf{F}^q$ and $\\mathbf{F}^r$ serve as inputs to a 3D reasoning module. Since each RGB image depicts the object from a particular viewpoint, inferring 3D features from a single 2D feature map is intractable. To address this issue, we combine the query and reference views and utilize the transformer (Vaswani et al., 2017; Dosovitskiy et al., 2020), renowned for its ability to capture relationships among local patches.",
506
+ "bbox": [
507
+ 169,
508
+ 784,
509
+ 825,
510
+ 924
511
+ ],
512
+ "page_idx": 3
513
+ },
514
+ {
515
+ "type": "header",
516
+ "text": "Published as a conference paper at ICLR 2024",
517
+ "bbox": [
518
+ 173,
519
+ 32,
520
+ 478,
521
+ 47
522
+ ],
523
+ "page_idx": 3
524
+ },
525
+ {
526
+ "type": "page_number",
527
+ "text": "4",
528
+ "bbox": [
529
+ 493,
530
+ 948,
531
+ 503,
532
+ 959
533
+ ],
534
+ "page_idx": 3
535
+ },
536
+ {
537
+ "type": "text",
538
+ "text": "Our 3D reasoning block comprises a self-attention layer and a cross-attention layer, which account for the intra-view and inter-view relationships, respectively. Notably, unlike the existing method of Lin et al. (2023) that utilizes transformers at an image level, i.e., treating a global feature embedding as a token, our module takes $\\mathbf{F}^q$ and $\\mathbf{F}^r$ as input, thereby preserving more structural information throughout the process. Specifically, we compute",
539
+ "bbox": [
540
+ 169,
541
+ 103,
542
+ 823,
543
+ 174
544
+ ],
545
+ "page_idx": 4
546
+ },
547
+ {
548
+ "type": "equation",
549
+ "text": "\n$$\n\\mathbf {F} _ {l + 1} ^ {q} = g \\left(\\mathbf {F} _ {l} ^ {q}, \\mathbf {F} _ {l} ^ {r} \\mid \\Omega_ {\\text {s e l f}} ^ {q}, \\Omega_ {\\text {c r o s s}} ^ {q}\\right), \\tag {2}\n$$\n",
550
+ "text_format": "latex",
551
+ "bbox": [
552
+ 395,
553
+ 179,
554
+ 823,
555
+ 196
556
+ ],
557
+ "page_idx": 4
558
+ },
559
+ {
560
+ "type": "equation",
561
+ "text": "\n$$\n\\mathbf {F} _ {l + 1} ^ {r} = g \\left(\\mathbf {F} _ {l} ^ {r}, \\mathbf {F} _ {l} ^ {q} \\mid \\Omega_ {\\text {s e l f}} ^ {r}, \\Omega_ {\\text {c r o s s}} ^ {r}\\right), \\tag {3}\n$$\n",
562
+ "text_format": "latex",
563
+ "bbox": [
564
+ 395,
565
+ 199,
566
+ 823,
567
+ 215
568
+ ],
569
+ "page_idx": 4
570
+ },
571
+ {
572
+ "type": "text",
573
+ "text": "where $g$ denotes the 3D reasoning block with learnable parameters $\\{\\Omega_{\\mathrm{self}}^q,\\Omega_{\\mathrm{cross}}^q,\\Omega_{\\mathrm{self}}^r,\\Omega_{\\mathrm{cross}}^r\\}$ . Let us take $\\mathbf{F}^q$ as an example as the process over $\\mathbf{F}^r$ is symmetric. We serialize $\\mathbf{F}^q$ by flattening it from $\\mathbb{R}^{C\\times H_f\\times W_f}$ to $\\mathbb{R}^{N\\times C}$ , where $N = H_{f}\\times W_{f}$ . A position embedding (Dosovitskiy et al., 2020) is added to the sequence of tokens, which accounts for positional information. To ensure a broader receptive field that covers the entire object, the tokens are fed into a self-attention layer, which is formulated as $\\tilde{\\mathbf{F}}_l^q = t(\\mathbf{F}_l^q,\\mathbf{F}_l^q|\\Omega_{\\mathrm{self}}^q)$ , where $t$ denotes the attention layer. As aforementioned, $\\tilde{\\mathbf{F}}_l^q$ only describes the object in $\\mathbf{I}_q$ , which is captured from a single viewpoint. We thus develop a cross-attention layer, incorporating information from the other view $\\mathbf{I}_r$ into $\\tilde{\\mathbf{F}}_l^q$ . We denote the cross attention as $\\mathbf{F}_{l + 1}^q = t(\\tilde{\\mathbf{F}}_l^q,\\tilde{\\mathbf{F}}_l^r|\\Omega_{\\mathrm{cross}}^r)$ , where $\\mathbf{F}_{l + 1}^q$ serves as the input of the next 3D reasoning block.",
574
+ "bbox": [
575
+ 169,
576
+ 220,
577
+ 826,
578
+ 361
579
+ ],
580
+ "page_idx": 4
581
+ },
582
+ {
583
+ "type": "text",
584
+ "text": "We denote the output of the last 3D reasoning block as $\\hat{\\mathbf{F}}^q$ , $\\hat{\\mathbf{F}}^r \\in \\mathbb{R}^{C \\times H_f \\times W_f}$ . $\\hat{\\mathbf{F}}^q$ and $\\hat{\\mathbf{F}}^r$ comprise both intra-view and inter-view object-related information. Nevertheless, it is still non-trivial to couple the 3D transformation with the 2D feature maps, which is crucial in the following hypothesis-and-verification module. To handle this, we derive a 3D object representation from the 2D feature map in a simple yet effective manner. We lift $\\hat{\\mathbf{F}}^q$ and $\\hat{\\mathbf{F}}^r$ from 2D space to 3D space, i.e., $\\mathbb{R}^{C \\times H_f \\times W_f} \\to \\mathbb{R}^{C_{3d} \\times D_f \\times H_f \\times W_f}$ , where $C = C_{3d} \\times D_f$ . The 3D representations are thus encoded as 3D volumes $\\mathbf{V}^q$ and $\\mathbf{V}^r$ . Since the spatial dimensionality of $\\mathbf{V}^q$ and $\\mathbf{V}^r$ matches that of the 3D transformation, such a lifting process enables the subsequent 3D-aware verification.",
585
+ "bbox": [
586
+ 169,
587
+ 366,
588
+ 823,
589
+ 482
590
+ ],
591
+ "page_idx": 4
592
+ },
593
+ {
594
+ "type": "text",
595
+ "text": "3.3 3D-AWARE HYPOTHESIS AND VERIFICATION",
596
+ "text_level": 1,
597
+ "bbox": [
598
+ 171,
599
+ 497,
600
+ 529,
601
+ 512
602
+ ],
603
+ "page_idx": 4
604
+ },
605
+ {
606
+ "type": "text",
607
+ "text": "The hypothesis-and-verification mechanism has achieved tremendous success as a robust estimator (Fischler & Bolles, 1981) for image matching (Yi et al., 2018; Zhao et al., 2021). The objective is to identify the most reliable hypothesis from multiple samplings. In such a context, an effective verification process is critical. Moreover, in the scenario of relative object pose estimation, we expect the verification to be differentiable and aware of the 3D transformation. We thus tailor the hypothesis-and-verification mechanism to meet these new requirements.",
608
+ "bbox": [
609
+ 169,
610
+ 523,
611
+ 823,
612
+ 608
613
+ ],
614
+ "page_idx": 4
615
+ },
616
+ {
617
+ "type": "text",
618
+ "text": "We develop a 3D masking approach in latent space before sampling hypotheses, drawing inspiration from the success of the masked visual modeling methods (He et al., 2022a; Xie et al., 2022). Instead of masking the RGB images, we propose to mask the learnable 3D volumes, which we empirically found more compact and effective. Specifically, we sample two binary masks $\\mathbf{V}_b^q, \\mathbf{V}_b^r \\in \\mathbb{R}^{C_{3d} \\times D_f \\times H_f \\times W_f}$ during training, initialized as all ones. $h$ of the elements in each mask are randomly set to 0. 3D masking is performed as $\\tilde{\\mathbf{V}}^q = \\mathbf{V}^q \\odot \\mathbf{V}_b^q$ , $\\tilde{\\mathbf{V}}^r = \\mathbf{V}^r \\odot \\mathbf{V}_b^r$ , where $\\odot$ denotes the Hadamard product. Note that the masking is asymmetric over $\\mathbf{V}_b^q$ and $\\mathbf{V}_b^r$ . Such a design enables the modeling of object motion between two images (Gupta et al., 2023), offering potential benefits to the task of relative object pose estimation.",
619
+ "bbox": [
620
+ 169,
621
+ 614,
622
+ 826,
623
+ 743
624
+ ],
625
+ "page_idx": 4
626
+ },
627
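A hedged sketch of the asymmetric 3D masking follows: each element is kept independently with probability 1 - h, which approximates randomly setting a fraction h of the mask entries to 0; `mask_ratio` is an assumed name for h.

```python
import torch


def mask_volume(volume: torch.Tensor, mask_ratio: float = 0.25) -> torch.Tensor:
    """Zero out roughly a fraction `mask_ratio` of the elements of a volume (B, C_3d, Df, Hf, Wf)."""
    keep = (torch.rand_like(volume) >= mask_ratio).to(volume.dtype)  # 1 with probability 1 - h
    return volume * keep  # Hadamard product with the binary mask


# masks are drawn independently for the two views, i.e., the masking is asymmetric
v_q_masked = mask_volume(torch.randn(2, 8, 16, 16, 16))
v_r_masked = mask_volume(torch.randn(2, 8, 16, 16, 16))
```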
+ {
628
+ "type": "text",
629
+ "text": "The hypothesis-and-verification process begins by randomly sampling hypotheses, utilizing the 6D continuous representation of Zhou et al. (2019). Each hypothesis is then converted to a 3D rotation matrix $\\Delta \\mathbf{R}_i$ , i.e., $\\mathbb{R}^6 \\to \\mathbb{R}^{3\\times 3}$ . During the verification, we explicitly couple the hypothesis with the learnable 3D representation by performing a 3D transformation. This is formulated as",
630
+ "bbox": [
631
+ 169,
632
+ 750,
633
+ 823,
634
+ 806
635
+ ],
636
+ "page_idx": 4
637
+ },
638
+ {
639
+ "type": "equation",
640
+ "text": "\n$$\n\\tilde {\\mathbf {V}} ^ {r} = \\varphi \\left(\\Delta \\mathbf {R} _ {i} \\mathbf {X} ^ {r}\\right), \\mathbf {X} ^ {r} \\in \\mathbb {R} ^ {3 \\times L}, \\tag {4}\n$$\n",
641
+ "text_format": "latex",
642
+ "bbox": [
643
+ 387,
644
+ 810,
645
+ 823,
646
+ 829
647
+ ],
648
+ "page_idx": 4
649
+ },
650
+ {
651
+ "type": "text",
652
+ "text": "where $\\mathbf{X}^r$ denotes the 3D coordinates of the elements in $\\tilde{\\mathbf{V}}^r$ with $L = D_f\\times H_f\\times W_f$ and $\\varphi$ indicates trilinear interpolation. We keep the query 3D volume unchanged and only transform the reference 3D volume. Intuitively, the transformed $\\tilde{\\mathbf{V}}^r$ should be aligned with $\\tilde{\\mathbf{V}}^q$ if the sampled hypothesis is correct. Conversely, an incorrect 3D transformation is supposed to result in a noticeable disparity between the two 3D volumes. Therefore, our transformation-based approach facilitates the verification of $\\Delta \\mathbf{R}_i$ , which could be implemented by assessing the similarity between $\\tilde{\\mathbf{V}}^q$ and",
653
+ "bbox": [
654
+ 169,
655
+ 835,
656
+ 826,
657
+ 925
658
+ ],
659
+ "page_idx": 4
660
+ },
661
+ {
662
+ "type": "header",
663
+ "text": "Published as a conference paper at ICLR 2024",
664
+ "bbox": [
665
+ 171,
666
+ 32,
667
+ 478,
668
+ 47
669
+ ],
670
+ "page_idx": 4
671
+ },
672
+ {
673
+ "type": "page_number",
674
+ "text": "5",
675
+ "bbox": [
676
+ 493,
677
+ 948,
678
+ 504,
679
+ 959
680
+ ],
681
+ "page_idx": 4
682
+ },
683
+ {
684
+ "type": "text",
685
+ "text": "$\\tilde{\\mathbf{V}}^r$ . However, the transformed $\\tilde{\\mathbf{V}}^r$ tends to be noisy in practice because of zero padding during the transformation and some nuisances such as the background. We thus introduce a feature aggregation module, aiming to distill meaningful information for robust verification. More concretely, we project $\\tilde{\\mathbf{V}}^q$ and $\\tilde{\\mathbf{V}}^r$ back to three orthogonal 2D planes, i.e., $\\mathbb{R}^{C_{3d} \\times D_f \\times H_f \\times W_f} \\to \\mathbb{R}^{3C \\times H_f \\times W_f}$ with $C = C_{3d} \\times D_f$ , and aggregate the projected features as $\\mathbf{A}^q = g(\\tilde{\\mathbf{V}}^q | \\Psi)$ , $\\mathbf{A}^r = g(\\tilde{\\mathbf{V}}^r | \\Psi)$ , where $\\mathbf{A}^q, \\mathbf{A}^r \\in \\mathbb{R}^{C_{2d} \\times H_f \\times W_f}$ represent the distilled feature embeddings and $g$ is the aggregation module with learnable parameters $\\Psi$ . The verification score is then computed as",
686
+ "bbox": [
687
+ 169,
688
+ 102,
689
+ 826,
690
+ 207
691
+ ],
692
+ "page_idx": 5
693
+ },
694
+ {
695
+ "type": "equation",
696
+ "text": "\n$$\ns _ {i} = \\frac {1}{N} \\sum_ {j, k} \\frac {\\mathbf {A} _ {j k} ^ {q} \\cdot \\mathbf {A} _ {j k} ^ {r}}{\\| \\mathbf {A} _ {j k} ^ {q} \\| \\cdot \\| \\mathbf {A} _ {j k} ^ {r} \\|}, \\quad \\mathbf {A} _ {j k} ^ {q}, \\mathbf {A} _ {j k} ^ {r} \\in \\mathbb {R} ^ {C _ {2 d}}. \\tag {5}\n$$\n",
697
+ "text_format": "latex",
698
+ "bbox": [
699
+ 328,
700
+ 212,
701
+ 823,
702
+ 253
703
+ ],
704
+ "page_idx": 5
705
+ },
706
+ {
707
+ "type": "text",
708
+ "text": "We run the hypothesis and verification $M$ times in parallel and the expected $\\Delta \\mathbf{R}^*$ is identified as",
709
+ "bbox": [
710
+ 171,
711
+ 258,
712
+ 810,
713
+ 273
714
+ ],
715
+ "page_idx": 5
716
+ },
717
+ {
718
+ "type": "equation",
719
+ "text": "\n$$\n\\Delta \\mathbf {R} ^ {*} = \\Delta \\mathbf {R} _ {k}, \\quad k = \\underset {i} {\\arg \\max } \\left\\{s _ {i}, i = 1, 2, \\dots , M \\right\\}. \\tag {6}\n$$\n",
720
+ "text_format": "latex",
721
+ "bbox": [
722
+ 321,
723
+ 281,
724
+ 825,
725
+ 306
726
+ ],
727
+ "page_idx": 5
728
+ },
729
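Eqs. 4-6 can be condensed into a short sketch: sample rotation hypotheses via the 6D representation, rotate the reference volume by trilinear resampling, score each hypothesis with an averaged per-location cosine similarity, and keep the argmax. The aggregation module g(·|Ψ) of Eq. 5 is omitted and the similarity is computed on the raw volumes for brevity; function names such as `rot6d_to_matrix` and `verify` are illustrative, not the authors' API.

```python
import torch
import torch.nn.functional as F


def rot6d_to_matrix(x6: torch.Tensor) -> torch.Tensor:
    """Map (M, 6) continuous representations to (M, 3, 3) rotations via Gram-Schmidt (Zhou et al., 2019)."""
    a1, a2 = x6[:, :3], x6[:, 3:]
    b1 = F.normalize(a1, dim=-1)
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack([b1, b2, b3], dim=-2)


def rotate_volume(vol: torch.Tensor, rot: torch.Tensor) -> torch.Tensor:
    """Trilinearly resample volumes (M, C, D, H, W) under rotations (M, 3, 3), cf. Eq. 4."""
    m, _, d, h, w = vol.shape
    zs, ys, xs = torch.meshgrid(torch.linspace(-1, 1, d), torch.linspace(-1, 1, h),
                                torch.linspace(-1, 1, w), indexing="ij")
    coords = torch.stack([xs, ys, zs], dim=-1).reshape(1, -1, 3).expand(m, -1, -1)
    # sampling at R^T x rotates the volume content by R; out-of-range samples are zero-padded
    grid = torch.matmul(coords, rot).reshape(m, d, h, w, 3)
    return F.grid_sample(vol, grid, mode="bilinear", align_corners=True)


def verify(v_q: torch.Tensor, v_r: torch.Tensor, num_hyp: int = 1024):
    """Score `num_hyp` random hypotheses for volumes of shape (1, C, D, H, W) and keep the best one."""
    hyps = rot6d_to_matrix(torch.randn(num_hyp, 6))
    v_r_t = rotate_volume(v_r.expand(num_hyp, -1, -1, -1, -1), hyps)
    v_q_t = v_q.expand_as(v_r_t)
    # per-location cosine similarity over channels, averaged over the volume (Eq. 5)
    scores = F.cosine_similarity(v_q_t, v_r_t, dim=1).flatten(1).mean(-1)
    return hyps[scores.argmax()], scores  # Eq. 6
```

In practice the hypotheses would be processed in batches; the paper reports up to 50,000 samples evaluated in parallel at test time.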
+ {
730
+ "type": "text",
731
+ "text": "Note that compared with the dynamic rendering method (Park et al., 2020) which optimizes the object pose by rendering and comparing depth images, our approach performs verification in the latent space. This eliminates the need for computationally intensive rendering and operates independently of depth information. An alternative to the hypothesis-and-verification mechanism consists of optimizing $\\Delta \\mathbf{R}$ via gradient descent. However, our empirical observations indicate that this alternative often gets trapped in local optima. Moreover, compared with the energy-based approaches (Zhang et al., 2022; Lin et al., 2023), our method achieves a 3D-aware verification. To highlight this, let us formulate the energy-based model with some abuse of notation as",
732
+ "bbox": [
733
+ 169,
734
+ 319,
735
+ 823,
736
+ 431
737
+ ],
738
+ "page_idx": 5
739
+ },
740
+ {
741
+ "type": "equation",
742
+ "text": "\n$$\n\\Delta \\mathbf {R} ^ {*} = \\underset {\\Delta \\mathbf {R} _ {i} \\in S O (3)} {\\arg \\max } s _ {i}, \\quad s _ {i} = \\operatorname {F C} \\left(f \\left(\\mathbf {I} _ {q}, \\mathbf {I} _ {r}\\right) + h \\left(\\Delta \\mathbf {R} _ {i}\\right)\\right), \\tag {7}\n$$\n",
743
+ "text_format": "latex",
744
+ "bbox": [
745
+ 310,
746
+ 436,
747
+ 825,
748
+ 464
749
+ ],
750
+ "page_idx": 5
751
+ },
752
+ {
753
+ "type": "text",
754
+ "text": "where FC denotes fully connected layers. In this context, the 2D image embedding and the pose embedding are learned as $f(\\mathbf{I}_q,\\mathbf{I}_r)$ and $h(\\Delta \\mathbf{R}_i)$ , separately. By contrast, in our framework, the volume features are conditioned on $\\Delta \\mathbf{R}_i$ via the 3D transformation, which thus facilitates the 3D-aware verification.",
755
+ "bbox": [
756
+ 169,
757
+ 470,
758
+ 823,
759
+ 526
760
+ ],
761
+ "page_idx": 5
762
+ },
763
+ {
764
+ "type": "text",
765
+ "text": "We train our network using an infoNCE loss (Chen et al., 2020b), which is defined as",
766
+ "bbox": [
767
+ 171,
768
+ 532,
769
+ 733,
770
+ 547
771
+ ],
772
+ "page_idx": 5
773
+ },
774
+ {
775
+ "type": "equation",
776
+ "text": "\n$$\n\\mathcal {L} = - \\log \\frac {\\sum_ {j = 1} ^ {P} \\exp \\left(s _ {j} ^ {p} / \\tau\\right)}{\\sum_ {i = 1} ^ {M} \\exp \\left(s _ {i} / \\tau\\right)}, \\tag {8}\n$$\n",
777
+ "text_format": "latex",
778
+ "bbox": [
779
+ 401,
780
+ 554,
781
+ 823,
782
+ 595
783
+ ],
784
+ "page_idx": 5
785
+ },
786
+ {
787
+ "type": "text",
788
+ "text": "where $s_j^p$ denotes the score of a positive hypothesis, and $\\tau = 0.1$ is a predefined temperature. The positive samples are identified by computing the geodesic distance as",
789
+ "bbox": [
790
+ 169,
791
+ 601,
792
+ 823,
793
+ 631
794
+ ],
795
+ "page_idx": 5
796
+ },
797
+ {
798
+ "type": "equation",
799
+ "text": "\n$$\nD = \\arccos \\left(\\frac {\\operatorname {t r} \\left(\\Delta \\mathbf {R} _ {i} ^ {\\mathrm {T}} \\Delta \\mathbf {R} _ {\\mathrm {g t}}\\right) - 1}{2}\\right) / \\pi , \\tag {9}\n$$\n",
800
+ "text_format": "latex",
801
+ "bbox": [
802
+ 366,
803
+ 637,
804
+ 825,
805
+ 671
806
+ ],
807
+ "page_idx": 5
808
+ },
809
+ {
810
+ "type": "text",
811
+ "text": "where $\\Delta \\mathbf{R}_{\\mathrm{gt}}$ is the ground truth. We then consider hypotheses with $D < \\lambda$ as positive samples.",
812
+ "bbox": [
813
+ 171,
814
+ 676,
815
+ 794,
816
+ 693
817
+ ],
818
+ "page_idx": 5
819
+ },
820
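A minimal sketch of the training objective in Eqs. 8-9, assuming the hypothesis scores and rotation matrices are already available; since D is normalized by π, the threshold λ = 15° corresponds to 15/180.

```python
import math
import torch


def geodesic_distance(r_hyp: torch.Tensor, r_gt: torch.Tensor) -> torch.Tensor:
    """Normalized geodesic distance (Eq. 9) between (M, 3, 3) hypotheses and a (3, 3) ground truth."""
    rel = torch.matmul(r_hyp.transpose(-1, -2), r_gt)
    trace = rel.diagonal(dim1=-2, dim2=-1).sum(-1)
    return torch.acos(((trace - 1.0) / 2.0).clamp(-1.0, 1.0)) / math.pi


def infonce_loss(scores: torch.Tensor, r_hyp: torch.Tensor, r_gt: torch.Tensor,
                 tau: float = 0.1, lam_deg: float = 15.0) -> torch.Tensor:
    """infoNCE loss (Eq. 8) over the (M,) hypothesis scores, with positives chosen by D < lambda."""
    positive = geodesic_distance(r_hyp, r_gt) < lam_deg / 180.0
    logits = scores / tau
    # -log( sum_pos exp(s_p / tau) / sum_all exp(s_i / tau) )
    return torch.logsumexp(logits, dim=0) - torch.logsumexp(logits[positive], dim=0)
```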
+ {
821
+ "type": "text",
822
+ "text": "4 EXPERIMENTS",
823
+ "text_level": 1,
824
+ "bbox": [
825
+ 171,
826
+ 710,
827
+ 328,
828
+ 726
829
+ ],
830
+ "page_idx": 5
831
+ },
832
+ {
833
+ "type": "text",
834
+ "text": "4.1 IMPLEMENTATION DETAILS",
835
+ "text_level": 1,
836
+ "bbox": [
837
+ 171,
838
+ 743,
839
+ 408,
840
+ 757
841
+ ],
842
+ "page_idx": 5
843
+ },
844
+ {
845
+ "type": "text",
846
+ "text": "In our experiments, we employ 4 3D reasoning blocks. We set the number of hypotheses during training and testing to $M = 9,000$ and $M = 50,000$ , respectively. We define the masking threshold $h = 0.25$ and the geodesic distance threshold $\\lambda = 15^{\\circ}$ (Zhang et al., 2022; Lin et al., 2023). We train our network for 25 epochs using the AdamW (Loshchilov & Hutter, 2017) optimizer with a batch size of 48 and a learning rate of $10^{-4}$ , which is divided by 10 after 20 epochs. Training takes around 4 days on 4 NVIDIA Tesla V100s.",
847
+ "bbox": [
848
+ 169,
849
+ 768,
850
+ 823,
851
+ 853
852
+ ],
853
+ "page_idx": 5
854
+ },
855
+ {
856
+ "type": "text",
857
+ "text": "4.2 EXPERIMENTAL SETUP",
858
+ "text_level": 1,
859
+ "bbox": [
860
+ 171,
861
+ 869,
862
+ 375,
863
+ 883
864
+ ],
865
+ "page_idx": 5
866
+ },
867
+ {
868
+ "type": "text",
869
+ "text": "We compare our method with several relevant competitors including feature-matching methods, i.e., SuperGlue (Sarlin et al., 2020), LoFTR (Sun et al., 2021), and ZSP (Goodwin et al., 2022),",
870
+ "bbox": [
871
+ 169,
872
+ 895,
873
+ 823,
874
+ 925
875
+ ],
876
+ "page_idx": 5
877
+ },
878
+ {
879
+ "type": "header",
880
+ "text": "Published as a conference paper at ICLR 2024",
881
+ "bbox": [
882
+ 171,
883
+ 32,
884
+ 478,
885
+ 47
886
+ ],
887
+ "page_idx": 5
888
+ },
889
+ {
890
+ "type": "page_number",
891
+ "text": "6",
892
+ "bbox": [
893
+ 493,
894
+ 948,
895
+ 504,
896
+ 959
897
+ ],
898
+ "page_idx": 5
899
+ },
900
+ {
901
+ "type": "table",
902
+ "img_path": "images/534c9d64d9b457141c9f5ed5cf044d02fc79378a983cead928f2954066609dea.jpg",
903
+ "table_caption": [],
904
+ "table_footnote": [],
905
+ "table_body": "<table><tr><td></td><td>SuperGlue</td><td>LoFTR</td><td>ZSP</td><td>Regress</td><td>RelPose</td><td>RelPose++</td><td>Ours</td></tr><tr><td>Angular Error ↓</td><td>67.2</td><td>77.5</td><td>87.5</td><td>46.0</td><td>50.0</td><td>38.5</td><td>28.5</td></tr><tr><td>Acc @ 30° (%)↑</td><td>45.2</td><td>37.9</td><td>25.7</td><td>60.6</td><td>64.2</td><td>77.0</td><td>83.5</td></tr><tr><td>Acc @ 15° (%)↑</td><td>37.7</td><td>33.1</td><td>14.6</td><td>42.7</td><td>48.6</td><td>69.8</td><td>71.0</td></tr></table>",
906
+ "bbox": [
907
+ 181,
908
+ 99,
909
+ 816,
910
+ 161
911
+ ],
912
+ "page_idx": 6
913
+ },
914
+ {
915
+ "type": "table",
916
+ "img_path": "images/9e17063438d2189bf5b31ae1e2cf562f3d138bc3fd5e9ffb3bf7f33e3dc20496.jpg",
917
+ "table_caption": [
918
+ "Table 1: Experimental results on CO3D."
919
+ ],
920
+ "table_footnote": [],
921
+ "table_body": "<table><tr><td></td><td>SuperGlue</td><td>LoFTR</td><td>ZSP</td><td>Regress</td><td>RelPose</td><td>RelPose++</td><td>Ours</td></tr><tr><td>Angular Error ↓</td><td>102.4</td><td>134.1</td><td>107.2</td><td>55.9</td><td>80.4</td><td>33.5</td><td>28.1</td></tr><tr><td>Acc @ 30° (%) ↑</td><td>15.1</td><td>9.6</td><td>4.2</td><td>39.2</td><td>20.8</td><td>72.3</td><td>78.6</td></tr><tr><td>Acc @ 15° (%) ↑</td><td>12.1</td><td>7.7</td><td>1.5</td><td>15.6</td><td>6.7</td><td>42.9</td><td>58.4</td></tr></table>",
922
+ "bbox": [
923
+ 179,
924
+ 205,
925
+ 818,
926
+ 267
927
+ ],
928
+ "page_idx": 6
929
+ },
930
+ {
931
+ "type": "table",
932
+ "img_path": "images/9ecc9d9cc0e5c48bf26d3244b9bad7b72fcc2d7c8308303e01c072c35fd9f447.jpg",
933
+ "table_caption": [
934
+ "Table 2: Experimental results on Objaverse."
935
+ ],
936
+ "table_footnote": [],
937
+ "table_body": "<table><tr><td></td><td>SuperGlue</td><td>LoFTR</td><td>ZSP</td><td>Regress</td><td>RelPose</td><td>RelPose++</td><td>Ours</td></tr><tr><td>Angular Error ↓</td><td>64.8</td><td>84.5</td><td>78.6</td><td>52.1</td><td>58.3</td><td>46.6</td><td>41.7</td></tr><tr><td>Acc @ 30° (%) ↑</td><td>26.2</td><td>24.2</td><td>10.7</td><td>26.5</td><td>26.1</td><td>42.5</td><td>61.5</td></tr><tr><td>Acc @ 15° (%) ↑</td><td>14.3</td><td>13.5</td><td>2.7</td><td>7.6</td><td>7.0</td><td>15.8</td><td>29.9</td></tr></table>",
938
+ "bbox": [
939
+ 181,
940
+ 311,
941
+ 815,
942
+ 373
943
+ ],
944
+ "page_idx": 6
945
+ },
946
+ {
947
+ "type": "text",
948
+ "text": "Table 3: Experimental results on LINEMOD.",
949
+ "bbox": [
950
+ 339,
951
+ 388,
952
+ 656,
953
+ 405
954
+ ],
955
+ "page_idx": 6
956
+ },
957
+ {
958
+ "type": "text",
959
+ "text": "energy-based methods, i.e., RelPose (Zhang et al., 2022) and RelPose++ (Lin et al., 2023), and a regression method (Lin et al., 2023). We first perform an evaluation using the benchmark defined in (Lin et al., 2023), where the experiments are conducted on the CO3D (Reizenstein et al., 2021) dataset. We report the angular error between the predicted $\\Delta \\mathbf{R}$ and the ground truth, which is computed as in Eq. 9, and the accuracy with thresholds of $30^{\\circ}$ and $15^{\\circ}$ (Zhang et al., 2022; Lin et al., 2023). Furthermore, We extend the evaluation by introducing a new benchmark called GROP. To this end, we utilize the Objaverse (Deitke et al., 2023) and LINEMOD (Hinterstoisser et al., 2012) datasets, which include synthetic and real data, respectively. We retrain RelPose, RelPose++, and the regression method in our benchmark, and use the pretrained models for SuperGlue and LoFTR since retraining these two feature-matching approaches requires additional pixel-level annotations. For ZSP, as there is no training process involved, we evaluate it using the code released by the authors. We derive $\\Delta \\mathbf{R}$ from the estimated essential matrix (Hartley & Zisserman, 2003) for the feature-matching methods because we only have access to RGB images. We evaluate all methods on identical predefined query and reference pairs (8,304 on Objaverse and 5,000 on LINEMOD), which ensures a fair comparison. Given our emphasis on relative object rotation estimation, we crop the objects from the original RGB image utilizing the ground-truth object bounding boxes (Xiao et al., 2019; Zhao et al., 2022b; Park et al., 2020; Nguyen et al., 2022). In Sec. 4.5, we evaluate robustness against noise in the bounding boxes.",
960
+ "bbox": [
961
+ 169,
962
+ 430,
963
+ 826,
964
+ 681
965
+ ],
966
+ "page_idx": 6
967
+ },
968
+ {
969
+ "type": "text",
970
+ "text": "4.3 EXPERIMENTS ON CO3D",
971
+ "text_level": 1,
972
+ "bbox": [
973
+ 171,
974
+ 698,
975
+ 393,
976
+ 713
977
+ ],
978
+ "page_idx": 6
979
+ },
980
+ {
981
+ "type": "text",
982
+ "text": "Let us first evaluate our approach in the benchmark used in (Zhang et al., 2022; Lin et al., 2023), which builds upon the CO3D dataset (Reizenstein et al., 2021). All testing objects here are previously unseen and the evaluation thus emphasizes the generalization ability. Table 1 reports the results in terms of angular error and accuracy. Note that the results of SuperGlue, Regress, RelPose, and RelPose++ shown here align closely with the ones reported in (Lin et al., 2023), lending credibility to the evaluation. More importantly, our method produces consistently more precise relative object poses, with improvements of at least $10\\%$ in angular error. This evidences the generalization ability of our approach to unseen objects.",
983
+ "bbox": [
984
+ 169,
985
+ 724,
986
+ 826,
987
+ 838
988
+ ],
989
+ "page_idx": 6
990
+ },
991
+ {
992
+ "type": "text",
993
+ "text": "4.4 EXPERIMENTS ON GROP",
994
+ "text_level": 1,
995
+ "bbox": [
996
+ 171,
997
+ 854,
998
+ 393,
999
+ 869
1000
+ ],
1001
+ "page_idx": 6
1002
+ },
1003
+ {
1004
+ "type": "text",
1005
+ "text": "Let us now develop the evaluation in our benchmark. Table 2 and Table 3 provide the experimental results on Objaverse and LINEMOD, respectively. Our method also achieves superior generalization ability to unseen objects, outperforming the previous methods by a substantial margin. For instance,",
1006
+ "bbox": [
1007
+ 169,
1008
+ 881,
1009
+ 825,
1010
+ 925
1011
+ ],
1012
+ "page_idx": 6
1013
+ },
1014
+ {
1015
+ "type": "header",
1016
+ "text": "Published as a conference paper at ICLR 2024",
1017
+ "bbox": [
1018
+ 173,
1019
+ 32,
1020
+ 478,
1021
+ 47
1022
+ ],
1023
+ "page_idx": 6
1024
+ },
1025
+ {
1026
+ "type": "page_number",
1027
+ "text": "7",
1028
+ "bbox": [
1029
+ 493,
1030
+ 948,
1031
+ 504,
1032
+ 959
1033
+ ],
1034
+ "page_idx": 6
1035
+ },
1036
+ {
1037
+ "type": "image",
1038
+ "img_path": "images/bea25e1ad8350dd93c3de387d890ce304e3d4079084fcc9b5973383ee714d0e5.jpg",
1039
+ "image_caption": [
1040
+ "Figure 3: Qualitative results on Objaverse and LINEMOD. Here, we assume the reference to be calibrated and visualize the object pose in the query, which is derived from the estimated relative object pose. The predicted and ground-truth object poses are indicated by blue and green arrows, respectively."
1041
+ ],
1042
+ "image_footnote": [],
1043
+ "bbox": [
1044
+ 222,
1045
+ 98,
1046
+ 777,
1047
+ 344
1048
+ ],
1049
+ "page_idx": 7
1050
+ },
1051
+ {
1052
+ "type": "image",
1053
+ "img_path": "images/7ef00238b02283b9b9ba505792b18893dfa565c4df3d636825a4d8c837127c4e.jpg",
1054
+ "image_caption": [
1055
+ "(a)"
1056
+ ],
1057
+ "image_footnote": [],
1058
+ "bbox": [
1059
+ 205,
1060
+ 428,
1061
+ 498,
1062
+ 583
1063
+ ],
1064
+ "page_idx": 7
1065
+ },
1066
+ {
1067
+ "type": "image",
1068
+ "img_path": "images/ee7d73b83335e9db87257067e0d25678839e5b138d70a7179b1e4a5141bad49c.jpg",
1069
+ "image_caption": [
1070
+ "(b)",
1071
+ "Figure 4: Robustness. (a) Acc @ $30^{\\circ}$ curves obtained with varying degrees of object pose variation between the reference and the query, measured by the geodesic distance. (b) Similar curves but for different levels of noise added to the object bounding boxes."
1072
+ ],
1073
+ "image_footnote": [],
1074
+ "bbox": [
1075
+ 501,
1076
+ 435,
1077
+ 794,
1078
+ 583
1079
+ ],
1080
+ "page_idx": 7
1081
+ },
1082
+ {
1083
+ "type": "text",
1084
+ "text": "we achieve an improvement of at least $15.5\\%$ on Objaverse and $14.1\\%$ on LINEMOD, measured in terms of Acc @ $15^{\\circ}$ . Moreover, we illustrate some qualitative results in Fig. 3. To this end, we assume the object pose $\\mathbf{R}^r$ in the reference to be available, and the object pose $\\mathbf{R}^q$ in the query is computed as $\\mathbf{R}^q = \\Delta \\mathbf{R} \\mathbf{R}^r$ . We represent the predicted and the ground-truth object poses as blue and green arrows, respectively. This evidences that our method consistently yields better predictions. In the scenario where there is a notable difference in object pose between the reference and query (as in the cat images in the third row), the previous methods struggle to accurately predict the pose for the unseen object, while our approach continues to deliver an accurate prediction.",
1085
+ "bbox": [
1086
+ 169,
1087
+ 684,
1088
+ 823,
1089
+ 796
1090
+ ],
1091
+ "page_idx": 7
1092
+ },
1093
+ {
1094
+ "type": "text",
1095
+ "text": "4.5 ABLATION STUDIES",
1096
+ "text_level": 1,
1097
+ "bbox": [
1098
+ 171,
1099
+ 814,
1100
+ 354,
1101
+ 828
1102
+ ],
1103
+ "page_idx": 7
1104
+ },
1105
+ {
1106
+ "type": "text",
1107
+ "text": "To shed more light on the superiority of our method, we develop comprehensive ablation studies on Objaverse and LINEMOD. Most of the experiments are conducted on LINEMOD since it is a real dataset. As the two sparse views, i.e., a reference and a query, might result in a large-scale object pose variation, we start the ablations by analyzing the robustness in such a context. Specifically, we divide the Objaverse testing data into several groups based on the object pose variation between the reference and query, measured by geodesic distance. The task becomes progressively more",
1108
+ "bbox": [
1109
+ 169,
1110
+ 839,
1111
+ 823,
1112
+ 925
1113
+ ],
1114
+ "page_idx": 7
1115
+ },
1116
+ {
1117
+ "type": "header",
1118
+ "text": "Published as a conference paper at ICLR 2024",
1119
+ "bbox": [
1120
+ 171,
1121
+ 32,
1122
+ 478,
1123
+ 47
1124
+ ],
1125
+ "page_idx": 7
1126
+ },
1127
+ {
1128
+ "type": "page_number",
1129
+ "text": "8",
1130
+ "bbox": [
1131
+ 493,
1132
+ 948,
1133
+ 503,
1134
+ 959
1135
+ ],
1136
+ "page_idx": 7
1137
+ },
1138
+ {
1139
+ "type": "table",
1140
+ "img_path": "images/b35dc591d72a7807b2b4b558b11a17202b440b66a31aac1eb34c088602a12ad4.jpg",
1141
+ "table_caption": [],
1142
+ "table_footnote": [],
1143
+ "table_body": "<table><tr><td></td><td>w/o att.</td><td>w/o mask</td><td>w/ 2D mask</td><td>w/o agg.</td><td>RelPose*</td><td>Ours</td></tr><tr><td>Angular Error ↓</td><td>41.9</td><td>42.1</td><td>42.6</td><td>41.9</td><td>59.7</td><td>41.7</td></tr><tr><td>Acc @ 30° (%)↑</td><td>60.0</td><td>59.6</td><td>60.1</td><td>59.4</td><td>26.4</td><td>61.5</td></tr><tr><td>Acc @ 15°(%)↑</td><td>28.2</td><td>27.9</td><td>27.3</td><td>26.4</td><td>7.9</td><td>29.9</td></tr></table>",
1144
+ "bbox": [
1145
+ 200,
1146
+ 101,
1147
+ 795,
1148
+ 160
1149
+ ],
1150
+ "page_idx": 8
1151
+ },
1152
+ {
1153
+ "type": "text",
1154
+ "text": "Table 4: Effectiveness of the key components in our pipeline.",
1155
+ "bbox": [
1156
+ 287,
1157
+ 178,
1158
+ 705,
1159
+ 193
1160
+ ],
1161
+ "page_idx": 8
1162
+ },
1163
+ {
1164
+ "type": "text",
1165
+ "text": "challenging as the distance increases. We developed this experiment on Objaverse because of its wider range of pose variations compared to LINEMOD. Fig. 4(b) shows the Acc @ $30^{\\circ}$ curves as the distance varies from $0^{\\circ}$ to $180^{\\circ}$ . Note that all methods demonstrate satisfactory predictions when the distance is small, i.e., when the object orientations in the reference and query views are similar. However, the performance of feature-matching approaches, i.e., SuperGlue, LoFTR, and ZSP, dramatically drops as the distance increases. This observation supports our argument that the feature-matching methods are sensitive to the pose variations. By contrast, our method consistently surpasses all competitors, thus showing better robustness.",
1166
+ "bbox": [
1167
+ 174,
1168
+ 218,
1169
+ 821,
1170
+ 329
1171
+ ],
1172
+ "page_idx": 8
1173
+ },
1174
+ {
1175
+ "type": "text",
1176
+ "text": "As the object bounding boxes obtained in practice are inevitably noisy, we evaluate the robustness against the noise in this context on LINEMOD. Concretely, we add noise to the ground-truth bounding boxes by applying jittering to both the object center and scale. The jittering magnitude varies from 0.05 to 0.30, which results in different levels of noise. The experimental results are shown in Fig. 4(b), where our method outperforms the competitors across all scenarios. This promising robustness underscores the possibility of integrating our method with existing unseen object detectors (Zhao et al., 2022a; Liu et al., 2022). To showcase this, we extend our method to 6D unseen object pose estimation by combining it with the detector introduced in (Liu et al., 2022) and provide some results in the appendix.",
1177
+ "bbox": [
1178
+ 174,
1179
+ 335,
1180
+ 821,
1181
+ 460
1182
+ ],
1183
+ "page_idx": 8
1184
+ },
1185
+ {
1186
+ "type": "text",
1187
+ "text": "Furthermore, we evaluate the effectiveness of the key components in our framework. The results on LINEMOD are summarized in Table 4, where the evaluation of effectiveness encompasses four distinct aspects: First, we develop a counterpart by excluding self-attention and cross-attention layers (w/o att.) from the 3D reasoning blocks; Second, we modify the 3D masking by either omitting it (w/o mask) or substituting it with a 2D masking process over RGB images (w/ 2D mask); Third, we directly compute the similarity of 3D volumes without utilizing the 2D aggregation module (w/o agg.); Fourth, we replace our 3D-aware verification mechanism with the energy-based model (Zhang et al., 2022; Lin et al., 2023) (RelPose*), while retaining our feature extraction backbone unchanged. The modified versions, namely w/o att., w/o mask, w/ 2D mask, and w/o agg., exhibit worse performance, which thus demonstrates the effectiveness of the presented components, i.e., attention layers, 3D masking, and the feature aggregation module. Additionally, the inferior results yield by RelPose* highlight that the 3D-aware verification mechanism contributes to the high-accuracy predictions, instead of the feature extraction backbone in our framework. Consequently, this observation supports our claim that the proposed verification module facilitates the relative pose estimation for unseen objects by preserving the structural features and explicitly utilizing 3D information.",
1188
+ "bbox": [
1189
+ 174,
1190
+ 468,
1191
+ 821,
1192
+ 676
1193
+ ],
1194
+ "page_idx": 8
1195
+ },
1196
+ {
1197
+ "type": "text",
1198
+ "text": "5 CONCLUSION",
1199
+ "text_level": 1,
1200
+ "bbox": [
1201
+ 174,
1202
+ 696,
1203
+ 318,
1204
+ 712
1205
+ ],
1206
+ "page_idx": 8
1207
+ },
1208
+ {
1209
+ "type": "text",
1210
+ "text": "In this paper, we have tackled the problem of relative pose estimation for unseen objects. We assume the availability of only one object image as the reference and aim to estimate the relative object pose between the reference and a query image. In this context, we have tailored the hypothesis-and-verification paradigm by introducing a 3D-aware verification, where the 3D transformation is explicitly coupled with a learnable 3D object representation. We have developed comprehensive experiments on Objaverse, LINEMOD, and CO3D datasets, taking both synthetic and real data with diverse object poses into account. Our method remarkably outperforms the competitors across all scenarios and achieves better robustness against different levels of object pose variations and noise. Since our verification module incorporates local similarities when computing the verification scores, it could be affected by the occlusions. This stands as a potential limitation that we consider, and we intend to explore and address this issue in our future research endeavors.",
1211
+ "bbox": [
1212
+ 174,
1213
+ 728,
1214
+ 821,
1215
+ 880
1216
+ ],
1217
+ "page_idx": 8
1218
+ },
1219
+ {
1220
+ "type": "header",
1221
+ "text": "Published as a conference paper at ICLR 2024",
1222
+ "bbox": [
1223
+ 173,
1224
+ 32,
1225
+ 478,
1226
+ 47
1227
+ ],
1228
+ "page_idx": 8
1229
+ },
1230
+ {
1231
+ "type": "page_number",
1232
+ "text": "9",
1233
+ "bbox": [
1234
+ 493,
1235
+ 949,
1236
+ 503,
1237
+ 958
1238
+ ],
1239
+ "page_idx": 8
1240
+ },
1241
+ {
1242
+ "type": "text",
1243
+ "text": "ACKNOWLEDGMENT",
1244
+ "text_level": 1,
1245
+ "bbox": [
1246
+ 173,
1247
+ 102,
1248
+ 349,
1249
+ 118
1250
+ ],
1251
+ "page_idx": 9
1252
+ },
1253
+ {
1254
+ "type": "ref_text",
1255
+ "text": "This work was funded in part by the Swiss National Science Foundation via the Sinergia grant CRSII5-180359 and the Swiss Innovation Agency (Innosuisse) via the BRIDGE Discovery grant 40B2-0_194729.",
1256
+ "bbox": [
1257
+ 171,
1258
+ 133,
1259
+ 826,
1260
+ 176
1261
+ ],
1262
+ "page_idx": 9
1263
+ },
1264
+ {
1265
+ "type": "text",
1266
+ "text": "REFERENCES",
1267
+ "text_level": 1,
1268
+ "bbox": [
1269
+ 173,
1270
+ 196,
1271
+ 287,
1272
+ 213
1273
+ ],
1274
+ "page_idx": 9
1275
+ },
1276
+ {
1277
+ "type": "list",
1278
+ "sub_type": "ref_text",
1279
+ "list_items": [
1280
+ "Ronald T Azuma. A survey of augmented reality. Presence: teleoperators & virtual environments, 6(4):355-385, 1997.",
1281
+ "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016.",
1282
+ "Dengsheng Chen, Jun Li, Zheng Wang, and Kai Xu. Learning canonical shape space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11973-11982, 2020a.",
1283
+ "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International Conference on Machine Learning, pp. 1597-1607. PMLR, 2020b.",
1284
+ "Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 1907-1915, 2017.",
1285
+ "Alvaro Collet, Manuel Martinez, and Siddhartha S Srinivasa. The moped framework: Object recognition and pose estimation for manipulation. The International Journal of Robotics Research, 30 (10):1284-1306, 2011.",
1286
+ "Matt Deitke, Dustin Schwenk, Jordi Salvador, Luca Weihs, Oscar Michel, Eli VanderBilt, Ludwig Schmidt, Kiana Ehsani, Aniruddha Kembhavi, and Ali Farhadi. Objverse: A universe of annotated 3d objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13142-13153, 2023.",
1287
+ "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.",
1288
+ "Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24 (6):381-395, 1981.",
1289
+ "Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 3354-3361. IEEE, 2012.",
1290
+ "Walter Goodwin, Sagar Vaze, Ioannis Havoutis, and Ingmar Posner. Zero-shot category-level object pose estimation. In Proceedings of the European Conference on Computer Vision, pp. 516-532. Springer, 2022.",
1291
+ "Agrim Gupta, Jiajun Wu, Jia Deng, and Li Fei-Fei. Siamese masked autoencoders. arXiv preprint arXiv:2305.14344, 2023.",
1292
+ "Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003.",
1293
+ "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022a."
1294
+ ],
1295
+ "bbox": [
1296
+ 171,
1297
+ 220,
1298
+ 826,
1299
+ 925
1300
+ ],
1301
+ "page_idx": 9
1302
+ },
1303
+ {
1304
+ "type": "header",
1305
+ "text": "Published as a conference paper at ICLR 2024",
1306
+ "bbox": [
1307
+ 171,
1308
+ 32,
1309
+ 478,
1310
+ 47
1311
+ ],
1312
+ "page_idx": 9
1313
+ },
1314
+ {
1315
+ "type": "page_number",
1316
+ "text": "10",
1317
+ "bbox": [
1318
+ 490,
1319
+ 946,
1320
+ 509,
1321
+ 960
1322
+ ],
1323
+ "page_idx": 9
1324
+ },
1325
+ {
1326
+ "type": "list",
1327
+ "sub_type": "ref_text",
1328
+ "list_items": [
1329
+ "Xingyi He, Jiaming Sun, Yuang Wang, Di Huang, Hujun Bao, and Xiaowei Zhou. Onepose++: Keypoint-free one-shot object pose estimation without cad models. Advances in Neural Information Processing Systems, 35:35103-35115, 2022b.",
1330
+ "Stefan Hinterstoisser, Vincent Lepetit, Slobodan Ilic, Stefan Holzer, Gary Bradski, Kurt Konolige, and Nassir Navab. Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In Asian Conference on Computer Vision, pp. 548-562. Springer, 2012.",
1331
+ "Yinlin Hu, Pascal Fua, Wei Wang, and Mathieu Salzmann. Single-stage 6d object pose estimation. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 2930-2939, 2020.",
1332
+ "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023.",
1333
+ "Yann Labbe, Lucas Manuelli, Arsalan Mousavian, Stephen Tyree, Stan Birchfield, Jonathan Tremblay, Justin Carpentier, Mathieu Aubry, Dieter Fox, and Josef Sivic. MegaPose: 6D Pose Estimation of Novel Objects via Render & Compare. In CoRL, 2022.",
1334
+ "Vincent Lepetit, Francesc Moreno-Noguer, and Pascal Fua. Epnp: An accurate o (n) solution to the pnp problem. International Journal of Computer Vision, 81:155-166, 2009.",
1335
+ "Amy Lin, Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926, 2023.",
1336
+ "Jiehong Lin, Zewei Wei, Changxing Ding, and Kui Jia. Category-level 6d object pose and size estimation using self-supervised deep prior deformation networks. In Proceedings of the European Conference on Computer Vision, pp. 19-34. Springer, 2022.",
1337
+ "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Proceedings of the European Conference on Computer Vision, pp. 740-755. Springer, 2014.",
1338
+ "Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. arXiv preprint arXiv:2303.11328, 2023.",
1339
+ "Yuan Liu, Yilin Wen, Sida Peng, Cheng Lin, Xiaoxiao Long, Taku Komura, and Wenping Wang. Gen6d: Generalizable model-free 6-dof object pose estimation from rgb images. Proceedings of the European Conference on Computer Vision, 2022.",
1340
+ "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.",
1341
+ "David G Lowe. Distinctive image features from scale-invariant keypoints. International Journal of Computer Vision, 60(2):91-110, 2004.",
1342
+ "Eric Marchand, Hideaki Uchiyama, and Fabien Spindler. Pose estimation for augmented reality: a hands-on survey. IEEE Transactions on Visualization and Computer Graphics, 22(12):2633-2651, 2015.",
1343
+ "Van Nguyen Nguyen, Yinlin Hu, Yang Xiao, Mathieu Salzmann, and Vincent Lepetit. Templates for 3d object pose estimation revisited: Generalization to new objects and robustness to occlusions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6771-6780, 2022.",
1344
+ "Keunhong Park, Arsalan Mousavian, Yu Xiang, and Dieter Fox. Latentfusion: End-to-end differentiable reconstruction and rendering for unseen object pose estimation. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 10710-10719, 2020.",
1345
+ "Sida Peng, Yuan Liu, Qixing Huang, Xiaowei Zhou, and Hujun Bao. Pynet: Pixel-wise voting network for 6dof pose estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4561-4570, 2019."
1346
+ ],
1347
+ "bbox": [
1348
+ 171,
1349
+ 102,
1350
+ 825,
1351
+ 924
1352
+ ],
1353
+ "page_idx": 10
1354
+ },
1355
+ {
1356
+ "type": "header",
1357
+ "text": "Published as a conference paper at ICLR 2024",
1358
+ "bbox": [
1359
+ 171,
1360
+ 32,
1361
+ 478,
1362
+ 47
1363
+ ],
1364
+ "page_idx": 10
1365
+ },
1366
+ {
1367
+ "type": "page_number",
1368
+ "text": "11",
1369
+ "bbox": [
1370
+ 490,
1371
+ 948,
1372
+ 506,
1373
+ 959
1374
+ ],
1375
+ "page_idx": 10
1376
+ },
1377
+ {
1378
+ "type": "list",
1379
+ "sub_type": "ref_text",
1380
+ "list_items": [
1381
+ "Giorgia Pitteri, Slobodan Ilic, and Vincent Lepetit. Cornet: generic 3d corners for 6d pose estimation of new objects without retraining. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, pp. 0-0, 2019.",
1382
+ "René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE transactions on Pattern Analysis and Machine Intelligence, 44(3):1623-1637, 2020.",
1383
+ "Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10901-10911, 2021.",
1384
+ "Ethan Rublee, Vincent Rabaud, Kurt Konolige, and Gary Bradski. Orb: An efficient alternative to sift or surf. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2564-2571. IEEE, 2011.",
1385
+ "Kuniaki Saito, Ping Hu, Trevor Darrell, and Kate Saenko. Learning to detect every thing in an open world. In Proceedings of the European Conference on Computer Vision, pp. 268-284. Springer, 2022.",
1386
+ "Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4938-4947, 2020.",
1387
+ "Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4104-4113, 2016.",
1388
+ "Ivan Shugurov, Fu Li, Benjamin Busam, and Slobodan Ilic. Osop: A multi-stage one shot object pose estimation framework. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6835-6844, 2022.",
1389
+ "Yongzhi Su, Mahdi Saleh, Torben Fetzer, Jason Rambach, Nassir Navab, Benjamin Busam, Didier Stricker, and Federico Tombari. Zebrapose: Coarse to fine surface encoding for 6dof object pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6738-6748, 2022.",
1390
+ "Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8922-8931, 2021.",
1391
+ "Jiaming Sun, Zihao Wang, Siyu Zhang, Xingyi He, Hongcheng Zhao, Guofeng Zhang, and Xiaowei Zhou. Onepose: One-shot object pose estimation without cad models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6825-6834, 2022.",
1392
+ "Martin Sundermeyer, Maximilian Durner, En Yen Huang, Zoltan-Csaba Marton, Narunas Vaskevicius, Kai O Arras, and Rudolph Triebel. Multi-path learning for object pose estimation across domains. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 13916-13925, 2020.",
1393
+ "Jonathan Tremblay, Thang To, Balakumar Sundaralingam, Yu Xiang, Dieter Fox, and Stan Birchfield. Deep object pose estimation for semantic robotic grasping of household objects. In Conference on Robot Learning, 2018. URL https://arxiv.org/abs/1809.10790.",
1394
+ "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.",
1395
+ "Chen Wang, Danfei Xu, Yuke Zhu, Roberto Martin-Martin, Cewu Lu, Li Fei-Fei, and Silvio Savarese. Densefusion: 6d object pose estimation by iterative dense fusion. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3343-3352, 2019a."
1396
+ ],
1397
+ "bbox": [
1398
+ 171,
1399
+ 102,
1400
+ 825,
1401
+ 925
1402
+ ],
1403
+ "page_idx": 11
1404
+ },
1405
+ {
1406
+ "type": "header",
1407
+ "text": "Published as a conference paper at ICLR 2024",
1408
+ "bbox": [
1409
+ 171,
1410
+ 32,
1411
+ 478,
1412
+ 47
1413
+ ],
1414
+ "page_idx": 11
1415
+ },
1416
+ {
1417
+ "type": "page_number",
1418
+ "text": "12",
1419
+ "bbox": [
1420
+ 490,
1421
+ 946,
1422
+ 508,
1423
+ 959
1424
+ ],
1425
+ "page_idx": 11
1426
+ },
1427
+ {
1428
+ "type": "list",
1429
+ "sub_type": "ref_text",
1430
+ "list_items": [
1431
+ "Gu Wang, Fabian Manhardt, Federico Tombari, and Xiangyang Ji. Gdr-net: Geometry-guided direct regression network for monocular 6d object pose estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 16611-16621, 2021.",
1432
+ "He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2642-2651, 2019b.",
1433
+ "Zhenyu Wang, Yali Li, Xi Chen, Ser-Nam Lim, Antonio Torralba, Hengshuang Zhao, and Shengjin Wang. Detecting everything in the open world: Towards universal object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11433-11443, 2023.",
1434
+ "Yu Xiang, Tanner Schmidt, Venkatraman Narayanan, and Dieter Fox. Posecnn: A convolutional neural network for 6d object pose estimation in cluttered scenes. arXiv preprint arXiv:1711.00199, 2017.",
1435
+ "Yang Xiao, Xuchong Qiu, Pierre-Alain Langlois, Mathieu Aubry, and Renaud Marlet. Pose from shape: Deep pose estimation for arbitrary 3D objects. In *British Machine Vision Conference (BMVC)*, 2019.",
1436
+ "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9653-9663, 2022.",
1437
+ "Danfei Xu, Dragomir Anguelov, and Ashesh Jain. Pointfusion: Deep sensor fusion for 3d bounding box estimation. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 244-253, 2018.",
1438
+ "Kwang Moo Yi, Eduard Trulls, Yuki Ono, Vincent Lepetit, Mathieu Salzmann, and Pascal Fua. Learning to find good correspondences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2666-2674, 2018.",
1439
+ "Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose: Predicting probabilistic relative rotation for single objects in the wild. In Proceedings of the European Conference on Computer Vision, pp. 592-611. Springer, 2022.",
1440
+ "Chen Zhao, Yixiao Ge, Feng Zhu, Rui Zhao, Hongsheng Li, and Mathieu Salzmann. Progressive correspondence pruning by consensus learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6464-6473, 2021.",
1441
+ "Chen Zhao, Yinlin Hu, and Mathieu Salzmann. Locposenet: Robust location prior for unseen object pose estimation. arXiv preprint arXiv:2211.16290v2, 2022a.",
1442
+ "Chen Zhao, Yinlin Hu, and Mathieu Salzmann. Fusing local similarities for retrieval-based 3d orientation estimation of unseen objects. In Proceedings of the European Conference on Computer Vision, pp. 106-122. Springer, 2022b.",
1443
+ "Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5745-5753, 2019.",
1444
+ "Menglong Zhu, Konstantinos G Derpanis, Yinfei Yang, Samarth Brahmbhatt, Mabel Zhang, Cody Phillips, Matthieu Lecce, and Kostas Daniilidis. Single image 3d object detection and pose estimation for grasping. In Proceedings of the IEEE International Conference on Robotics and Automation, pp. 3936-3943. IEEE, 2014."
1445
+ ],
1446
+ "bbox": [
1447
+ 171,
1448
+ 102,
1449
+ 825,
1450
+ 915
1451
+ ],
1452
+ "page_idx": 12
1453
+ },
1454
+ {
1455
+ "type": "header",
1456
+ "text": "Published as a conference paper at ICLR 2024",
1457
+ "bbox": [
1458
+ 171,
1459
+ 32,
1460
+ 478,
1461
+ 47
1462
+ ],
1463
+ "page_idx": 12
1464
+ },
1465
+ {
1466
+ "type": "page_number",
1467
+ "text": "13",
1468
+ "bbox": [
1469
+ 490,
1470
+ 946,
1471
+ 508,
1472
+ 959
1473
+ ],
1474
+ "page_idx": 12
1475
+ },
1476
+ {
1477
+ "type": "text",
1478
+ "text": "APPENDIX",
1479
+ "text_level": 1,
1480
+ "bbox": [
1481
+ 171,
1482
+ 102,
1483
+ 264,
1484
+ 118
1485
+ ],
1486
+ "page_idx": 13
1487
+ },
1488
+ {
1489
+ "type": "text",
1490
+ "text": "A ARCHITECTURE OF THE 3D REASONING MODULE",
1491
+ "text_level": 1,
1492
+ "bbox": [
1493
+ 171,
1494
+ 135,
1495
+ 627,
1496
+ 152
1497
+ ],
1498
+ "page_idx": 13
1499
+ },
1500
+ {
1501
+ "type": "text",
1502
+ "text": "We show the architecture of the 3D reasoning module in Fig. 5. Each 3D reasoning block consists of a self-attention layer and a cross-attention layer, which excel at capturing intra-view and inter-view relationships, respectively. The input 2D feature map is flattened from $\\mathbb{R}^{C\\times H_f\\times W_f}$ to $\\mathbb{R}^{N\\times C}$ , where $N = H_{f}\\times W_{f}$ . A position embedding, denoted as PE, is added to the flattened feature map. Fig. 5(b) illustrates the attention layer. The context refers to the input feature map itself in the self-attention layer and it represents the feature map of another view in the cross-attention layer. We use the standard multi-head attention (Vaswani et al., 2017) and layer normalization (Ba et al., 2016) in our attention layers.",
1503
+ "bbox": [
1504
+ 169,
1505
+ 167,
1506
+ 826,
1507
+ 280
1508
+ ],
1509
+ "page_idx": 13
1510
+ },
1511
+ {
1512
+ "type": "text",
1513
+ "text": "B DATA CONFIGURATION",
1514
+ "text_level": 1,
1515
+ "bbox": [
1516
+ 171,
1517
+ 299,
1518
+ 401,
1519
+ 316
1520
+ ],
1521
+ "page_idx": 13
1522
+ },
1523
+ {
1524
+ "type": "text",
1525
+ "text": "The synthetic images are generated by rendering objects of Objaverse from randomly sampled viewpoints (Liu et al., 2023). We attach these images to random backgrounds which are sampled from COCO (Lin et al., 2014). We randomly sample 128 objects from Objaverse and use 5 objects from LINEMOD sampled by Liu et al. (2022) as testing data, reserving the remaining objects for training. This design guarantees that all objects are previously unseen during the testing phase. We train the network on both synthetic and real data, alleviating the problem of domain gap.",
1526
+ "bbox": [
1527
+ 169,
1528
+ 330,
1529
+ 823,
1530
+ 416
1531
+ ],
1532
+ "page_idx": 13
1533
+ },
1534
+ {
1535
+ "type": "text",
1536
+ "text": "Recall that we assume we have access to only one reference image and the objective is to estimate the relative object pose between the reference and the query. Therefore, the selection of the reference image is a crucial aspect of our benchmark. As multi-view images are available in Objaverse and LINEMOD datasets, one could randomly sample a reference given a query. However, such a strategy may yield an inappropriate reference. As shown in Fig. 6, the object depicted in the reference image barely overlaps with the one in the query, which makes the relative object pose estimation too challenging. Therefore, we filter out the inappropriate references from the datasets during training and testing, which makes our evaluation more reasonable.",
1537
+ "bbox": [
1538
+ 169,
1539
+ 421,
1540
+ 825,
1541
+ 532
1542
+ ],
1543
+ "page_idx": 13
1544
+ },
1545
+ {
1546
+ "type": "text",
1547
+ "text": "Specifically, we convert the object rotation matrices $\\mathbf{R}^r$ and $\\mathbf{R}^q$ to Euler angles $(\\alpha_r, \\beta_r, \\gamma_r)$ and $(\\alpha_q, \\beta_q, \\gamma_q)$ , which indicate azimuth, elevation, and in-plane rotation, respectively. Note that only azimuth and elevation lead to viewpoint changes, which thus determine the co-visible regions between the reference and query. Consequently, we set the in-plane rotation to 0 and convert the Euler angle back to the rotation matrix, i.e., $\\tilde{\\mathbf{R}} = h(\\alpha, \\beta, 0)$ . We then measure the difference of the new rotation matrices $\\tilde{\\mathbf{R}}^r$ and $\\tilde{\\mathbf{R}}^q$ by computing the geodesic distance. We exclude the image pair with a distance larger than a predefined threshold ( $90^\\circ$ by default in our experiments). As illustrated in Fig. 4 in our main paper, the retained image pairs display acceptable variations in object pose. Moreover, we utilize the synthetic images on Objaverse generated by Liu et al. (2023). Each 3D object model is rendered from 10 randomly sampled viewpoints, which yields synthetic images without in-plane rotations. To introduce in-plane rotations, we rotate the reference and query images using randomly sampled 2D in-plane rotations during training and testing.",
1548
+ "bbox": [
1549
+ 169,
1550
+ 540,
1551
+ 826,
1552
+ 712
1553
+ ],
1554
+ "page_idx": 13
1555
+ },
1556
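A sketch of this filtering rule using SciPy's rotation utilities is given below; the Euler convention ("ZXY") is an assumption, since the text only names azimuth, elevation, and in-plane rotation.

```python
import numpy as np
from scipy.spatial.transform import Rotation as R


def viewpoint_distance(r_ref: np.ndarray, r_query: np.ndarray, convention: str = "ZXY") -> float:
    """Geodesic distance (degrees) after zeroing the in-plane rotation of both poses."""
    def strip_inplane(rot):
        azimuth, elevation, _ = R.from_matrix(rot).as_euler(convention, degrees=True)
        return R.from_euler(convention, [azimuth, elevation, 0.0], degrees=True).as_matrix()

    rel = strip_inplane(r_ref).T @ strip_inplane(r_query)
    cos = np.clip((np.trace(rel) - 1.0) / 2.0, -1.0, 1.0)
    return float(np.degrees(np.arccos(cos)))


def keep_pair(r_ref: np.ndarray, r_query: np.ndarray, threshold_deg: float = 90.0) -> bool:
    """Retain a (reference, query) pair only if the viewpoint difference stays below the threshold."""
    return viewpoint_distance(r_ref, r_query) < threshold_deg
```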
+ {
1557
+ "type": "text",
1558
+ "text": "Fig. 7 shows the histograms of object pose variations between the reference and query images. We measure the variations based on the geodesic distance between the two object rotation matrices $\\mathbf{R}^r$ and $\\mathbf{R}^q$ . The histograms show that the image pairs we used in our experiments exhibit a diverse range of object pose variations, which makes our evaluation results convincing.",
1559
+ "bbox": [
1560
+ 169,
1561
+ 717,
1562
+ 823,
1563
+ 776
1564
+ ],
1565
+ "page_idx": 13
1566
+ },
1567
+ {
1568
+ "type": "text",
1569
+ "text": "C QUALITATIVE RESULTS OF 6D OBJECT POSE ESTIMATION",
1570
+ "text_level": 1,
1571
+ "bbox": [
1572
+ 169,
1573
+ 794,
1574
+ 694,
1575
+ 811
1576
+ ],
1577
+ "page_idx": 13
1578
+ },
1579
+ {
1580
+ "type": "text",
1581
+ "text": "We extend our method to 6D pose estimation for unseen objects by utilizing an off-the-shelf generalizable object detector (Liu et al., 2022). More concretely, instead of using dense-view reference images, we feed the one reference we have in our benchmark to the pretrained detection network, which predicts the object bounding box in the query image. We use the parameters of the object bounding box to compute 3D object translation, following the implementation in (Liu et al., 2022). Subsequently, we crop the object from the query and employ our approach to predict the relative 3D object rotation. The object rotation in the query is derived as $\\mathbf{R}^q = \\Delta \\mathbf{RR}^r$ . Fig. 8 shows some qual",
1582
+ "bbox": [
1583
+ 169,
1584
+ 825,
1585
+ 823,
1586
+ 926
1587
+ ],
1588
+ "page_idx": 13
1589
+ },
1590
+ {
1591
+ "type": "header",
1592
+ "text": "Published as a conference paper at ICLR 2024",
1593
+ "bbox": [
1594
+ 171,
1595
+ 32,
1596
+ 478,
1597
+ 47
1598
+ ],
1599
+ "page_idx": 13
1600
+ },
1601
+ {
1602
+ "type": "page_number",
1603
+ "text": "14",
1604
+ "bbox": [
1605
+ 490,
1606
+ 946,
1607
+ 508,
1608
+ 959
1609
+ ],
1610
+ "page_idx": 13
1611
+ },
1612
+ {
1613
+ "type": "image",
1614
+ "img_path": "images/1b488458df80cefe7bd4846f84e2a5470b56e6f67f6d4f42dc39565946c1264d.jpg",
1615
+ "image_caption": [
1616
+ "(a) 3D reasoning block"
1617
+ ],
1618
+ "image_footnote": [],
1619
+ "bbox": [
1620
+ 178,
1621
+ 106,
1622
+ 460,
1623
+ 210
1624
+ ],
1625
+ "page_idx": 14
1626
+ },
1627
+ {
1628
+ "type": "image",
1629
+ "img_path": "images/f1d6d86d9f9d979e7e40aa1515d946b1f4f02e68889716186ff1e8377a2e4587.jpg",
1630
+ "image_caption": [
1631
+ "(b) Attention layer",
1632
+ "Figure 5: Architecture of the 3D reasoning module."
1633
+ ],
1634
+ "image_footnote": [],
1635
+ "bbox": [
1636
+ 464,
1637
+ 107,
1638
+ 818,
1639
+ 210
1640
+ ],
1641
+ "page_idx": 14
1642
+ },
1643
+ {
1644
+ "type": "image",
1645
+ "img_path": "images/ae67f3237ba717b3a65d4799e72578af8d6a0c35698e2a5a1e77e5b8470fc698.jpg",
1646
+ "image_caption": [
1647
+ "Figure 6: Examples of inappropriate references."
1648
+ ],
1649
+ "image_footnote": [],
1650
+ "bbox": [
1651
+ 176,
1652
+ 279,
1653
+ 823,
1654
+ 440
1655
+ ],
1656
+ "page_idx": 14
1657
+ },
1658
+ {
1659
+ "type": "image",
1660
+ "img_path": "images/37593ee3c1b325bee6f398aeac2c7bdc26f2ce492e3b25c1e57d8e5c24e76704.jpg",
1661
+ "image_caption": [
1662
+ "(a) Objverse",
1663
+ "Figure 7: Histograms of the object pose variation between the reference and query. We measure the object pose variation as the geodesic distance between the two object rotation matrices $\\mathbf{R}^r$ and $\\mathbf{R}^q$ . The histogram depicts the number of image pairs falling within different distance intervals."
1664
+ ],
1665
+ "image_footnote": [],
1666
+ "bbox": [
1667
+ 181,
1668
+ 491,
1669
+ 393,
1670
+ 604
1671
+ ],
1672
+ "page_idx": 14
1673
+ },
1674
+ {
1675
+ "type": "image",
1676
+ "img_path": "images/2956ab1b88473948ddfaf32e46c5c965220ed3efdd8c23b58c8e953d28fd1c6c.jpg",
1677
+ "image_caption": [
1678
+ "(b) LINEMOD"
1679
+ ],
1680
+ "image_footnote": [],
1681
+ "bbox": [
1682
+ 395,
1683
+ 491,
1684
+ 602,
1685
+ 604
1686
+ ],
1687
+ "page_idx": 14
1688
+ },
1689
+ {
1690
+ "type": "image",
1691
+ "img_path": "images/96bf1062a8bd863d75b97bb4f6b9e553c19f3e154992384a436f52c95af022ad.jpg",
1692
+ "image_caption": [
1693
+ "(c) CO3D"
1694
+ ],
1695
+ "image_footnote": [],
1696
+ "bbox": [
1697
+ 604,
1698
+ 493,
1699
+ 813,
1700
+ 604
1701
+ ],
1702
+ "page_idx": 14
1703
+ },
1704
+ {
1705
+ "type": "text",
1706
+ "text": "itative results of 6D pose estimation for the unseen objects on LINEMOD. We draw the 3D object bounding boxes in blue and green, using the predicted 6D object pose and the ground truth, respectively. The promising results demonstrate the potential of our approach in terms of generalizable 6D object pose estimation.",
1707
+ "bbox": [
1708
+ 169,
1709
+ 709,
1710
+ 823,
1711
+ 766
1712
+ ],
1713
+ "page_idx": 14
1714
+ },
1715
+ {
1716
+ "type": "text",
1717
+ "text": "D MORE DETAIL ABOUT THE ABLATION STUDIES",
1718
+ "text_level": 1,
1719
+ "bbox": [
1720
+ 171,
1721
+ 790,
1722
+ 606,
1723
+ 806
1724
+ ],
1725
+ "page_idx": 14
1726
+ },
1727
+ {
1728
+ "type": "text",
1729
+ "text": "As we introduced in the main paper, we performed an ablation study, evaluating the robustness against the noise added to the 2D object bounding boxes. We simulate the bounding boxes in real-world applications by performing jittering to the ground truth with different levels of noise. We denote the object center and the size of the bounding box as $c$ and $s$ . We then randomly sample the perturbed parameters from the intervals $(c - 0.5 * n * s, c + 0.5 * n * s)$ and $(\\frac{s}{1 + n}, s * (1 + n))$ , respectively, where $n$ indicates the noise. We varied $n$ from 0.05 to 0.3 in our experiments. Please refer to Fig. 5(b) in our main paper for the experimental results.",
1730
+ "bbox": [
1731
+ 169,
1732
+ 825,
1733
+ 823,
1734
+ 925
1735
+ ],
1736
+ "page_idx": 14
1737
+ },
1738
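The jittering rule can be reproduced directly; the function name and the (center, size) parameterization of a square box are illustrative assumptions.

```python
import random


def jitter_bbox(cx: float, cy: float, s: float, n: float):
    """Perturb a square box (center (cx, cy), size s) with noise level n, as in the robustness study."""
    new_cx = cx + random.uniform(-0.5 * n * s, 0.5 * n * s)
    new_cy = cy + random.uniform(-0.5 * n * s, 0.5 * n * s)
    new_s = random.uniform(s / (1.0 + n), s * (1.0 + n))
    return new_cx, new_cy, new_s


# noise levels in the ablation range from 0.05 to 0.30
noisy_box = jitter_bbox(320.0, 240.0, 128.0, n=0.10)
```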
+ {
1739
+ "type": "header",
1740
+ "text": "Published as a conference paper at ICLR 2024",
1741
+ "bbox": [
1742
+ 171,
1743
+ 32,
1744
+ 478,
1745
+ 47
1746
+ ],
1747
+ "page_idx": 14
1748
+ },
1749
+ {
1750
+ "type": "page_number",
1751
+ "text": "15",
1752
+ "bbox": [
1753
+ 490,
1754
+ 946,
1755
+ 508,
1756
+ 959
1757
+ ],
1758
+ "page_idx": 14
1759
+ },
1760
+ {
1761
+ "type": "image",
1762
+ "img_path": "images/1b12299dd5c687255b8ac69831eeeab20c171e5005c73d798cdba45403e04692.jpg",
1763
+ "image_caption": [
1764
+ "Figure 8: Qualitative results of 6D pose estimation for unseen objects on LINEMOD. The blue and green 3D object bounding boxes are drawn using the predicted 6D object pose and the ground truth, respectively."
1765
+ ],
1766
+ "image_footnote": [],
1767
+ "bbox": [
1768
+ 173,
1769
+ 101,
1770
+ 823,
1771
+ 180
1772
+ ],
1773
+ "page_idx": 15
1774
+ },
1775
+ {
1776
+ "type": "image",
1777
+ "img_path": "images/503e9c6f10d39165bf4b37f5b7371f8d978a34ab83d2a9d5e110a2460cdb6719.jpg",
1778
+ "image_caption": [
1779
+ "Figure 9: Verification scores of all sampled pose hypotheses. The x-axis and y-axis represent the geodesic distance between the pose samplings and the ground-truth relative object pose, and the verification scores, respectively."
1780
+ ],
1781
+ "image_footnote": [],
1782
+ "bbox": [
1783
+ 305,
1784
+ 250,
1785
+ 697,
1786
+ 494
1787
+ ],
1788
+ "page_idx": 15
1789
+ },
1790
+ {
1791
+ "type": "table",
1792
+ "img_path": "images/23254b4606d43c2dc9bbb1075328b3613646b26a3a818854b2698168d9167c9f.jpg",
1793
+ "table_caption": [],
1794
+ "table_footnote": [],
1795
+ "table_body": "<table><tr><td>Method</td><td>RelPose++</td><td>Ours</td><td>RelPose++-5000</td><td>Ours-5000</td></tr><tr><td>MACs</td><td>94.6</td><td>54.7</td><td>11.3</td><td>16.3</td></tr><tr><td>Angular Error</td><td>38.5</td><td>28.5</td><td>50.7</td><td>35.3</td></tr></table>",
1796
+ "bbox": [
1797
+ 256,
1798
+ 563,
1799
+ 740,
1800
+ 609
1801
+ ],
1802
+ "page_idx": 15
1803
+ },
1804
+ {
1805
+ "type": "text",
1806
+ "text": "Table 5: Efficiency. Relpose++ uses 500,000 pose samples by default, while we sample 50,000 poses for our method in our experiments. RelPose++-5000 and Ours-5000 denote RelPose++ and our method with 5,000 pose samples, respectively. The multiply-accumulate operations (MACs) are used to measure the computation consumption.",
1807
+ "bbox": [
1808
+ 169,
1809
+ 619,
1810
+ 823,
1811
+ 678
1812
+ ],
1813
+ "page_idx": 15
1814
+ },
1815
+ {
1816
+ "type": "text",
1817
+ "text": "E EFFICIENCY",
1818
+ "text_level": 1,
1819
+ "bbox": [
1820
+ 171,
1821
+ 707,
1822
+ 308,
1823
+ 722
1824
+ ],
1825
+ "page_idx": 15
1826
+ },
1827
+ {
1828
+ "type": "text",
1829
+ "text": "It is worth noting that during testing, our method utilizes 50,000 pose samples, while RelPose++ uses 500,000. Despite processing fewer samples, our method achieves better accuracy in relative object pose estimation. To further evaluate the efficiency, we measure the computation cost in multiply-accumulate operations (MACs) and show the results in Table 5. All evaluated methods process the pose samples in parallel. \"RelPose++-5000 and \"Ours-5000\" refer to RelPose++ and our method with 5,000 samples, respectively. The results clearly show that our method achieves a better tradeoff between efficiency and accuracy in relative object pose estimation. Additionally, our method with only 5,000 samples still delivers more accurate results than RelPose++ with 500,000 samples.",
1830
+ "bbox": [
1831
+ 169,
1832
+ 739,
1833
+ 826,
1834
+ 851
1835
+ ],
1836
+ "page_idx": 15
1837
+ },
1838
+ {
1839
+ "type": "header",
1840
+ "text": "Published as a conference paper at ICLR 2024",
1841
+ "bbox": [
1842
+ 171,
1843
+ 32,
1844
+ 478,
1845
+ 47
1846
+ ],
1847
+ "page_idx": 15
1848
+ },
1849
+ {
1850
+ "type": "page_number",
1851
+ "text": "16",
1852
+ "bbox": [
1853
+ 490,
1854
+ 946,
1855
+ 508,
1856
+ 959
1857
+ ],
1858
+ "page_idx": 15
1859
+ }
1860
+ ]