Add Batch 6430854c-41e2-4dfe-8750-b7b0f5419fce data
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +63 -0
- 2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/4e246949-740d-49da-9b11-c4e162e5444b_content_list.json +0 -0
- 2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/4e246949-740d-49da-9b11-c4e162e5444b_model.json +0 -0
- 2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/4e246949-740d-49da-9b11-c4e162e5444b_origin.pdf +3 -0
- 2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/full.md +0 -0
- 2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/images.zip +3 -0
- 2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/layout.json +0 -0
- 2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/0556a44d-0dc6-414e-954b-026617063a1b_content_list.json +1552 -0
- 2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/0556a44d-0dc6-414e-954b-026617063a1b_model.json +0 -0
- 2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/0556a44d-0dc6-414e-954b-026617063a1b_origin.pdf +3 -0
- 2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/full.md +287 -0
- 2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/images.zip +3 -0
- 2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/layout.json +0 -0
- 2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_content_list.json +0 -0
- 2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_model.json +0 -0
- 2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_origin.pdf +3 -0
- 2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/full.md +529 -0
- 2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/images.zip +3 -0
- 2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/layout.json +0 -0
- 2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_content_list.json +0 -0
- 2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_model.json +0 -0
- 2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_origin.pdf +3 -0
- 2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/full.md +0 -0
- 2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/images.zip +3 -0
- 2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/layout.json +0 -0
- 2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/3820601e-9eb2-4b22-a325-26893c36ef95_content_list.json +1459 -0
- 2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/3820601e-9eb2-4b22-a325-26893c36ef95_model.json +0 -0
- 2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/3820601e-9eb2-4b22-a325-26893c36ef95_origin.pdf +3 -0
- 2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/full.md +266 -0
- 2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/images.zip +3 -0
- 2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/layout.json +0 -0
- 2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/821ea593-4f97-43c8-a9eb-9a23f906d645_content_list.json +0 -0
- 2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/821ea593-4f97-43c8-a9eb-9a23f906d645_model.json +0 -0
- 2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/821ea593-4f97-43c8-a9eb-9a23f906d645_origin.pdf +3 -0
- 2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/full.md +0 -0
- 2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/images.zip +3 -0
- 2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/layout.json +0 -0
- 2025/$q$-exponential family for policy optimization/e136c89b-38c0-4066-b262-245af928b74a_content_list.json +0 -0
- 2025/$q$-exponential family for policy optimization/e136c89b-38c0-4066-b262-245af928b74a_model.json +0 -0
- 2025/$q$-exponential family for policy optimization/e136c89b-38c0-4066-b262-245af928b74a_origin.pdf +3 -0
- 2025/$q$-exponential family for policy optimization/full.md +658 -0
- 2025/$q$-exponential family for policy optimization/images.zip +3 -0
- 2025/$q$-exponential family for policy optimization/layout.json +0 -0
- 2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/03952676-7c33-4a19-867c-762469c73779_content_list.json +0 -0
- 2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/03952676-7c33-4a19-867c-762469c73779_model.json +0 -0
- 2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/03952676-7c33-4a19-867c-762469c73779_origin.pdf +3 -0
- 2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/full.md +0 -0
- 2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/images.zip +3 -0
- 2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/layout.json +0 -0
- 2025/3D StreetUnveiler with Semantic-aware 2DGS - a simple baseline/3f23c088-7d80-4cce-b138-008f4d7a0b93_content_list.json +0 -0
.gitattributes
CHANGED
|
@@ -271,3 +271,66 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 271 |
2025/WizardMath_[[:space:]]Empowering[[:space:]]Mathematical[[:space:]]Reasoning[[:space:]]for[[:space:]]Large[[:space:]]Language[[:space:]]Models[[:space:]]via[[:space:]]Reinforced[[:space:]]Evol-Instruct/ba5e0f91-ca36-4106-b383-678d1f720355_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 272 |
2025/Your[[:space:]]Mixture-of-Experts[[:space:]]LLM[[:space:]]Is[[:space:]]Secretly[[:space:]]an[[:space:]]Embedding[[:space:]]Model[[:space:]]for[[:space:]]Free/a344fd58-4205-4a4f-ad73-63b80c7526fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 273 |
2025/miniCTX_[[:space:]]Neural[[:space:]]Theorem[[:space:]]Proving[[:space:]]with[[:space:]](Long-)Contexts/d1602efc-ff3d-4736-a7c2-22f295baf1e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 271 |
2025/WizardMath_[[:space:]]Empowering[[:space:]]Mathematical[[:space:]]Reasoning[[:space:]]for[[:space:]]Large[[:space:]]Language[[:space:]]Models[[:space:]]via[[:space:]]Reinforced[[:space:]]Evol-Instruct/ba5e0f91-ca36-4106-b383-678d1f720355_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 272 |
2025/Your[[:space:]]Mixture-of-Experts[[:space:]]LLM[[:space:]]Is[[:space:]]Secretly[[:space:]]an[[:space:]]Embedding[[:space:]]Model[[:space:]]for[[:space:]]Free/a344fd58-4205-4a4f-ad73-63b80c7526fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 273 |
2025/miniCTX_[[:space:]]Neural[[:space:]]Theorem[[:space:]]Proving[[:space:]]with[[:space:]](Long-)Contexts/d1602efc-ff3d-4736-a7c2-22f295baf1e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 274 |
+
2025/$InterLCM$_[[:space:]]Low-Quality[[:space:]]Images[[:space:]]as[[:space:]]Intermediate[[:space:]]States[[:space:]]of[[:space:]]Latent[[:space:]]Consistency[[:space:]]Models[[:space:]]for[[:space:]]Effective[[:space:]]Blind[[:space:]]Face[[:space:]]Restoration/4e246949-740d-49da-9b11-c4e162e5444b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 275 |
+
2025/$_gamma-$MoD_[[:space:]]Exploring[[:space:]]Mixture-of-Depth[[:space:]]Adaptation[[:space:]]for[[:space:]]Multimodal[[:space:]]Large[[:space:]]Language[[:space:]]Models/0556a44d-0dc6-414e-954b-026617063a1b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 276 |
+
2025/$_mathbb{X}$-Sample[[:space:]]Contrastive[[:space:]]Loss_[[:space:]]Improving[[:space:]]Contrastive[[:space:]]Learning[[:space:]]with[[:space:]]Sample[[:space:]]Similarity[[:space:]]Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 277 |
+
2025/$_phi$-Update_[[:space:]]A[[:space:]]Class[[:space:]]of[[:space:]]Policy[[:space:]]Update[[:space:]]Methods[[:space:]]with[[:space:]]Policy[[:space:]]Convergence[[:space:]]Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 278 |
+
2025/$_sigma$-zero_[[:space:]]Gradient-based[[:space:]]Optimization[[:space:]]of[[:space:]]$_ell_0$-norm[[:space:]]Adversarial[[:space:]]Examples/3820601e-9eb2-4b22-a325-26893c36ef95_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 279 |
+
2025/$_text{D}_{2}_text{O}$_[[:space:]]Dynamic[[:space:]]Discriminative[[:space:]]Operations[[:space:]]for[[:space:]]Efficient[[:space:]]Long-Context[[:space:]]Inference[[:space:]]of[[:space:]]Large[[:space:]]Language[[:space:]]Models/821ea593-4f97-43c8-a9eb-9a23f906d645_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 280 |
+
2025/$q$-exponential[[:space:]]family[[:space:]]for[[:space:]]policy[[:space:]]optimization/e136c89b-38c0-4066-b262-245af928b74a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 281 |
+
2025/(Mis)Fitting[[:space:]]Scaling[[:space:]]Laws_[[:space:]]A[[:space:]]Survey[[:space:]]of[[:space:]]Scaling[[:space:]]Law[[:space:]]Fitting[[:space:]]Techniques[[:space:]]in[[:space:]]Deep[[:space:]]Learning/03952676-7c33-4a19-867c-762469c73779_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 282 |
+
2025/3D[[:space:]]StreetUnveiler[[:space:]]with[[:space:]]Semantic-aware[[:space:]]2DGS[[:space:]]-[[:space:]]a[[:space:]]simple[[:space:]]baseline/3f23c088-7d80-4cce-b138-008f4d7a0b93_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 283 |
+
2025/3D[[:space:]]Vision-Language[[:space:]]Gaussian[[:space:]]Splatting/7fefc9a8-a83a-4a6e-b7a2-6451555cd541_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 284 |
+
2025/3D-AffordanceLLM_[[:space:]]Harnessing[[:space:]]Large[[:space:]]Language[[:space:]]Models[[:space:]]for[[:space:]]Open-Vocabulary[[:space:]]Affordance[[:space:]]Detection[[:space:]]in[[:space:]]3D[[:space:]]Worlds/f32a19e7-8ead-4470-8dcd-ee0e042554b2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 285 |
+
2025/3D-MolT5_[[:space:]]Leveraging[[:space:]]Discrete[[:space:]]Structural[[:space:]]Information[[:space:]]for[[:space:]]Molecule-Text[[:space:]]Modeling/7fe24d6b-0196-4bae-b5c6-92593a9ce526_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 286 |
+
2025/3D-Properties_[[:space:]]Identifying[[:space:]]Challenges[[:space:]]in[[:space:]]DPO[[:space:]]and[[:space:]]Charting[[:space:]]a[[:space:]]Path[[:space:]]Forward/8fedd307-9532-4e32-ad92-bd929756427e_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 287 |
+
2025/3D-SPATIAL[[:space:]]MULTIMODAL[[:space:]]MEMORY/dfa8acf4-63ec-4e98-bb58-719ef5f72492_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 288 |
+
2025/3DGS-Drag_[[:space:]]Dragging[[:space:]]Gaussians[[:space:]]for[[:space:]]Intuitive[[:space:]]Point-Based[[:space:]]3D[[:space:]]Editing/3772ab48-3125-4695-a4cf-9b4e8a378d76_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 289 |
+
2025/3DMolFormer_[[:space:]]A[[:space:]]Dual-channel[[:space:]]Framework[[:space:]]for[[:space:]]Structure-based[[:space:]]Drug[[:space:]]Discovery/5336a4e7-b106-4ad1-a8ae-4707462c1dc6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 290 |
+
2025/3DTrajMaster_[[:space:]]Mastering[[:space:]]3D[[:space:]]Trajectory[[:space:]]for[[:space:]]Multi-Entity[[:space:]]Motion[[:space:]]in[[:space:]]Video[[:space:]]Generation/676ebc6a-2e43-43ba-bc5e-c1ea226e8932_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 291 |
+
2025/3DitScene_[[:space:]]Editing[[:space:]]Any[[:space:]]Scene[[:space:]]via[[:space:]]Language-guided[[:space:]]Disentangled[[:space:]]Gaussian[[:space:]]Splatting/5b6c955e-a799-4e63-99df-a2223da2aaec_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 292 |
+
2025/6D[[:space:]]Object[[:space:]]Pose[[:space:]]Tracking[[:space:]]in[[:space:]]Internet[[:space:]]Videos[[:space:]]for[[:space:]]Robotic[[:space:]]Manipulation/51fb314b-a029-49ce-a4ca-1d4517796522_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 293 |
+
2025/6DGS_[[:space:]]Enhanced[[:space:]]Direction-Aware[[:space:]]Gaussian[[:space:]]Splatting[[:space:]]for[[:space:]]Volumetric[[:space:]]Rendering/0af60704-1ec8-4b95-96b8-897f27fe8b8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 294 |
+
2025/A[[:space:]]Benchmark[[:space:]]for[[:space:]]Semantic[[:space:]]Sensitive[[:space:]]Information[[:space:]]in[[:space:]]LLMs[[:space:]]Outputs/0036e4ff-30bf-4a88-a93b-d81a17c3f7c0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 295 |
+
2025/A[[:space:]]Black[[:space:]]Swan[[:space:]]Hypothesis_[[:space:]]The[[:space:]]Role[[:space:]]of[[:space:]]Human[[:space:]]Irrationality[[:space:]]in[[:space:]]AI[[:space:]]Safety/be0a9caf-50e4-469b-b266-fc4dba31cd03_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 296 |
+
2025/A[[:space:]]Causal[[:space:]]Lens[[:space:]]for[[:space:]]Learning[[:space:]]Long-term[[:space:]]Fair[[:space:]]Policies/ed1df79f-6ee4-402f-ae06-3795367412cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 297 |
+
2025/A[[:space:]]Closer[[:space:]]Look[[:space:]]at[[:space:]]Machine[[:space:]]Unlearning[[:space:]]for[[:space:]]Large[[:space:]]Language[[:space:]]Models/f4c38dbf-4240-4b31-bbd3-230b9f20d741_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 298 |
+
2025/A[[:space:]]Coefficient[[:space:]]Makes[[:space:]]SVRG[[:space:]]Effective/b81fbe5e-0bd4-4815-87c7-e9d80b089dc1_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 299 |
+
2025/A[[:space:]]Common[[:space:]]Pitfall[[:space:]]of[[:space:]]Margin-based[[:space:]]Language[[:space:]]Model[[:space:]]Alignment_[[:space:]]Gradient[[:space:]]Entanglement/6ae52b73-0995-42e9-a855-8e051e9d0223_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 300 |
+
2025/A[[:space:]]Conditional[[:space:]]Independence[[:space:]]Test[[:space:]]in[[:space:]]the[[:space:]]Presence[[:space:]]of[[:space:]]Discretization/9f1abbd4-b323-4886-bef4-c5eadda1e024_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 301 |
+
2025/A[[:space:]]Deep[[:space:]]Generative[[:space:]]Learning[[:space:]]Approach[[:space:]]for[[:space:]]Two-stage[[:space:]]Adaptive[[:space:]]Robust[[:space:]]Optimization/cc3312b4-8675-448b-93e9-e9160620ee17_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 302 |
+
2025/A[[:space:]]Differentiable[[:space:]]Rank-Based[[:space:]]Objective[[:space:]]for[[:space:]]Better[[:space:]]Feature[[:space:]]Learning/b7756356-f2d6-4b3c-8865-fcac7297aec8_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 303 |
+
2025/A[[:space:]]Distributional[[:space:]]Approach[[:space:]]to[[:space:]]Uncertainty-Aware[[:space:]]Preference[[:space:]]Alignment[[:space:]]Using[[:space:]]Offline[[:space:]]Demonstrations/42d66f31-3d8d-47a8-b853-6462b0079aad_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 304 |
+
2025/A[[:space:]]Formal[[:space:]]Framework[[:space:]]for[[:space:]]Understanding[[:space:]]Length[[:space:]]Generalization[[:space:]]in[[:space:]]Transformers/d4b8cc61-50da-4c36-9381-d20485ea10b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 305 |
+
2025/A[[:space:]]General[[:space:]]Framework[[:space:]]for[[:space:]]Off-Policy[[:space:]]Learning[[:space:]]with[[:space:]]Partially-Observed[[:space:]]Reward/cb5006a6-4c52-4cf2-a266-018ffc086b54_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 306 |
+
2025/A[[:space:]]General[[:space:]]Framework[[:space:]]for[[:space:]]Producing[[:space:]]Interpretable[[:space:]]Semantic[[:space:]]Text[[:space:]]Embeddings/bba75cd0-30a9-4efc-83f1-a541f7ca20c3_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 307 |
+
2025/A[[:space:]]Generalist[[:space:]]Hanabi[[:space:]]Agent/935fb97a-af5a-4d92-a14a-22dca748fc90_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 308 |
+
2025/A[[:space:]]Generic[[:space:]]Framework[[:space:]]for[[:space:]]Conformal[[:space:]]Fairness/ff5cc7ae-23ab-4e12-9347-35db7511e688_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 309 |
+
2025/A[[:space:]]Graph[[:space:]]Enhanced[[:space:]]Symbolic[[:space:]]Discovery[[:space:]]Framework[[:space:]]For[[:space:]]Efficient[[:space:]]Logic[[:space:]]Optimization/78737269-398f-490a-b077-52cf2601f991_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 310 |
+
2025/A[[:space:]]Large-scale[[:space:]]Dataset[[:space:]]and[[:space:]]Benchmark[[:space:]]for[[:space:]]Commuting[[:space:]]Origin-Destination[[:space:]]Flow[[:space:]]Generation/ff4009a7-8c22-41bf-ad09-4b39c3583ea0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 311 |
+
2025/A[[:space:]]Large-scale[[:space:]]Training[[:space:]]Paradigm[[:space:]]for[[:space:]]Graph[[:space:]]Generative[[:space:]]Models/637fb6b1-512f-46f4-b85a-de3c79450342_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 312 |
+
2025/A[[:space:]]Little[[:space:]]Goes[[:space:]]a[[:space:]]Long[[:space:]]Way_[[:space:]]Efficient[[:space:]]Long[[:space:]]Context[[:space:]]Training[[:space:]]and[[:space:]]Inference[[:space:]]with[[:space:]]Partial[[:space:]]Contexts/f190f340-8238-4195-8600-ba55adcf8781_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 313 |
+
2025/A[[:space:]]Meta-Learning[[:space:]]Approach[[:space:]]to[[:space:]]Bayesian[[:space:]]Causal[[:space:]]Discovery/017767f2-aa9b-42d0-b0ba-27ae2f4cd47d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 314 |
+
2025/A[[:space:]]Multi-Power[[:space:]]Law[[:space:]]for[[:space:]]Loss[[:space:]]Curve[[:space:]]Prediction[[:space:]]Across[[:space:]]Learning[[:space:]]Rate[[:space:]]Schedules/efa64375-842a-42aa-8e9e-bc4aa20084b2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 315 |
+
2025/A[[:space:]]Multiscale[[:space:]]Frequency[[:space:]]Domain[[:space:]]Causal[[:space:]]Framework[[:space:]]for[[:space:]]Enhanced[[:space:]]Pathological[[:space:]]Analysis/0c6735e8-c3df-440f-a5ef-991ce662c3cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 316 |
+
2025/A[[:space:]]New[[:space:]]Perspective[[:space:]]on[[:space:]]Shampoo's[[:space:]]Preconditioner/cdf355c0-1789-4581-83ee-7261f092d029_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 317 |
+
2025/A[[:space:]]Non-Contrastive[[:space:]]Learning[[:space:]]Framework[[:space:]]for[[:space:]]Sequential[[:space:]]Recommendation[[:space:]]with[[:space:]]Preference-Preserving[[:space:]]Profile[[:space:]]Generation/6db7d99a-fbd7-4a02-9c76-46a7e7c42dbe_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 318 |
+
2025/A[[:space:]]Percolation[[:space:]]Model[[:space:]]of[[:space:]]Emergence_[[:space:]]Analyzing[[:space:]]Transformers[[:space:]]Trained[[:space:]]on[[:space:]]a[[:space:]]Formal[[:space:]]Language/3e3cc47c-454c-4ce6-9b92-e3b900164186_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 319 |
+
2025/A[[:space:]]Policy-Gradient[[:space:]]Approach[[:space:]]to[[:space:]]Solving[[:space:]]Imperfect-Information[[:space:]]Games[[:space:]]with[[:space:]]Best-Iterate[[:space:]]Convergence/04b6ba8f-68e6-4e34-a9a6-73638b1a5c5d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 320 |
+
2025/A[[:space:]]Quantum[[:space:]]Circuit-Based[[:space:]]Compression[[:space:]]Perspective[[:space:]]for[[:space:]]Parameter-Efficient[[:space:]]Learning/1082e608-c4b8-4386-9502-3586e82e16ca_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 321 |
+
2025/A[[:space:]]Riemannian[[:space:]]Framework[[:space:]]for[[:space:]]Learning[[:space:]]Reduced-order[[:space:]]Lagrangian[[:space:]]Dynamics/e96e8907-259c-4cd4-af32-08753eef03e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 322 |
+
2025/A[[:space:]]Robust[[:space:]]Method[[:space:]]to[[:space:]]Discover[[:space:]]Causal[[:space:]]or[[:space:]]Anticausal[[:space:]]Relation/ecd6cc53-20c8-49c6-91e1-8f75ed2053d8_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 323 |
+
2025/A[[:space:]]Sanity[[:space:]]Check[[:space:]]for[[:space:]]AI-generated[[:space:]]Image[[:space:]]Detection/9af7c0ec-37de-4bb0-a1b4-86e17aa00d8e_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 324 |
+
2025/A[[:space:]]Simple[[:space:]]Approach[[:space:]]to[[:space:]]Unifying[[:space:]]Diffusion-based[[:space:]]Conditional[[:space:]]Generation/eead2418-0a76-411b-a390-833536728f7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 325 |
+
2025/A[[:space:]]Simple[[:space:]]Framework[[:space:]]for[[:space:]]Open-Vocabulary[[:space:]]Zero-Shot[[:space:]]Segmentation/8fd7a220-6d0a-4e01-a68f-e502481d585d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 326 |
+
2025/A[[:space:]]Simple[[:space:]]yet[[:space:]]Effective[[:space:]]$_Delta_Delta[[:space:]]G$[[:space:]]Predictor[[:space:]]is[[:space:]]An[[:space:]]Unsupervised[[:space:]]Antibody[[:space:]]Optimizer[[:space:]]and[[:space:]]Explainer/26694a35-ba7a-44bb-9efb-1b3ba6c62a28_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 327 |
+
2025/A[[:space:]]Single[[:space:]]Goal[[:space:]]is[[:space:]]All[[:space:]]You[[:space:]]Need_[[:space:]]Skills[[:space:]]and[[:space:]]Exploration[[:space:]]Emerge[[:space:]]from[[:space:]]Contrastive[[:space:]]RL[[:space:]]without[[:space:]]Rewards,[[:space:]]Demonstrations,[[:space:]]or[[:space:]]Subgoals/0a2cfd1a-e5b1-4e06-83a7-e99da1ee3616_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 328 |
+
2025/A[[:space:]]Skewness-Based[[:space:]]Criterion[[:space:]]for[[:space:]]Addressing[[:space:]]Heteroscedastic[[:space:]]Noise[[:space:]]in[[:space:]]Causal[[:space:]]Discovery/64119344-1269-4692-aea0-4ddf5ba01f84_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 329 |
+
2025/A[[:space:]]Solvable[[:space:]]Attention[[:space:]]for[[:space:]]Neural[[:space:]]Scaling[[:space:]]Laws/75623645-3421-4173-b462-6c3905436d5a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 330 |
+
2025/A[[:space:]]Spark[[:space:]]of[[:space:]]Vision-Language[[:space:]]Intelligence_[[:space:]]2-Dimensional[[:space:]]Autoregressive[[:space:]]Transformer[[:space:]]for[[:space:]]Efficient[[:space:]]Finegrained[[:space:]]Image[[:space:]]Generation/4bdda7db-25f5-4a1d-a1dc-016da7a22112_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 331 |
+
2025/A[[:space:]]Statistical[[:space:]]Approach[[:space:]]for[[:space:]]Controlled[[:space:]]Training[[:space:]]Data[[:space:]]Detection/d9d4dd9b-0bb7-463e-b4f2-889aa19edc04_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 332 |
+
2025/A[[:space:]]Statistical[[:space:]]Framework[[:space:]]for[[:space:]]Ranking[[:space:]]LLM-based[[:space:]]Chatbots/75a949a9-7964-4937-818d-ded668736f51_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 333 |
+
2025/A[[:space:]]Stochastic[[:space:]]Approach[[:space:]]to[[:space:]]the[[:space:]]Subset[[:space:]]Selection[[:space:]]Problem[[:space:]]via[[:space:]]Mirror[[:space:]]Descent/d6886a4b-7f20-46f0-958b-0f60bb404628_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 334 |
+
2025/A[[:space:]]Theoretical[[:space:]]Analysis[[:space:]]of[[:space:]]Self-Supervised[[:space:]]Learning[[:space:]]for[[:space:]]Vision[[:space:]]Transformers/462128b6-95a5-41f1-925b-818461386644_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 335 |
+
2025/A[[:space:]]Theoretical[[:space:]]Framework[[:space:]]for[[:space:]]Partially-Observed[[:space:]]Reward[[:space:]]States[[:space:]]in[[:space:]]RLHF/b7667567-3e3d-462a-a083-b8c819d42495_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 336 |
+
2025/A[[:space:]]Theoretical[[:space:]]Perspective_[[:space:]]How[[:space:]]to[[:space:]]Prevent[[:space:]]Model[[:space:]]Collapse[[:space:]]in[[:space:]]Self-consuming[[:space:]]Training[[:space:]]Loops/6ed4b455-5a5f-4892-9cba-c9c5c6dfe524_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/4e246949-740d-49da-9b11-c4e162e5444b_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/4e246949-740d-49da-9b11-c4e162e5444b_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/4e246949-740d-49da-9b11-c4e162e5444b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:24a3265175c7391d1ea948ac7e6b974e9631b0911349eca80f63118e69a7f61f
|
| 3 |
+
size 28753161
|
2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a8dc3ca0c9fe65f572c2578061d154e04b2a141397cbc556a4ba61f570bceba5
|
| 3 |
+
size 4715784
|
2025/$InterLCM$_ Low-Quality Images as Intermediate States of Latent Consistency Models for Effective Blind Face Restoration/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/0556a44d-0dc6-414e-954b-026617063a1b_content_list.json
ADDED
|
@@ -0,0 +1,1552 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "$\\gamma$ -MOD: EXPLORING MIXTURE-OF-DEPTH ADAPTATION FOR MULTIMODAL LARGE LANGUAGE MODELS",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
171,
|
| 8 |
+
99,
|
| 9 |
+
828,
|
| 10 |
+
148
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Yaxin Luo $^{1}$ , Gen Luo $^{2\\dagger}$ , Jiayi Ji $^{3,4}$ , Yiyi Zhou $^{3}$ , Xiaoshuai Sun $^{3}$ , Zhiqiang Shen $^{1}$ , Rongrong Ji $^{3}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
184,
|
| 19 |
+
174,
|
| 20 |
+
818,
|
| 21 |
+
191
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ MBZUAI $^{2}$ OpenGVLab, Shanghai AI Laboratory \n $^{3}$ Xiamen University $^{4}$ National University of Singapore",
|
| 28 |
+
"bbox": [
|
| 29 |
+
307,
|
| 30 |
+
200,
|
| 31 |
+
691,
|
| 32 |
+
233
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Project Page: Gamma-MOD",
|
| 39 |
+
"bbox": [
|
| 40 |
+
398,
|
| 41 |
+
241,
|
| 42 |
+
598,
|
| 43 |
+
257
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "ABSTRACT",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
450,
|
| 53 |
+
297,
|
| 54 |
+
547,
|
| 55 |
+
313
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "Despite the significant progress in multimodal large language models (MLLMs), their high computational cost remains a barrier to real-world deployment. Inspired by the mixture of depths (MoDs) in natural language processing, we aim to address this limitation from the perspective of \"activated tokens\". Our key insight is that if most tokens are redundant for the layer computation, then can be skipped directly via the MoD layer. However, directly converting the dense layers of MLLMs to MoD layers leads to substantial performance degradation. To address this issue, we propose an innovative MoD adaptation strategy for existing MLLMs called $\\gamma$ -MoD. In $\\gamma$ -MoD, a novel metric is proposed to guide the deployment of MoDs in the MLLM, namely rank of attention maps (ARank). Through ARank, we can effectively identify which layer is redundant and should be replaced with the MoD layer. Based on ARank, we further propose two novel designs to maximize the computational sparsity of MLLM while maintaining its performance, namely shared vision-language router and masked routing learning. With these designs, more than $90\\%$ dense layers of the MLLM can be effectively converted to the MoD ones. To validate our method, we apply it to three popular MLLMs, and conduct extensive experiments on 9 benchmark datasets. Experimental results not only validate the significant efficiency benefit of $\\gamma$ -MoD to existing MLLMs but also confirm its generalization ability on various MLLMs. For example, with a minor performance drop, i.e., $-0.9\\%$ , $\\gamma$ -MoD can reduce the training and inference time of LLaVA-HR by $31.0\\%$ and $53.2\\%$ , respectively.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
228,
|
| 64 |
+
329,
|
| 65 |
+
769,
|
| 66 |
+
623
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "1 INTRODUCTION",
|
| 73 |
+
"text_level": 1,
|
| 74 |
+
"bbox": [
|
| 75 |
+
173,
|
| 76 |
+
650,
|
| 77 |
+
336,
|
| 78 |
+
666
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "Recent years have witnessed the great success of large language models (LLMs) in natural language processing (NLP) (Achiam et al., 2023; Touvron et al., 2023; Cai et al., 2024b), which attracts increasing attentions in extending LLMs to vision-language (VL) tasks. Despite the progress, recent multimodal large language models (MLLMs) (Liu et al., 2024d;c; Chen et al., 2024a; Alayrac et al., 2022) are often criticized by their expensive computational costs. For example, the inference speed of existing MLLMs like LLaVA-HR (Luo et al., 2024) is still far from practical requirements, e.g., 4.7 samples per second. Driven by the progress of NLP, recent advances have employed the mixture-of-experts (MoEs) (Lin et al., 2024a; Jiang et al., 2024) to MLLMs to reduce the \"activated parameters\", thus achieving trade-off between efficiency and performance.",
|
| 85 |
+
"bbox": [
|
| 86 |
+
169,
|
| 87 |
+
681,
|
| 88 |
+
826,
|
| 89 |
+
809
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "text",
|
| 95 |
+
"text": "Orthogonal to MoEs, we aim to tackle the efficiency bottleneck of MLLMs from the perspective of \"activated tokens\". As shown in Fig. 1 (a), a large number of tokens are less important in the computation, such as visual background and prepositional words. However, existing MoEs still allocate the same experts to all input tokens, leading to redundant computational costs. A promising solution to this issue is the recently proposed mixture-of-depths (MoDs) in NLP (Raposo et al., 2024), which equips each token with a router to determine whether a module should be computed.",
|
| 96 |
+
"bbox": [
|
| 97 |
+
169,
|
| 98 |
+
814,
|
| 99 |
+
828,
|
| 100 |
+
902
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "header",
|
| 106 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 107 |
+
"bbox": [
|
| 108 |
+
171,
|
| 109 |
+
32,
|
| 110 |
+
478,
|
| 111 |
+
47
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "page_footnote",
|
| 117 |
+
"text": "†Corresponding author.",
|
| 118 |
+
"bbox": [
|
| 119 |
+
199,
|
| 120 |
+
910,
|
| 121 |
+
341,
|
| 122 |
+
924
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "page_number",
|
| 128 |
+
"text": "1",
|
| 129 |
+
"bbox": [
|
| 130 |
+
493,
|
| 131 |
+
948,
|
| 132 |
+
503,
|
| 133 |
+
959
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "image",
|
| 139 |
+
"img_path": "images/f1ee1dc2ccdc41c0873b4e55fedd21de34d748a47a91de0c71650d99df1a9860.jpg",
|
| 140 |
+
"image_caption": [
|
| 141 |
+
"(a) Attention maps of different layers in LLaVA-HR",
|
| 142 |
+
"Figure 1: Visualization of attention maps in the MLLM and comparison of MoE with MoD. (a) Lower-rank layers often exhibit redundancy in their attention computation. (b) Different from MoE, MoD achieves the computational sparsity from the perspective of \"activated token\", where the computational budget is dynamically allocated to each token."
|
| 143 |
+
],
|
| 144 |
+
"image_footnote": [],
|
| 145 |
+
"bbox": [
|
| 146 |
+
174,
|
| 147 |
+
99,
|
| 148 |
+
496,
|
| 149 |
+
279
|
| 150 |
+
],
|
| 151 |
+
"page_idx": 1
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"type": "image",
|
| 155 |
+
"img_path": "images/b8ec55006bcb4e333be4f29f1122dd41d59c5a4242e9c6385ce69903e4ded3b7.jpg",
|
| 156 |
+
"image_caption": [
|
| 157 |
+
"(b) Comparison of MoE and MoD"
|
| 158 |
+
],
|
| 159 |
+
"image_footnote": [],
|
| 160 |
+
"bbox": [
|
| 161 |
+
500,
|
| 162 |
+
102,
|
| 163 |
+
818,
|
| 164 |
+
276
|
| 165 |
+
],
|
| 166 |
+
"page_idx": 1
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"text": "However, recent MoDs (Raposo et al., 2024) typically require pre-training LLMs from scratch, and their employment on MLLMs still remains under-explored.",
|
| 171 |
+
"bbox": [
|
| 172 |
+
169,
|
| 173 |
+
407,
|
| 174 |
+
823,
|
| 175 |
+
438
|
| 176 |
+
],
|
| 177 |
+
"page_idx": 1
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"text": "In this paper, we focus on the efficient adaptation of MoDs to existing MLLMs. In particular, our goal is to maximize the computational sparsity of MLLMs while maintaining competitive performance. However, directly converting all dense layers of MLLMs to MoD layers leads to significant performance degradation, e.g., $-33.3\\%$ of LLaVA-HR (Luo et al., 2024) on TextVQA (Singh et al., 2019). In practice, we observe that such issue is mainly caused by two aspects. Firstly, the deployment of MoDs lacks a practical guidance to measure the layer redundancy, thus undermining the necessary dense layers. As illustrated in Fig. 1 (a), attention patterns vary significantly across layers, and some layers exhibit less redundancy. Additionally, the setting of MLLMs, e.g., input modality, differs substantially from that of LLMs, making the direct adaptation of MoDs suboptimal.",
|
| 182 |
+
"bbox": [
|
| 183 |
+
169,
|
| 184 |
+
443,
|
| 185 |
+
826,
|
| 186 |
+
570
|
| 187 |
+
],
|
| 188 |
+
"page_idx": 1
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "text",
|
| 192 |
+
"text": "To overcome these limitations, we first propose a novel metric to guide the deployment of MoDs in MLLMs, called the rank of attention maps (ARank). Our key insight is that lower-rank attention maps indicate that fewer tokens are necessary for computation. As shown in Fig. 1 (a), most of tokens of Layer-4 are assigned small attention weights, contributing minimally to the final output. This provides a valuable hint for us to replace the redundant layer with the MoD one under the guidance of ARank. In practice, the calculation of ARank is both efficient and flexible. Empirically, we find that the average ARank always keeps the similar despite the change of samples. Therefore, randomly sampling a small amount of data can already accurately estimate the ARanks.",
|
| 193 |
+
"bbox": [
|
| 194 |
+
169,
|
| 195 |
+
575,
|
| 196 |
+
823,
|
| 197 |
+
688
|
| 198 |
+
],
|
| 199 |
+
"page_idx": 1
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "text",
|
| 203 |
+
"text": "Based on the ARank, we propose an innovative MoD adaptation strategy for existing MLLMs, called $\\gamma$ -MoD. Specifically, $\\gamma$ -MoD is a plug-and-play adaptation approach that can be seamlessly integrated into current MLLMs via instruction tuning. In $\\gamma$ -MoD, two novel designs are adopted to maximize its benefits to MLLMs, namely shared vision-language router and masked routing learning. The shared vision-language router performs routing on the entire multimodal sequence and uses a weight-sharing strategy to facilitate optimization. Then, masked routing learning is introduced to prevent critical tokens from being skipped during training, i.e., instruction tokens. With these designs, over $90\\%$ of dense layers can be converted to MoD layers with minimal performance sacrifice, resulting in even larger computational sparsity than the native MoD-based LLM (Raposo et al., 2024).",
|
| 204 |
+
"bbox": [
|
| 205 |
+
169,
|
| 206 |
+
694,
|
| 207 |
+
826,
|
| 208 |
+
820
|
| 209 |
+
],
|
| 210 |
+
"page_idx": 1
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"type": "text",
|
| 214 |
+
"text": "To validate $\\gamma$ -MoD, we apply it to two popular MLLMs and conduct extensive experiments on 9 vision-language benchmarks. Experimental results show that $\\gamma$ -MoD significantly improves the training and inference efficiency of existing MLLMs while keeping their performance competitive. For example, $\\gamma$ -MoD reduces $51.6\\%$ Flops, $31\\%$ training time and $53.2\\%$ inference time for LLaVA-HR (Luo et al., 2024), but its average performance decline is only $-1.5\\%$ . More importantly, the great generalization ability of $\\gamma$ -MoD is also witnessed on different MLLM structures and parameter sizes. Overall, the contribution of the paper can be summarized in three folds:",
|
| 215 |
+
"bbox": [
|
| 216 |
+
169,
|
| 217 |
+
825,
|
| 218 |
+
828,
|
| 219 |
+
925
|
| 220 |
+
],
|
| 221 |
+
"page_idx": 1
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "header",
|
| 225 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 226 |
+
"bbox": [
|
| 227 |
+
173,
|
| 228 |
+
32,
|
| 229 |
+
478,
|
| 230 |
+
47
|
| 231 |
+
],
|
| 232 |
+
"page_idx": 1
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"type": "page_number",
|
| 236 |
+
"text": "2",
|
| 237 |
+
"bbox": [
|
| 238 |
+
493,
|
| 239 |
+
948,
|
| 240 |
+
503,
|
| 241 |
+
959
|
| 242 |
+
],
|
| 243 |
+
"page_idx": 1
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"type": "list",
|
| 247 |
+
"sub_type": "text",
|
| 248 |
+
"list_items": [
|
| 249 |
+
"- We present a novel mixture-of-depth (MoD) framework for the sparse computation of existing MLLMs, namely $\\gamma$ -MoD, which can seamlessly convert most dense layers in MLLMs to the sparse MoD layers.",
|
| 250 |
+
"- We propose an innovative metric to measure the layer redundancy, namely rank of attention maps (ARank). With ARank, we can best determine that which dense layer should be convert to the MoD one.",
|
| 251 |
+
"- We carefully explore the design of $\\gamma$ -MoD in existing MLLMs, including the shared vision-language router and the masked routing learning, which can achieve up to $51.6\\%$ computational sparsity with minor performance sacrifice. Extensive experiments also confirm the generalization ability of $\\gamma$ -MoD."
|
| 252 |
+
],
|
| 253 |
+
"bbox": [
|
| 254 |
+
215,
|
| 255 |
+
103,
|
| 256 |
+
825,
|
| 257 |
+
260
|
| 258 |
+
],
|
| 259 |
+
"page_idx": 2
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"type": "text",
|
| 263 |
+
"text": "2 RELATED WORK",
|
| 264 |
+
"text_level": 1,
|
| 265 |
+
"bbox": [
|
| 266 |
+
171,
|
| 267 |
+
282,
|
| 268 |
+
346,
|
| 269 |
+
300
|
| 270 |
+
],
|
| 271 |
+
"page_idx": 2
|
| 272 |
+
},
|
| 273 |
+
{
|
| 274 |
+
"type": "text",
|
| 275 |
+
"text": "2.1 MULTIMODAL LARGE LANGUAGE MODELS",
|
| 276 |
+
"text_level": 1,
|
| 277 |
+
"bbox": [
|
| 278 |
+
171,
|
| 279 |
+
316,
|
| 280 |
+
517,
|
| 281 |
+
332
|
| 282 |
+
],
|
| 283 |
+
"page_idx": 2
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"type": "text",
|
| 287 |
+
"text": "Large language models (LLMs) (Achiam et al., 2023; Touvron et al., 2023; Jiang et al., 2024; Almazrouei et al., 2023; Cai et al., 2024b; Abdin et al., 2024; Shen et al., 2023) have proven their strong capabilities in various natural language processing tasks (Paperno et al., 2016; Fyodorov et al., 2000; Reddy et al., 2019; Ziegler et al., 2019). Motivated by this, numerous efforts (Liu et al., 2024d; Bai et al., 2023a; Ye et al., 2023; Dai et al., 2023; Chen et al., 2024b; Li et al., 2024b; Tong et al., 2024; Rasheed et al., 2024; Dong et al., 2023; Xie et al., 2024; Zhou et al., 2024; Chen et al., 2023; Alayrac et al., 2022; Sun et al., 2024) have been devoted into extending LLMs to multimodal large language models (MLLMs). Among them, the most representative work is LLaVA (Liu et al., 2024d), which uses a lightweight project to connect a visual encoder and an LLM. This simple framework has now become the de-facto paradigm in the community, empowering a set of MLLMs like Mini-Gemini (Li et al., 2024b) and InternVL (Chen et al., 2024b). Recently, researchers have shifted their attention to high-resolution MLLMs. For example, LLaVA-NexT (Liu et al., 2024c) and InternVL-1.5 (Chen et al., 2024a) adopt the dynamic image slicing strategy for high-resolution adaptation. LLaVA-HR (Luo et al., 2024) further propose a dual-branch structure to reduce the cost of high-resolution MLLMs. Despite the effectiveness, existing high-resolution MLLMs (Liu et al., 2024c; Li et al., 2024a) will produce a much longer input tokens, resulting in prohibitively expensive computational costs. In this paper, the proposed $\\gamma$ -MoD can greatly overcome the efficiency bottleneck of existing MLLMs, which is significant for their practical applications.",
|
| 288 |
+
"bbox": [
|
| 289 |
+
169,
|
| 290 |
+
344,
|
| 291 |
+
826,
|
| 292 |
+
595
|
| 293 |
+
],
|
| 294 |
+
"page_idx": 2
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"type": "text",
|
| 298 |
+
"text": "2.2 SPARSE COMPUTATION FOR LLMS",
|
| 299 |
+
"text_level": 1,
|
| 300 |
+
"bbox": [
|
| 301 |
+
171,
|
| 302 |
+
614,
|
| 303 |
+
455,
|
| 304 |
+
630
|
| 305 |
+
],
|
| 306 |
+
"page_idx": 2
|
| 307 |
+
},
|
| 308 |
+
{
|
| 309 |
+
"type": "text",
|
| 310 |
+
"text": "Recently, an influx of attentions have been focused on the sparse computation of LLMs. Specifically, the mixture of experts (MoEs) are the most popular technology in the community (McKinzie et al., 2024; Cai et al., 2024a; Xue et al., 2024), which dynamically activates part of expert networks for each token, thereby achieving trade-offs between capability and efficiency. For instance, MoE-LLaVA (Lin et al., 2024a) proposed a novel approach to convert a dense MLLM to a mixture-of-expert structure. However, these methods often require additional training costs to realize the adaptation to MLLMs. Orthogonal to MoE, Raposo et al. (2024) proposed the mixture of depths (MoDs) to dynamically allocate computations for each token. Compared to MoE, the main principle of MoD is to reduce the \"activated tokens\" instead of the \"activated parameters\". This paradigm has shown great potentials for the sparse computation of LLMs, but its potential on MLLM is still under exploration. Recently, token-based pruning methods have emerged as a new promising solution. The most representative one is the FastV (Chen et al., 2025), which directly deletes the unimportant visual tokens according to their attention scores, thus achieving significant computational savings without compromising performance. Orthogonal to these works, we are the first to explore MoDs on MLLMs, which can seamlessly realize sparse computations of exiting MLLMs on both visual and textual tokens.",
|
| 311 |
+
"bbox": [
|
| 312 |
+
169,
|
| 313 |
+
642,
|
| 314 |
+
828,
|
| 315 |
+
852
|
| 316 |
+
],
|
| 317 |
+
"page_idx": 2
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"type": "text",
|
| 321 |
+
"text": "3 PRELIMINARIES",
|
| 322 |
+
"text_level": 1,
|
| 323 |
+
"bbox": [
|
| 324 |
+
171,
|
| 325 |
+
875,
|
| 326 |
+
339,
|
| 327 |
+
891
|
| 328 |
+
],
|
| 329 |
+
"page_idx": 2
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"type": "text",
|
| 333 |
+
"text": "We first recap the mechanism of Mixture of Experts (MoEs) and Mixture of Depths (MoDs).",
|
| 334 |
+
"bbox": [
|
| 335 |
+
169,
|
| 336 |
+
907,
|
| 337 |
+
774,
|
| 338 |
+
925
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 2
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "header",
|
| 344 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 345 |
+
"bbox": [
|
| 346 |
+
171,
|
| 347 |
+
32,
|
| 348 |
+
478,
|
| 349 |
+
47
|
| 350 |
+
],
|
| 351 |
+
"page_idx": 2
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"type": "page_number",
|
| 355 |
+
"text": "3",
|
| 356 |
+
"bbox": [
|
| 357 |
+
493,
|
| 358 |
+
948,
|
| 359 |
+
504,
|
| 360 |
+
959
|
| 361 |
+
],
|
| 362 |
+
"page_idx": 2
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"type": "image",
|
| 366 |
+
"img_path": "images/dbc42071578530760961e95876f28195b547ca483cb309ca6996bab02f27db8d.jpg",
|
| 367 |
+
"image_caption": [
|
| 368 |
+
"Stage-1: Vision-Language Alignment",
|
| 369 |
+
"Stage-2: Instruction Tuning",
|
| 370 |
+
"Figure 2: Illustration of our $\\gamma$ -MoD adaptation on LLaVA-HR. $\\gamma$ -MoD is a plug-and-play approach that can be directly applied in existing MLLMs. After vision-language alignment, $\\gamma$ -MoD can replace most redundant layers with MoD ones via the rank-based redundancy estimation."
|
| 371 |
+
],
|
| 372 |
+
"image_footnote": [],
|
| 373 |
+
"bbox": [
|
| 374 |
+
181,
|
| 375 |
+
102,
|
| 376 |
+
823,
|
| 377 |
+
335
|
| 378 |
+
],
|
| 379 |
+
"page_idx": 3
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"text": "Mixture of experts. In particular, the main principle of MoE is to reduce the \"activated parameters\" in dense models. Existing MoE-based LLMs (Dai et al., 2024; Liu et al., 2024a; Lin et al., 2024a; Jiang et al., 2024) and MLLMs (Luo et al., 2024; Chen et al., 2024a; Liu et al., 2024d) often contain multiple FFN modules in their layers, also termed experts. During training and inference, only few experts are activated to participate in computations, thus retaining the trade-offs between performance and efficiency. Given input features $x \\in \\mathbb{R}^{l \\times d}$ , MoE mechanism can be defined by",
|
| 384 |
+
"bbox": [
|
| 385 |
+
169,
|
| 386 |
+
417,
|
| 387 |
+
826,
|
| 388 |
+
502
|
| 389 |
+
],
|
| 390 |
+
"page_idx": 3
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "equation",
|
| 394 |
+
"text": "\n$$\nx = x + \\sum_ {j = 1} ^ {k} \\mathcal {D} _ {j} (x) R _ {j} (x). \\tag {1}\n$$\n",
|
| 395 |
+
"text_format": "latex",
|
| 396 |
+
"bbox": [
|
| 397 |
+
398,
|
| 398 |
+
511,
|
| 399 |
+
825,
|
| 400 |
+
554
|
| 401 |
+
],
|
| 402 |
+
"page_idx": 3
|
| 403 |
+
},
|
| 404 |
+
{
|
| 405 |
+
"type": "text",
|
| 406 |
+
"text": "Here, $\\mathcal{D}(\\cdot)$ denotes the expert layer, i.e., FFN. $k$ is the number of activated experts, and $R_{j}(\\cdot)$ is the corresponding routing function. In practice, top-k experts are selected according to their routing scores, where $k$ is much smaller than the total number of experts $K$ .",
|
| 407 |
+
"bbox": [
|
| 408 |
+
169,
|
| 409 |
+
561,
|
| 410 |
+
823,
|
| 411 |
+
604
|
| 412 |
+
],
|
| 413 |
+
"page_idx": 3
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"type": "text",
|
| 417 |
+
"text": "Mixture of depths. Different from MoEs, MoDs aim to improve the model efficiency via the reduction of \"activated tokens\". Compared to MoEs, the routing mechanism of MoDs performs on input tokens, and most tokens will directly skip the dense layer in MLLMs. Thus, MoDs can be written as",
|
| 418 |
+
"bbox": [
|
| 419 |
+
169,
|
| 420 |
+
611,
|
| 421 |
+
823,
|
| 422 |
+
666
|
| 423 |
+
],
|
| 424 |
+
"page_idx": 3
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"type": "equation",
|
| 428 |
+
"text": "\n$$\nx _ {j} = \\left\\{ \\begin{array}{l l} x _ {j} + \\mathcal {D} \\left(x _ {j}\\right) R \\left(x _ {j}\\right) & \\text {i f} R \\left(x _ {j}\\right) \\geq \\delta_ {s}, \\\\ x _ {j} & \\text {i f} R \\left(x _ {j}\\right) < \\delta_ {s}, \\end{array} \\right. \\tag {2}\n$$\n",
|
| 429 |
+
"text_format": "latex",
|
| 430 |
+
"bbox": [
|
| 431 |
+
351,
|
| 432 |
+
667,
|
| 433 |
+
825,
|
| 434 |
+
700
|
| 435 |
+
],
|
| 436 |
+
"page_idx": 3
|
| 437 |
+
},
|
| 438 |
+
{
|
| 439 |
+
"type": "text",
|
| 440 |
+
"text": "where $x_{j} \\in \\mathbb{R}^{d}$ denotes the token vector in $x$ , and $\\delta_{s}$ is a routing threshold. As defined in Eq. 2, inactive tokens will directly skip the layer $\\mathcal{D}(\\cdot)$ to save the computational cost.",
|
| 441 |
+
"bbox": [
|
| 442 |
+
169,
|
| 443 |
+
708,
|
| 444 |
+
826,
|
| 445 |
+
738
|
| 446 |
+
],
|
| 447 |
+
"page_idx": 3
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"type": "text",
|
| 451 |
+
"text": "Discussion. In existing MLLMs (Lin et al., 2024a), MoE is typically used to efficiently scale up the model size, while its computations are not directly reduced. In contrast, MoD can perform as a plug-and-play module to save the cost of a common dense layer, which is more significant to the efficient scenario. Unfortunately, the adaptation of MoD to existing MLLMs is still under-explored, and its practical use in LLMs also requires expensive pretraining.",
|
| 452 |
+
"bbox": [
|
| 453 |
+
169,
|
| 454 |
+
744,
|
| 455 |
+
826,
|
| 456 |
+
814
|
| 457 |
+
],
|
| 458 |
+
"page_idx": 3
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "text",
|
| 462 |
+
"text": "4 METHOD",
|
| 463 |
+
"text_level": 1,
|
| 464 |
+
"bbox": [
|
| 465 |
+
171,
|
| 466 |
+
835,
|
| 467 |
+
282,
|
| 468 |
+
849
|
| 469 |
+
],
|
| 470 |
+
"page_idx": 3
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"type": "text",
|
| 474 |
+
"text": "4.1 OVERVIEW",
|
| 475 |
+
"text_level": 1,
|
| 476 |
+
"bbox": [
|
| 477 |
+
171,
|
| 478 |
+
869,
|
| 479 |
+
290,
|
| 480 |
+
881
|
| 481 |
+
],
|
| 482 |
+
"page_idx": 3
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"type": "text",
|
| 486 |
+
"text": "In this paper, we propose a novel method to efficiently deploy MoDs to existing MLLMs, namely $\\gamma$ -MoD. The core principle of $\\gamma$ -MoD is to identify redundant MLLM layers via a novel metric called",
|
| 487 |
+
"bbox": [
|
| 488 |
+
169,
|
| 489 |
+
895,
|
| 490 |
+
823,
|
| 491 |
+
925
|
| 492 |
+
],
|
| 493 |
+
"page_idx": 3
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"type": "header",
|
| 497 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 498 |
+
"bbox": [
|
| 499 |
+
171,
|
| 500 |
+
32,
|
| 501 |
+
478,
|
| 502 |
+
47
|
| 503 |
+
],
|
| 504 |
+
"page_idx": 3
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"type": "page_number",
|
| 508 |
+
"text": "4",
|
| 509 |
+
"bbox": [
|
| 510 |
+
493,
|
| 511 |
+
948,
|
| 512 |
+
503,
|
| 513 |
+
959
|
| 514 |
+
],
|
| 515 |
+
"page_idx": 3
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"type": "image",
|
| 519 |
+
"img_path": "images/cb1d7a508bcf184e0a2c4365a5bb9f819295ba724e67b66ac07b91b43b286758.jpg",
|
| 520 |
+
"image_caption": [
|
| 521 |
+
"Figure 3: Visualization of ARank based on different tasks (left) and sample sizes (right). The horizontal axis represents the layer index of LLaVA-HR. The darker color indicates the larger ARank."
|
| 522 |
+
],
|
| 523 |
+
"image_footnote": [],
|
| 524 |
+
"bbox": [
|
| 525 |
+
174,
|
| 526 |
+
102,
|
| 527 |
+
514,
|
| 528 |
+
196
|
| 529 |
+
],
|
| 530 |
+
"page_idx": 4
|
| 531 |
+
},
|
| 532 |
+
{
|
| 533 |
+
"type": "image",
|
| 534 |
+
"img_path": "images/bfdf8f3c59b20f001ddd2af9d262778cedd121fc29b5cac7d2ee97b037e7d5c8.jpg",
|
| 535 |
+
"image_caption": [],
|
| 536 |
+
"image_footnote": [],
|
| 537 |
+
"bbox": [
|
| 538 |
+
514,
|
| 539 |
+
102,
|
| 540 |
+
821,
|
| 541 |
+
196
|
| 542 |
+
],
|
| 543 |
+
"page_idx": 4
|
| 544 |
+
},
|
| 545 |
+
{
|
| 546 |
+
"type": "text",
|
| 547 |
+
"text": "rank of attention maps (ARank) and replace them with the proposed MoD layer. Therefore, the deployment of $\\gamma$ -MoD in the given MLLM, i.e., $\\mathcal{F}_{\\mathrm{MLLM}}(\\cdot)$ , can be formulated by",
|
| 548 |
+
"bbox": [
|
| 549 |
+
169,
|
| 550 |
+
241,
|
| 551 |
+
823,
|
| 552 |
+
271
|
| 553 |
+
],
|
| 554 |
+
"page_idx": 4
|
| 555 |
+
},
|
| 556 |
+
{
|
| 557 |
+
"type": "equation",
|
| 558 |
+
"text": "\n$$\n\\mathcal {F} _ {\\mathrm {M L L M}} = \\mathcal {G} _ {0} \\circ \\mathcal {G} _ {1} \\circ \\mathcal {G} _ {2} \\dots \\circ \\mathcal {G} _ {n},\n$$\n",
|
| 559 |
+
"text_format": "latex",
|
| 560 |
+
"bbox": [
|
| 561 |
+
392,
|
| 562 |
+
277,
|
| 563 |
+
601,
|
| 564 |
+
292
|
| 565 |
+
],
|
| 566 |
+
"page_idx": 4
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"type": "text",
|
| 570 |
+
"text": "where $\\mathcal{G}_i = \\left\\{ \\begin{array}{ll}\\mathcal{D}_i & \\text{if}\\tau (\\mathcal{D}_i)\\geq \\delta_\\tau ,\\\\ \\mathcal{S}_i & \\text{if}\\tau (\\mathcal{D}_i) < \\delta_\\tau . \\end{array} \\right.$ (3)",
|
| 571 |
+
"bbox": [
|
| 572 |
+
367,
|
| 573 |
+
296,
|
| 574 |
+
825,
|
| 575 |
+
330
|
| 576 |
+
],
|
| 577 |
+
"page_idx": 4
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"type": "text",
|
| 581 |
+
"text": "Here, $\\mathcal{G}(\\cdot)$ denotes the layer of the MLLM, where $S(\\cdot)$ and $\\mathcal{D}(\\cdot)$ indicate the dense layer and its MoD alternative, respectively. $\\tau (\\cdot)$ is a function to estimate the redundancy of the given dense layer $\\mathcal{D}_i$ and $\\delta_{\\tau}$ is a threshold. Given the architecture in Eq. 3, $\\gamma$ -MoD aims to maximize the sparsity while maintaining the performance. Thus, the optimization objective of $\\gamma$ -MoD can be written as:",
|
| 582 |
+
"bbox": [
|
| 583 |
+
169,
|
| 584 |
+
338,
|
| 585 |
+
826,
|
| 586 |
+
396
|
| 587 |
+
],
|
| 588 |
+
"page_idx": 4
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"type": "equation",
|
| 592 |
+
"text": "\n$$\n\\arg \\min _ {\\theta , \\theta_ {r}} \\mathcal {L} _ {o b j} \\left(\\mathcal {F} _ {\\mathrm {M L L M}} \\left(x ^ {0}; \\theta\\right)\\right) + \\sum_ {i = 1} ^ {k} \\mathcal {L} _ {a u g} \\left(R \\left(x ^ {i}; \\theta_ {r}\\right)\\right),\n$$\n",
|
| 593 |
+
"text_format": "latex",
|
| 594 |
+
"bbox": [
|
| 595 |
+
320,
|
| 596 |
+
402,
|
| 597 |
+
676,
|
| 598 |
+
441
|
| 599 |
+
],
|
| 600 |
+
"page_idx": 4
|
| 601 |
+
},
|
| 602 |
+
{
|
| 603 |
+
"type": "text",
|
| 604 |
+
"text": "s.t. $\\frac{1}{k\\cdot d}\\sum_{i = 1}^{k}\\sum_{j = 1}^{d}\\mathbb{I}_{R(x_j^i) < \\delta_s} = \\alpha .$ (4)",
|
| 605 |
+
"bbox": [
|
| 606 |
+
320,
|
| 607 |
+
440,
|
| 608 |
+
823,
|
| 609 |
+
487
|
| 610 |
+
],
|
| 611 |
+
"page_idx": 4
|
| 612 |
+
},
|
| 613 |
+
{
|
| 614 |
+
"type": "text",
|
| 615 |
+
"text": "Here, $\\mathcal{L}_{obj}$ and $\\mathcal{L}_{aug}$ denote the auto-regressive loss and the routing loss for the router $R(\\cdot)$ , respectively. $x^{i}$ is the input tokens of $i$ -th layer, and $\\alpha$ is the pre-defined sparse target. $\\mathbb{I}_{R(x_{j}^{i}) < \\delta_{s}} \\to \\{0,1\\}$ is the indicator function, which is equal to 1 when $R(x_{j}^{i}) < \\delta_{s}$ . And $k$ is the number of layers, $d$ denotes the number of tokens per layer.",
|
| 616 |
+
"bbox": [
|
| 617 |
+
169,
|
| 618 |
+
494,
|
| 619 |
+
826,
|
| 620 |
+
559
|
| 621 |
+
],
|
| 622 |
+
"page_idx": 4
|
| 623 |
+
},
|
| 624 |
+
{
|
| 625 |
+
"type": "text",
|
| 626 |
+
"text": "4.2 RANK-BASED REDUNDANCY ESTIMATION",
|
| 627 |
+
"text_level": 1,
|
| 628 |
+
"bbox": [
|
| 629 |
+
171,
|
| 630 |
+
574,
|
| 631 |
+
508,
|
| 632 |
+
588
|
| 633 |
+
],
|
| 634 |
+
"page_idx": 4
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "text",
|
| 638 |
+
"text": "The key challenge of $\\gamma$ -MoD is how to identify the dense layer that should be converted to the MoD one. The original MoD-based LLM (Raposo et al., 2024) overcomes this issue by the hand-craft attempt, which is still sub-optimal and time-consuming. However, in existing MLLMs, the LLM is already pre-trained on large scale of corpus, which can intuitively provide sufficient knowledge to achieve the process automatically.",
|
| 639 |
+
"bbox": [
|
| 640 |
+
169,
|
| 641 |
+
599,
|
| 642 |
+
823,
|
| 643 |
+
672
|
| 644 |
+
],
|
| 645 |
+
"page_idx": 4
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "text",
|
| 649 |
+
"text": "Motivated by this, we propose an innovative metric to estimate the token-wise redundancy of a layer in MLLM, namely rank of attention maps (ARank). In particular, given tokens $x^{i} \\in \\mathbb{R}^{l \\times d}$ of $i$ -th layer, ARank is defined by the average rank of attention maps:",
|
| 650 |
+
"bbox": [
|
| 651 |
+
169,
|
| 652 |
+
676,
|
| 653 |
+
826,
|
| 654 |
+
720
|
| 655 |
+
],
|
| 656 |
+
"page_idx": 4
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "equation",
|
| 660 |
+
"text": "\n$$\n\\tau \\left(x ^ {i}, \\mathcal {D} _ {i}\\right) = \\frac {1}{n _ {h}} \\sum_ {h = 1} ^ {n _ {h}} \\operatorname {r a n k} \\left(A _ {h}\\right), \\tag {5}\n$$\n",
|
| 661 |
+
"text_format": "latex",
|
| 662 |
+
"bbox": [
|
| 663 |
+
383,
|
| 664 |
+
726,
|
| 665 |
+
825,
|
| 666 |
+
766
|
| 667 |
+
],
|
| 668 |
+
"page_idx": 4
|
| 669 |
+
},
|
| 670 |
+
{
|
| 671 |
+
"type": "text",
|
| 672 |
+
"text": "where $A_{h} = (x^{i}W_{Q}^{h})(x^{i}W_{K}^{h})^{T}$",
|
| 673 |
+
"bbox": [
|
| 674 |
+
383,
|
| 675 |
+
768,
|
| 676 |
+
611,
|
| 677 |
+
787
|
| 678 |
+
],
|
| 679 |
+
"page_idx": 4
|
| 680 |
+
},
|
| 681 |
+
{
|
| 682 |
+
"type": "text",
|
| 683 |
+
"text": "Here, $\\mathrm{rank}(\\cdot)$ denotes the rank calculation. $n_h$ is the number of attention heads. $A_h \\in \\mathbb{R}^{l \\times l}$ is the attention map in $h$ -th head, and $W_Q^h \\in \\mathbb{R}^{d \\times \\frac{d}{h}}$ and $W_K^h \\in \\mathbb{R}^{d \\times \\frac{d}{h}}$ are the corresponding weights.",
|
| 684 |
+
"bbox": [
|
| 685 |
+
169,
|
| 686 |
+
794,
|
| 687 |
+
823,
|
| 688 |
+
829
|
| 689 |
+
],
|
| 690 |
+
"page_idx": 4
|
| 691 |
+
},
|
| 692 |
+
{
|
| 693 |
+
"type": "text",
|
| 694 |
+
"text": "Theoretical analysis of ARank. In Eq. 5, attention map $A_{h}$ can well reflect the contribution of different tokens. Thus, $A_{h}$ with a low rank suggests that most tokens are less informative. To validate this, we conduct a SVD (G.H.Goulb & C.Reinsch, 1971) analysis for $A_{h}$ , which is written as",
|
| 695 |
+
"bbox": [
|
| 696 |
+
169,
|
| 697 |
+
834,
|
| 698 |
+
825,
|
| 699 |
+
877
|
| 700 |
+
],
|
| 701 |
+
"page_idx": 4
|
| 702 |
+
},
|
| 703 |
+
{
|
| 704 |
+
"type": "equation",
|
| 705 |
+
"text": "\n$$\nA _ {h} = \\sum_ {i = 1} ^ {r} \\sigma_ {i} u _ {i} v _ {i} ^ {T} = \\sum_ {i = 1} ^ {r ^ {\\prime}} \\sigma_ {i} u _ {i} v _ {i} ^ {T} + \\sum_ {i = r ^ {\\prime} + 1} ^ {r} \\sigma_ {i} u _ {i} v _ {i} ^ {T}, \\tag {6}\n$$\n",
|
| 706 |
+
"text_format": "latex",
|
| 707 |
+
"bbox": [
|
| 708 |
+
320,
|
| 709 |
+
883,
|
| 710 |
+
825,
|
| 711 |
+
926
|
| 712 |
+
],
|
| 713 |
+
"page_idx": 4
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "header",
|
| 717 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 718 |
+
"bbox": [
|
| 719 |
+
171,
|
| 720 |
+
32,
|
| 721 |
+
478,
|
| 722 |
+
47
|
| 723 |
+
],
|
| 724 |
+
"page_idx": 4
|
| 725 |
+
},
|
| 726 |
+
{
|
| 727 |
+
"type": "page_number",
|
| 728 |
+
"text": "5",
|
| 729 |
+
"bbox": [
|
| 730 |
+
493,
|
| 731 |
+
946,
|
| 732 |
+
503,
|
| 733 |
+
959
|
| 734 |
+
],
|
| 735 |
+
"page_idx": 4
|
| 736 |
+
},
|
| 737 |
+
{
|
| 738 |
+
"type": "text",
|
| 739 |
+
"text": "where $r$ is the rank of $A_{h}$ and $r' \\ll r$ is a constant value. $\\sigma_{i}, u_{i}$ and $v_{i}$ denote the $i$ -th single value, left single vector and right single vector of $A_{h}$ , respectively. As shown in Eq. 6, $A_{h}$ can be deposed to a matrix of rank $r'$ and additional information, i.e., $\\sum_{i=r'+1}^{r} \\sigma_{i} u_{i} v_{i}^{T}$ . Therefore, lower-rank attention map suggests higher redundancy, which implies that MoD can be deployed to skip most tokens.",
|
| 740 |
+
"bbox": [
|
| 741 |
+
169,
|
| 742 |
+
103,
|
| 743 |
+
826,
|
| 744 |
+
161
|
| 745 |
+
],
|
| 746 |
+
"page_idx": 5
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"type": "text",
|
| 750 |
+
"text": "Practical calculation of ARank. As defined in Eq. 5, it is still challenging to accurately calculate the ARank due to the variance of individual samples. Inspired by HRank (Lin et al., 2020), we estimate ARank using its expectation on a batch of samples. Different from HRank, we aim to estimate the layer redundancy by the rank of their attention maps, thus guiding the deployment of MoD. Specifically, ARank estimates layer redundancy based on the rank of attention maps, enabling its use in guiding the deployment of MoD. As shown in Fig. 3, we visualize the average ARank values of LLaVA-HR (Luo et al., 2024) across different input samples. These results demonstrate that the expected ARank remains largely consistent across tasks, indicating that a small batch size is sufficient for reliable computation. In our experiments, we set the sample size to 50 to balance computational efficiency and accuracy.",
|
| 751 |
+
"bbox": [
|
| 752 |
+
169,
|
| 753 |
+
166,
|
| 754 |
+
826,
|
| 755 |
+
308
|
| 756 |
+
],
|
| 757 |
+
"page_idx": 5
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "text",
|
| 761 |
+
"text": "4.3 MIXTURE-OF-DEPTH ADAPTATION",
|
| 762 |
+
"text_level": 1,
|
| 763 |
+
"bbox": [
|
| 764 |
+
171,
|
| 765 |
+
323,
|
| 766 |
+
459,
|
| 767 |
+
337
|
| 768 |
+
],
|
| 769 |
+
"page_idx": 5
|
| 770 |
+
},
|
| 771 |
+
{
|
| 772 |
+
"type": "text",
|
| 773 |
+
"text": "To maximize the effectiveness of MoDs to existing MLLMs, we carefully investigate the micro design of MoDs, including the shared vision-language router and the masked routing learning.",
|
| 774 |
+
"bbox": [
|
| 775 |
+
169,
|
| 776 |
+
349,
|
| 777 |
+
823,
|
| 778 |
+
378
|
| 779 |
+
],
|
| 780 |
+
"page_idx": 5
|
| 781 |
+
},
|
| 782 |
+
{
|
| 783 |
+
"type": "text",
|
| 784 |
+
"text": "Shared vision-language router. Conventional MoDs (Raposo et al., 2024) are designed for LLMs, so their routing is only performed on textual tokens. In MLLMs, such a strategy is sub-optimal due to the large redundancy of visual tokens (Jin et al., 2024; Kim et al., 2024). Therefore, the router of $\\gamma$ -MoD, i.e., $R(\\cdot)$ , aims to skip both visual and textual tokens, which is defined by",
|
| 785 |
+
"bbox": [
|
| 786 |
+
169,
|
| 787 |
+
383,
|
| 788 |
+
826,
|
| 789 |
+
441
|
| 790 |
+
],
|
| 791 |
+
"page_idx": 5
|
| 792 |
+
},
|
| 793 |
+
{
|
| 794 |
+
"type": "equation",
|
| 795 |
+
"text": "\n$$\nR (x) = \\operatorname {s o f t m a x} \\left(x W _ {R} + b _ {R}\\right), \\tag {7}\n$$\n",
|
| 796 |
+
"text_format": "latex",
|
| 797 |
+
"bbox": [
|
| 798 |
+
395,
|
| 799 |
+
448,
|
| 800 |
+
825,
|
| 801 |
+
465
|
| 802 |
+
],
|
| 803 |
+
"page_idx": 5
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"type": "text",
|
| 807 |
+
"text": "where $x = \\{q, a, t\\}$ denotes the vision-language tokens, which consist of question tokens $q \\in \\mathbb{R}^{l_q \\times d}$ , image tokens $a \\in \\mathbb{R}^{l_a \\times d}$ and textual response tokens $t \\in \\mathbb{R}^{l_t \\times d}$ . $W_R \\in \\mathbb{R}^{l \\times 2}$ and $b_R \\in \\mathbb{R}^2$ are the weights and bias, respectively. Notably, we use a binary softmax function to produce the routing probability, where $R(x)^0$ denotes the probability of skipping. Based on Eq. 7, we further share the router parameters for all MoD layers, which is significant for the stable optimization. To explain, the shared router receives more gradients from different layers, greatly facilitating its convergence at the beginning of training.",
|
| 808 |
+
"bbox": [
|
| 809 |
+
169,
|
| 810 |
+
472,
|
| 811 |
+
823,
|
| 812 |
+
571
|
| 813 |
+
],
|
| 814 |
+
"page_idx": 5
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "text",
|
| 818 |
+
"text": "Masked routing learning. During VL training, not all tokens contribute equally to the optimizing process. In particular, the skip of key tokens in the question, e.g., subject, will greatly hurt the generative training as the answer relies on these conditional elements. Therefore, we introduce a masked routing learning strategy to prevent these tokens from being dropped during training. In this case, the objective of the routing learning can be defined by",
|
| 819 |
+
"bbox": [
|
| 820 |
+
169,
|
| 821 |
+
578,
|
| 822 |
+
823,
|
| 823 |
+
648
|
| 824 |
+
],
|
| 825 |
+
"page_idx": 5
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "equation",
|
| 829 |
+
"text": "\n$$\n\\mathcal {L} _ {a u g} (x) = \\log \\left(R (x) ^ {1} \\cdot M _ {q}\\right) \\hat {R} + \\log \\left(1 - R (x) ^ {0} \\cdot M _ {q}\\right) (1 - \\hat {R}). \\tag {8}\n$$\n",
|
| 830 |
+
"text_format": "latex",
|
| 831 |
+
"bbox": [
|
| 832 |
+
281,
|
| 833 |
+
656,
|
| 834 |
+
825,
|
| 835 |
+
674
|
| 836 |
+
],
|
| 837 |
+
"page_idx": 5
|
| 838 |
+
},
|
| 839 |
+
{
|
| 840 |
+
"type": "text",
|
| 841 |
+
"text": "Here, $M_q \\in \\mathbb{R}^{l \\times 1}$ denotes the binary mask, where the question tokens are assigned to 0. $\\hat{R} \\in \\mathbb{R}$ is the one-hot vector, where the position with top-k routing scores are assigned to 1.",
|
| 842 |
+
"bbox": [
|
| 843 |
+
169,
|
| 844 |
+
683,
|
| 845 |
+
823,
|
| 846 |
+
713
|
| 847 |
+
],
|
| 848 |
+
"page_idx": 5
|
| 849 |
+
},
|
| 850 |
+
{
|
| 851 |
+
"type": "text",
|
| 852 |
+
"text": "The training scheme. Typically, MLLM training is divided into two stages: vision-language (VL) alignment and instruction tuning. $\\gamma$ -MoD is a plug-and-play adaptation method, which is deployed in the instruction tuning stage. Therefore, we can skip the VL alignment by directly using the well pre-trained projector. Then, $\\gamma$ -MoD then evaluates layer redundancy using the ARank metric and replaces redundant layers with MoD layers. During instruction tuning, the routing parameters are jointly optimized via the routing and task objectives. Importantly, all other training configurations can remain consistent with the original MLLM setup, ensuring seamless integration of $\\gamma$ -MoD.",
|
| 853 |
+
"bbox": [
|
| 854 |
+
169,
|
| 855 |
+
719,
|
| 856 |
+
826,
|
| 857 |
+
819
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 5
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "text",
|
| 863 |
+
"text": "5 EXPERIMENTS",
|
| 864 |
+
"text_level": 1,
|
| 865 |
+
"bbox": [
|
| 866 |
+
171,
|
| 867 |
+
838,
|
| 868 |
+
328,
|
| 869 |
+
853
|
| 870 |
+
],
|
| 871 |
+
"page_idx": 5
|
| 872 |
+
},
|
| 873 |
+
{
|
| 874 |
+
"type": "text",
|
| 875 |
+
"text": "5.1 DATASETS AND METRICS",
|
| 876 |
+
"text_level": 1,
|
| 877 |
+
"bbox": [
|
| 878 |
+
171,
|
| 879 |
+
869,
|
| 880 |
+
393,
|
| 881 |
+
883
|
| 882 |
+
],
|
| 883 |
+
"page_idx": 5
|
| 884 |
+
},
|
| 885 |
+
{
|
| 886 |
+
"type": "text",
|
| 887 |
+
"text": "We evaluate our $\\gamma$ -MoD on five MLLM benchmarks, which includes POPE (Li et al., 2023), MME (Fu et al., 2024), MMB (Liu et al., 2024e), MMMU (Yue et al., 2024) and MM-Vet (Yu et al., 2023). We",
|
| 888 |
+
"bbox": [
|
| 889 |
+
169,
|
| 890 |
+
895,
|
| 891 |
+
825,
|
| 892 |
+
925
|
| 893 |
+
],
|
| 894 |
+
"page_idx": 5
|
| 895 |
+
},
|
| 896 |
+
{
|
| 897 |
+
"type": "header",
|
| 898 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 899 |
+
"bbox": [
|
| 900 |
+
171,
|
| 901 |
+
32,
|
| 902 |
+
478,
|
| 903 |
+
47
|
| 904 |
+
],
|
| 905 |
+
"page_idx": 5
|
| 906 |
+
},
|
| 907 |
+
{
|
| 908 |
+
"type": "page_number",
|
| 909 |
+
"text": "6",
|
| 910 |
+
"bbox": [
|
| 911 |
+
493,
|
| 912 |
+
948,
|
| 913 |
+
504,
|
| 914 |
+
959
|
| 915 |
+
],
|
| 916 |
+
"page_idx": 5
|
| 917 |
+
},
|
| 918 |
+
{
|
| 919 |
+
"type": "table",
|
| 920 |
+
"img_path": "images/123a5bd0d8026fd1e39f28bbb01bea7b0fd7a5fee84caac715528135fd9ca867.jpg",
|
| 921 |
+
"table_caption": [
|
| 922 |
+
"Table 1: Comparison of different $\\gamma$ -MoD configurations on LLaVA-HR. The default setting used in the table is colored in gray. \"Q\" and \"A\" refer to question and answer tokens."
|
| 923 |
+
],
|
| 924 |
+
"table_footnote": [],
|
| 925 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">GQA</td><td colspan=\"2\">SQA</td><td colspan=\"2\">MMMU</td><td colspan=\"2\">TextVQA</td><td colspan=\"2\">Average</td></tr><tr><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>TFlops</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td></tr><tr><td>MoD layer:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>All layers</td><td>45.9</td><td>38.2%</td><td>42.6</td><td>33.7%</td><td>25.9</td><td>32.8%</td><td>33.8</td><td>34.1%</td><td>37.1</td><td>12.3</td></tr><tr><td>1 MoD per 2 layers</td><td>57.8</td><td>19.1%</td><td>52.3</td><td>16.5%</td><td>26.9</td><td>16.6%</td><td>54.0</td><td>17.9%</td><td>47.8</td><td>16.1</td></tr><tr><td>2 MoDs per 3 layers</td><td>38.1</td><td>26.8%</td><td>46.5</td><td>24.6%</td><td>24.3</td><td>24.4%</td><td>42.1</td><td>24.9%</td><td>37.8</td><td>15.9</td></tr><tr><td>ARank-based deployment</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td></tr><tr><td>Masked token:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>None</td><td>63.2</td><td>52.0%</td><td>66.8</td><td>46.9%</td><td>33.9</td><td>47.0%</td><td>64.7</td><td>49.8%</td><td>57.2</td><td>10.7</td></tr><tr><td>Q</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td></tr><tr><td>Q + A</td><td>62.8</td><td>38.8%</td><td>68.6</td><td>30.5%</td><td>34.7</td><td>35.4%</td><td>62.0</td><td>37.2%</td><td>57.0</td><td>13.0</td></tr><tr><td>Shared 
router:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Not Share</td><td>60.6</td><td>55.8%</td><td>64.5</td><td>48.2%</td><td>32.1</td><td>48.9%</td><td>58.4</td><td>52.9%</td><td>53.9</td><td>10.3</td></tr><tr><td>Share</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td></tr><tr><td>Routing ratio:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>17%</td><td>63.6</td><td>18.9%</td><td>68.9</td><td>15.5%</td><td>34.7</td><td>14.7%</td><td>66.1</td><td>16.5%</td><td>58.3</td><td>16.3</td></tr><tr><td>34%</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td></tr><tr><td>51%</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td></tr><tr><td>68%</td><td>59.1</td><td>77.8%</td><td>70.1</td><td>73.5%</td><td>33.7</td><td>71.8%</td><td>58.4</td><td>74.1%</td><td>55.3</td><td>6.5</td></tr></table>",
|
| 926 |
+
"bbox": [
|
| 927 |
+
174,
|
| 928 |
+
141,
|
| 929 |
+
823,
|
| 930 |
+
422
|
| 931 |
+
],
|
| 932 |
+
"page_idx": 6
|
| 933 |
+
},
|
| 934 |
+
{
|
| 935 |
+
"type": "table",
|
| 936 |
+
"img_path": "images/1b32f7ba2a470c64caddf7c78d011cdb60c9f738547dbae865cfe040e3cf96fc.jpg",
|
| 937 |
+
"table_caption": [
|
| 938 |
+
"Table 2: Ablation study of $\\gamma$ -MoD on LLaVA-HR. \"Param\", \"Acc.\" and \"Skip\" indicate the parameter, accuracy, and skip ratio, respectively."
|
| 939 |
+
],
|
| 940 |
+
"table_footnote": [],
|
| 941 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Param</td><td colspan=\"2\">GQA</td><td colspan=\"2\">SQA</td><td colspan=\"2\">MMMU</td><td colspan=\"2\">TextVQA</td><td rowspan=\"2\">Acc.</td><td rowspan=\"2\">Average TFlops</td><td rowspan=\"2\">Skip</td></tr><tr><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>7.4B</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td><td>0%</td></tr><tr><td>+ Default MoD (Raposo et al., 2024)</td><td>7.4B</td><td>45.9</td><td>38.2%</td><td>42.6</td><td>33.7%</td><td>25.9</td><td>32.8%</td><td>33.8</td><td>34.1%</td><td>37.1</td><td>12.3</td><td>34.7%</td></tr><tr><td>+ ARank-based deployment (ours)</td><td>7.4B</td><td>63.2</td><td>52.0%</td><td>66.8</td><td>46.9%</td><td>33.9</td><td>47.0%</td><td>64.7</td><td>49.8%</td><td>57.2</td><td>10.7</td><td>48.9%</td></tr><tr><td>+ Masked routing learning (ours)</td><td>7.4B</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td><td>57.7%</td></tr></table>",
|
| 942 |
+
"bbox": [
|
| 943 |
+
174,
|
| 944 |
+
469,
|
| 945 |
+
821,
|
| 946 |
+
551
|
| 947 |
+
],
|
| 948 |
+
"page_idx": 6
|
| 949 |
+
},
|
| 950 |
+
{
|
| 951 |
+
"type": "text",
|
| 952 |
+
"text": "report all the results in their default settings. In addition, we evaluate $\\gamma$ -MoD on six image question answering benchmarks: VQAv2 (Goyal et al., 2017), VizWiz (Gurari et al., 2018), TextVQA (Singh et al., 2019), SQA (Lu et al., 2022), GQA (Hudson & Manning, 2019) and SEED (Ge et al., 2023). We report all the results in their default settings. For MME, we report the perception score.",
|
| 953 |
+
"bbox": [
|
| 954 |
+
169,
|
| 955 |
+
563,
|
| 956 |
+
826,
|
| 957 |
+
621
|
| 958 |
+
],
|
| 959 |
+
"page_idx": 6
|
| 960 |
+
},
|
| 961 |
+
{
|
| 962 |
+
"type": "text",
|
| 963 |
+
"text": "5.2 IMPLEMENTATION DETAILS",
|
| 964 |
+
"text_level": 1,
|
| 965 |
+
"bbox": [
|
| 966 |
+
171,
|
| 967 |
+
640,
|
| 968 |
+
408,
|
| 969 |
+
654
|
| 970 |
+
],
|
| 971 |
+
"page_idx": 6
|
| 972 |
+
},
|
| 973 |
+
{
|
| 974 |
+
"type": "text",
|
| 975 |
+
"text": "For all models, pre-training is conducted on LCS-558K dataset (Liu et al., 2024b), which includes high-quality 558k image-text pairs. For instruction tuning, we follow LLaVA-1.5 (Liu et al., 2024b) to use 665k vision-language instruction data. To deploy $\\gamma$ -MoD to MLLMs, ARank is calculated to identify redundant layers after the pre-training stage. For all models, the fourth largest ARank value is used as the threshold for converting dense layers to MoD ones. During instruction tuning, the coefficient for the routing loss is set to 0.01. The remaining settings are kept the same with LLaVA-HR (Luo et al., 2024) and LLaVA (Liu et al., 2024b), including learning rate, training epochs, optimizer and datasets, etc.",
|
| 976 |
+
"bbox": [
|
| 977 |
+
169,
|
| 978 |
+
667,
|
| 979 |
+
826,
|
| 980 |
+
780
|
| 981 |
+
],
|
| 982 |
+
"page_idx": 6
|
| 983 |
+
},
|
| 984 |
+
{
|
| 985 |
+
"type": "text",
|
| 986 |
+
"text": "5.3 EXPERIMENTAL RESULTS",
|
| 987 |
+
"text_level": 1,
|
| 988 |
+
"bbox": [
|
| 989 |
+
171,
|
| 990 |
+
800,
|
| 991 |
+
393,
|
| 992 |
+
814
|
| 993 |
+
],
|
| 994 |
+
"page_idx": 6
|
| 995 |
+
},
|
| 996 |
+
{
|
| 997 |
+
"type": "text",
|
| 998 |
+
"text": "5.3.1 QUANTITATIVE ANALYSIS",
|
| 999 |
+
"text_level": 1,
|
| 1000 |
+
"bbox": [
|
| 1001 |
+
171,
|
| 1002 |
+
828,
|
| 1003 |
+
408,
|
| 1004 |
+
843
|
| 1005 |
+
],
|
| 1006 |
+
"page_idx": 6
|
| 1007 |
+
},
|
| 1008 |
+
{
|
| 1009 |
+
"type": "text",
|
| 1010 |
+
"text": "Comparison with different MoD configurations. In Tab. 1, we first compare different settings of MoD on LLaVA-HR (Luo et al., 2024). From this table, the first observation is that directly converting all layers to MoD ones leads to worse results, e.g., $33.8\\%$ on TextVQA. Besides, although the hand-craft strategy performs much better, its performance declines are still obvious, e.g., $-10.7\\%$ of 1 MoD per 2 layers on average. These results confirm the challenges of adopting MoDs to MLLMs.",
|
| 1011 |
+
"bbox": [
|
| 1012 |
+
169,
|
| 1013 |
+
854,
|
| 1014 |
+
826,
|
| 1015 |
+
925
|
| 1016 |
+
],
|
| 1017 |
+
"page_idx": 6
|
| 1018 |
+
},
|
| 1019 |
+
{
|
| 1020 |
+
"type": "header",
|
| 1021 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1022 |
+
"bbox": [
|
| 1023 |
+
173,
|
| 1024 |
+
32,
|
| 1025 |
+
478,
|
| 1026 |
+
47
|
| 1027 |
+
],
|
| 1028 |
+
"page_idx": 6
|
| 1029 |
+
},
|
| 1030 |
+
{
|
| 1031 |
+
"type": "page_number",
|
| 1032 |
+
"text": "7",
|
| 1033 |
+
"bbox": [
|
| 1034 |
+
493,
|
| 1035 |
+
948,
|
| 1036 |
+
504,
|
| 1037 |
+
959
|
| 1038 |
+
],
|
| 1039 |
+
"page_idx": 6
|
| 1040 |
+
},
|
| 1041 |
+
{
|
| 1042 |
+
"type": "table",
|
| 1043 |
+
"img_path": "images/2e58cba9fa780d34bec1288a7bf66fbed79c72db98fdbfb73050d1934946eedf.jpg",
|
| 1044 |
+
"table_caption": [
|
| 1045 |
+
"Table 3: Results of $\\gamma$ -MoD on different MLLM architectures and model scales. $\\gamma$ -MoD-0.3 and $\\gamma$ -MoD-0.5 denote the routing ratio of $30\\%$ and $50\\%$ , respectively."
|
| 1046 |
+
],
|
| 1047 |
+
"table_footnote": [],
|
| 1048 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Param</td><td colspan=\"2\">GQA</td><td colspan=\"2\">SQA</td><td colspan=\"2\">MMMU</td><td colspan=\"2\">TextVQA</td><td rowspan=\"2\">Acc.</td><td rowspan=\"2\">Average TFlops</td><td rowspan=\"2\">Skip</td></tr><tr><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td></tr><tr><td colspan=\"13\">MLLM architecture:</td></tr><tr><td>LLaVA</td><td>7B</td><td>62.0</td><td>0%</td><td>66.8</td><td>0%</td><td>34.3</td><td>0%</td><td>58.2</td><td>0%</td><td>55.3</td><td>10.7</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>61.1</td><td>34.1%</td><td>64.7</td><td>29.4%</td><td>35.4</td><td>29.8%</td><td>56.3</td><td>30.7%</td><td>54.4</td><td>7.7</td><td>31.0%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>41.4</td><td>60.9%</td><td>62.3</td><td>54.8%</td><td>31.0</td><td>53.6%</td><td>42.9</td><td>56.2%</td><td>44.4</td><td>5.3</td><td>56.4%</td></tr><tr><td>LLaVA-HR</td><td>7B</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td><td>37.9%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td><td>57.7%</td></tr><tr><td>Mini-Gemini-HD</td><td>7B</td><td>62.9</td><td>0%</td><td>69.6</td><td>0%</td><td>36.8</td><td>0%</td><td>66.5</td><td>0%</td><td>59.0</td><td>60.2</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>62.1</td><td>37.1%</td><td>69.0</td><td>34.6%</td><td>34.1</td><td>36.4%</td><td>66.4</td><td>36.6%</td><td>57.9</td><td>39.4</td><td>36.2%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>62.2</td><td>59.2%</td><td>70.4</td><td>56.8%</td><td>33.9</td><
td>58.6%</td><td>67.0</td><td>57.7%</td><td>58.4</td><td>27.8</td><td>58.1%</td></tr><tr><td colspan=\"13\">Model scales:</td></tr><tr><td>LLaVA-HR</td><td>7B</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>\\( {36.8}\\% \\)</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td><td>37.9%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td><td>57.1%</td></tr><tr><td>LLaVA-HR</td><td>13B</td><td>64.8</td><td>0%</td><td>68.1</td><td>0%</td><td>36.7</td><td>0%</td><td>68.1</td><td>0%</td><td>59.4</td><td>37.1</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>13B</td><td>64.5</td><td>38.1%</td><td>70.5</td><td>33.1%</td><td>37.8</td><td>32.5%</td><td>67.0</td><td>36.0%</td><td>60.0</td><td>25.1</td><td>34.9%</td></tr><tr><td>+γ-MoD-0.5</td><td>13B</td><td>64.8</td><td>58.8%</td><td>69.5</td><td>52.2%</td><td>35.8</td><td>53.8%</td><td>66.8</td><td>55.4%</td><td>59.2</td><td>18.4</td><td>55.1%</td></tr></table>",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
173,
|
| 1051 |
+
146,
|
| 1052 |
+
826,
|
| 1053 |
+
402
|
| 1054 |
+
],
|
| 1055 |
+
"page_idx": 7
|
| 1056 |
+
},
|
| 1057 |
+
{
|
| 1058 |
+
"type": "table",
|
| 1059 |
+
"img_path": "images/d85d862225b8792b8f495f46b9698d13f4d63ffd69b3621867d6a1e9cb8dc512.jpg",
|
| 1060 |
+
"table_caption": [
|
| 1061 |
+
"Table 4: Training and inference efficiency of $\\gamma$ -MoD on LLaVA-HR. The inference efficiency is tested on an NVIDIA A100 GPU, which is the average value of GQA, SQA, MMMU, and TextVQA."
|
| 1062 |
+
],
|
| 1063 |
+
"table_footnote": [],
|
| 1064 |
+
"table_body": "<table><tr><td>Methods</td><td>Training Time ↓</td><td>Inference Throughput ↑</td><td>Inference Memory ↓</td><td>Inference TFlops ↓</td><td>Avg. Acc. ↑</td></tr><tr><td>LLaVA-HR</td><td>20.7 h</td><td>4.7 samples/s</td><td>19 G</td><td>19.2</td><td>58.5</td></tr><tr><td>+γ-MoD-0.3</td><td>15.4 h</td><td>5.9 samples/s</td><td>15 G</td><td>12.6</td><td>58.3</td></tr><tr><td>+γ-MoD-0.5</td><td>14.3 h</td><td>7.2 samples/s</td><td>14 G</td><td>9.3</td><td>57.6</td></tr><tr><td>Gains</td><td>-31.0%</td><td>+53.2%</td><td>-26.3%</td><td>-51.6%</td><td>-0.9%</td></tr></table>",
|
| 1065 |
+
"bbox": [
|
| 1066 |
+
174,
|
| 1067 |
+
439,
|
| 1068 |
+
823,
|
| 1069 |
+
531
|
| 1070 |
+
],
|
| 1071 |
+
"page_idx": 7
|
| 1072 |
+
},
|
| 1073 |
+
{
|
| 1074 |
+
"type": "text",
|
| 1075 |
+
"text": "However, after employing our ARank-based strategy, the efficiency of LLaVA-HR is greatly increased while the performance is well maintained.",
|
| 1076 |
+
"bbox": [
|
| 1077 |
+
169,
|
| 1078 |
+
546,
|
| 1079 |
+
823,
|
| 1080 |
+
575
|
| 1081 |
+
],
|
| 1082 |
+
"page_idx": 7
|
| 1083 |
+
},
|
| 1084 |
+
{
|
| 1085 |
+
"type": "text",
|
| 1086 |
+
"text": "In Tab. 1, we also validate different micro-designs for deploying MoD on MLLM, including the masked routing learning, the shared router and the routing ratio. From these comparisons, we first see that the masked learning strategy is much beneficial to the optimization of $\\gamma$ -MoD, providing up to $+1.7\\%$ gains on SQA. In addition, we also find that the router sharing strategy plays a significant role in $\\gamma$ -MoD. After removing this strategy, model performance will obviously drop on TextVQA by $-6.5\\%$ . For routing threshold, we observe that the adaptive thresholds perform better while the default one is more efficient. Finally, we validate the impact of different routing ratio on LLaVA-HR. From results we can see that model performance can be retained under relatively small routing ratios, i.e., $17\\%$ and $34\\%$ . When routing ratio is increased to $51\\%$ , model performance drops slightly from $58.3\\%$ to $57.6\\%$ on average. However, the benefit of efficiency is still notable, i.e., $-51.5\\%$ Flops.",
|
| 1087 |
+
"bbox": [
|
| 1088 |
+
169,
|
| 1089 |
+
580,
|
| 1090 |
+
826,
|
| 1091 |
+
722
|
| 1092 |
+
],
|
| 1093 |
+
"page_idx": 7
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"type": "text",
|
| 1097 |
+
"text": "Ablation studies. To validate contributions of each design in $\\gamma$ -MoD, we conduct ablation study in Tab. 2. From this table, we can see that the default MoD will cause obvious performance degeneration, resulting up to $-25.3\\%$ on SQA. In stark contrast, with our ARank-based deployment, the average performance of LLaVA-HR is improved from $37.1\\%$ to $57.6\\%$ , and the computational sparsity also boosts from $34.7\\%$ to $48.9\\%$ . Such comparison confirms that not all layers can be converted to MoD layers, and ARank is critical to identify the redundant ones. In addition, the use of masked routing learning can further benefit the model training, providing $+0.8\\%$ on MMMU and $+0.2\\%$ on TextVQA, respectively. These results further confirm the effectiveness of $\\gamma$ -MoD.",
|
| 1098 |
+
"bbox": [
|
| 1099 |
+
169,
|
| 1100 |
+
727,
|
| 1101 |
+
828,
|
| 1102 |
+
839
|
| 1103 |
+
],
|
| 1104 |
+
"page_idx": 7
|
| 1105 |
+
},
|
| 1106 |
+
{
|
| 1107 |
+
"type": "text",
|
| 1108 |
+
"text": "5.3.2 COMPARISON WITH EXISTING MLLMS",
|
| 1109 |
+
"text_level": 1,
|
| 1110 |
+
"bbox": [
|
| 1111 |
+
171,
|
| 1112 |
+
856,
|
| 1113 |
+
501,
|
| 1114 |
+
871
|
| 1115 |
+
],
|
| 1116 |
+
"page_idx": 7
|
| 1117 |
+
},
|
| 1118 |
+
{
|
| 1119 |
+
"type": "text",
|
| 1120 |
+
"text": "Generalizations of $\\gamma$ -MoD on different MLLMs. In Tab. 3, we also evaluate the generalization capability of $\\gamma$ -MoD across different MLLM architectures and model scales. In particular, $\\gamma$ -MoD with $30\\%$ routing ratio demonstrates great trade-off between performance and efficiency on LLaVA. When",
|
| 1121 |
+
"bbox": [
|
| 1122 |
+
169,
|
| 1123 |
+
881,
|
| 1124 |
+
828,
|
| 1125 |
+
925
|
| 1126 |
+
],
|
| 1127 |
+
"page_idx": 7
|
| 1128 |
+
},
|
| 1129 |
+
{
|
| 1130 |
+
"type": "header",
|
| 1131 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1132 |
+
"bbox": [
|
| 1133 |
+
173,
|
| 1134 |
+
32,
|
| 1135 |
+
478,
|
| 1136 |
+
47
|
| 1137 |
+
],
|
| 1138 |
+
"page_idx": 7
|
| 1139 |
+
},
|
| 1140 |
+
{
|
| 1141 |
+
"type": "page_number",
|
| 1142 |
+
"text": "8",
|
| 1143 |
+
"bbox": [
|
| 1144 |
+
493,
|
| 1145 |
+
948,
|
| 1146 |
+
504,
|
| 1147 |
+
959
|
| 1148 |
+
],
|
| 1149 |
+
"page_idx": 7
|
| 1150 |
+
},
|
| 1151 |
+
{
|
| 1152 |
+
"type": "table",
|
| 1153 |
+
"img_path": "images/3aff40d3de5d042072aa9122ae7f9b6cf2958abbdd8f491963d9eacee40d7fe2.jpg",
|
| 1154 |
+
"table_caption": [
|
| 1155 |
+
"Table 5: Comparison with quantization and pruning methods. \"Speed\", \"Prefilling\" and \"Next-token\" indicate the throughput (samples/s), prefetching time (seconds) and next-token time (seconds), respectively. For MMMU, models predict one option without the need of the next-token time."
|
| 1156 |
+
],
|
| 1157 |
+
"table_footnote": [],
|
| 1158 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"3\">MMMU</td><td colspan=\"4\">MM-Vet</td></tr><tr><td>Acc.</td><td>Speed</td><td>Prefilling</td><td>Acc.</td><td>Speed</td><td>Prefilling</td><td>Next-token</td></tr><tr><td>LLaVA-v1.5-7B (Liu et al., 2024b)</td><td>34.3</td><td>9.1</td><td>0.11</td><td>30.5</td><td>0.53</td><td>0.20</td><td>1.8</td></tr><tr><td>+ AWQ-4bit (Lin et al., 2024b)</td><td>34.8</td><td>11.1</td><td>0.09</td><td>26.5</td><td>0.57</td><td>0.16</td><td>1.6</td></tr><tr><td>+ FastV(K=2,R=50%) (Chen et al., 2025)</td><td>33.9</td><td>11.6</td><td>0.09</td><td>28.8</td><td>0.68</td><td>0.17</td><td>1.3</td></tr><tr><td>+ γ-MoD-0.3</td><td>35.4</td><td>12.5</td><td>0.08</td><td>29.1</td><td>0.76</td><td>0.19</td><td>1.1</td></tr></table>",
|
| 1159 |
+
"bbox": [
|
| 1160 |
+
174,
|
| 1161 |
+
155,
|
| 1162 |
+
823,
|
| 1163 |
+
243
|
| 1164 |
+
],
|
| 1165 |
+
"page_idx": 8
|
| 1166 |
+
},
|
| 1167 |
+
{
|
| 1168 |
+
"type": "table",
|
| 1169 |
+
"img_path": "images/a6908b0835c120fa75cb7e9b78ce2f837e57095dc85670f729b0bf040e074c18.jpg",
|
| 1170 |
+
"table_caption": [
|
| 1171 |
+
"Table 6: Comparison with existing dense and sparse MLLMs on 9 benchmarks. Speed is the average samples per second of GQA, SQA, MMMU, and TextVQA."
|
| 1172 |
+
],
|
| 1173 |
+
"table_footnote": [],
|
| 1174 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Param.</td><td colspan=\"2\">Image Question</td><td colspan=\"2\">Answering</td><td colspan=\"5\">Benchmark Toolkit</td><td rowspan=\"2\">Speed</td></tr><tr><td>TextVQA</td><td>\\( VQA^{v2} \\)</td><td>GQA</td><td>\\( SQA^1 \\)</td><td>POPE</td><td>MME</td><td>MMB</td><td>MMMU</td><td>MM-Vet</td></tr><tr><td colspan=\"12\">Dense Model:</td></tr><tr><td>I-80B (Laurençon et al., 2024)</td><td>65B</td><td>-</td><td>60.0</td><td>45.2</td><td>-</td><td>-</td><td>-</td><td>54.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>InstructBLIP (Dai et al., 2023)</td><td>14B</td><td>50.7</td><td>-</td><td>49.5</td><td>63.1</td><td>78.9</td><td>1212.8</td><td>-</td><td>-</td><td>25.6</td><td>-</td></tr><tr><td>VILA (Lin et al., 2024c)</td><td>7B</td><td>64.4</td><td>79.9</td><td>62.3</td><td>68.2</td><td>85.5</td><td>1533.0</td><td>68.9</td><td>-</td><td>34.9</td><td>-</td></tr><tr><td>Qwen-VL (Bai et al., 2023b)</td><td>10B</td><td>63.8</td><td>78.8</td><td>59.3</td><td>67.1</td><td>-</td><td>1487.6</td><td>38.2</td><td>-</td><td>-</td><td>4.6</td></tr><tr><td>LLaVA-1.5 (Liu et al., 2024b)</td><td>7B</td><td>58.2</td><td>78.5</td><td>62.0</td><td>66.8</td><td>85.9</td><td>1510.7</td><td>64.3</td><td>34.3</td><td>30.5</td><td>8.1</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>7B</td><td>67.1</td><td>81.9</td><td>64.2</td><td>67.9</td><td>87.6</td><td>1554.9</td><td>66.8</td><td>35.2</td><td>31.2</td><td>4.7</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>13B</td><td>68.1</td><td>82.3</td><td>64.8</td><td>68.1</td><td>87.8</td><td>1540.9</td><td>64.5</td><td>36.3</td><td>34.8</td><td>3.1</td></tr><tr><td colspan=\"12\">Sparse Model:</td></tr><tr><td>MoE-LLaVA (Lin et al., 2024a)</td><td>3B</td><td>50.1</td><td>76.7</td><td>60.3</td><td>62.6</td><td>85.7</td><td>1318.2</td><td>60.2</td><td>-</td><td>26.9</td><td>8.5</td></tr><tr><td>MoE-LLaVA (Lin et al., 
2024a)</td><td>5B</td><td>51.4</td><td>77.6</td><td>61.4</td><td>68.5</td><td>86.3</td><td>1423.0</td><td>65.2</td><td>-</td><td>34.3</td><td>5.6</td></tr><tr><td>γ-MoD-LLaVA(ours)</td><td>7B</td><td>56.3</td><td>77.6</td><td>61.1</td><td>64.7</td><td>86.0</td><td>1342.1</td><td>59.4</td><td>35.4</td><td>29.8</td><td>10.3</td></tr><tr><td>γ-MoD-LLaVA-HR(ours)</td><td>7B</td><td>64.9</td><td>80.6</td><td>63.1</td><td>67.9</td><td>87.3</td><td>1516.0</td><td>63.4</td><td>34.7</td><td>31.5</td><td>7.2</td></tr><tr><td>γ-MoD-LLaVA-HR(ours)</td><td>13B</td><td>66.8</td><td>82.0</td><td>64.8</td><td>69.5</td><td>86.7</td><td>1515.4</td><td>65.2</td><td>35.8</td><td>34.0</td><td>4.8</td></tr></table>",
|
| 1175 |
+
"bbox": [
|
| 1176 |
+
173,
|
| 1177 |
+
280,
|
| 1178 |
+
823,
|
| 1179 |
+
474
|
| 1180 |
+
],
|
| 1181 |
+
"page_idx": 8
|
| 1182 |
+
},
|
| 1183 |
+
{
|
| 1184 |
+
"type": "text",
|
| 1185 |
+
"text": "the routing ratio increases to $51\\%$ , the performance of LLaVA decreases significantly, suggesting its relatively low tolerance to high routing ratio. For LLaVA-HR, the $\\gamma$ -MoD-0.3 configuration maintains high accuracy $63.7\\%$ on GQA and $65.3\\%$ on TextVQA while reducing TFlops by $34\\%$ and skipping $37.9\\%$ of tokens. When the routing ratio increases to $51\\%$ , the token skip rate improves to $57.7\\%$ , though a slight drop in accuracy is observed e.g., $-0.6\\%$ on GQA. These comparisons also reflect that high-resolution MLLMs often have a higher token redundancy than low-resolution ones. Similar observations can also be witnessed on Mini-Gemini-HD Li et al. (2024b). When scaling to larger models, such as the LLaVA-HR-13B, our method continues to perform strongly. The $\\gamma$ -MoD-0.3 configuration yields a $38.1\\%$ skip rate and 25.1 TFlops with minimal accuracy loss, suggesting that larger models are better suited to handle higher skip rates while maintaining performance. Even increasing the routing ratio to $51\\%$ the competitive accuracy is still maintained, e.g., $64.8\\%$ on GQA and $66.8\\%$ on TextVQA.",
|
| 1186 |
+
"bbox": [
|
| 1187 |
+
169,
|
| 1188 |
+
492,
|
| 1189 |
+
826,
|
| 1190 |
+
660
|
| 1191 |
+
],
|
| 1192 |
+
"page_idx": 8
|
| 1193 |
+
},
|
| 1194 |
+
{
|
| 1195 |
+
"type": "text",
|
| 1196 |
+
"text": "Efficiency analysis. In Tab. 4, we compare the training and inference efficiency of $\\gamma$ -MoD on LLaVA-HR. From these results, we observe comprehensive advantages of $\\gamma$ -MoD in terms of training and inference. In particular, $\\gamma$ -MoD-0.3 already achieves an obvious improvement in efficiency, i.e., $-26\\%$ training time and $-35\\%$ TFlops. However, the performance drops of $\\gamma$ -MoD-0.3 can be almost ignorable, i.e., $-0.2\\%$ average accuracy. When increasing the routing ratio to $50\\%$ tokens, the inference throughput of $\\gamma$ -MoD-0.5 further improves by up to $+53.2\\%$ . Despite the significant efficiency gains, the performance drop of $\\gamma$ -MoD is still acceptable, i.e., $-1.5\\%$ average accuracy. These results well validate the obvious benefits of $\\gamma$ -MoD in efficiency.",
|
| 1197 |
+
"bbox": [
|
| 1198 |
+
169,
|
| 1199 |
+
666,
|
| 1200 |
+
826,
|
| 1201 |
+
779
|
| 1202 |
+
],
|
| 1203 |
+
"page_idx": 8
|
| 1204 |
+
},
|
| 1205 |
+
{
|
| 1206 |
+
"type": "text",
|
| 1207 |
+
"text": "Comparison with existing methods. In Tab. 6, we compare MLLMs deployed by $\\gamma$ -MoD with both dense and sparse models on 9 benchmarks. From it we can see $\\gamma$ -MoD can maintain the competitive performance on all benchmarks, while achieving significant efficiency gains on LLaVA and LLaVA-HR. Specifically, $\\gamma$ -MoD-LLaVA-HR (13B) can reach similar inference speed as LLaVA-HR (7B) while outperforming the latter on multiple benchmarks, e.g., $+3.0\\%$ on MMVet. In addition, compared to existing sparse models, i.e., MoE-LLaVA (Lin et al., 2024a), our approaches also achieve better trade-off between performance and efficiency. In particular, $\\gamma$ -MoD-LLaVA-HR (7B) outperforms MoE-LLaVA (5B) on 5 of 8 benchmarks, e.g., $+93$ scores on MME, while still maintaining better efficiency, i.e., $+28\\%$ gains on inference speed. It is worth noting that although the parameter scale of MoE-LLaVA is smaller, its routing calculation often leads to higher latency.",
|
| 1208 |
+
"bbox": [
|
| 1209 |
+
169,
|
| 1210 |
+
784,
|
| 1211 |
+
828,
|
| 1212 |
+
925
|
| 1213 |
+
],
|
| 1214 |
+
"page_idx": 8
|
| 1215 |
+
},
|
| 1216 |
+
{
|
| 1217 |
+
"type": "header",
|
| 1218 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1219 |
+
"bbox": [
|
| 1220 |
+
173,
|
| 1221 |
+
32,
|
| 1222 |
+
478,
|
| 1223 |
+
47
|
| 1224 |
+
],
|
| 1225 |
+
"page_idx": 8
|
| 1226 |
+
},
|
| 1227 |
+
{
|
| 1228 |
+
"type": "page_number",
|
| 1229 |
+
"text": "9",
|
| 1230 |
+
"bbox": [
|
| 1231 |
+
493,
|
| 1232 |
+
948,
|
| 1233 |
+
504,
|
| 1234 |
+
959
|
| 1235 |
+
],
|
| 1236 |
+
"page_idx": 8
|
| 1237 |
+
},
|
| 1238 |
+
{
|
| 1239 |
+
"type": "image",
|
| 1240 |
+
"img_path": "images/83d3548950efcbecca3b09953f0aacb7c6c0199af5d5f72b42ed7dae842ca293.jpg",
|
| 1241 |
+
"image_caption": [
|
| 1242 |
+
"Figure 4: Visualization of routing results for different MoD layers. \"Q\", \"I\" and \"A\" denote the question, image and response, respectively. The skipped tokens in sub-figure (b) are colored in gray."
|
| 1243 |
+
],
|
| 1244 |
+
"image_footnote": [],
|
| 1245 |
+
"bbox": [
|
| 1246 |
+
173,
|
| 1247 |
+
99,
|
| 1248 |
+
826,
|
| 1249 |
+
438
|
| 1250 |
+
],
|
| 1251 |
+
"page_idx": 9
|
| 1252 |
+
},
|
| 1253 |
+
{
|
| 1254 |
+
"type": "text",
|
| 1255 |
+
"text": "In Tab. 5, we also compare $\\gamma$ -MoD with common inference-time acceleration methods Chen et al. (2025); Lin et al. (2024b). Compared to these methods, $\\gamma$ -MoD can better maintain the model performance on MMMU and MMVet. In terms of efficiency, $\\gamma$ -MoD shows greater advantages in accelerating the next-token generation for MLLMs, providing up to $+31\\%$ speedups on MMVet. Overall, these comparisons further confirm the effectiveness and efficiency of $\\gamma$ -MoD.",
|
| 1256 |
+
"bbox": [
|
| 1257 |
+
169,
|
| 1258 |
+
487,
|
| 1259 |
+
826,
|
| 1260 |
+
560
|
| 1261 |
+
],
|
| 1262 |
+
"page_idx": 9
|
| 1263 |
+
},
|
| 1264 |
+
{
|
| 1265 |
+
"type": "text",
|
| 1266 |
+
"text": "5.3.3 QUALITATIVE ANALYSIS",
|
| 1267 |
+
"text_level": 1,
|
| 1268 |
+
"bbox": [
|
| 1269 |
+
171,
|
| 1270 |
+
575,
|
| 1271 |
+
401,
|
| 1272 |
+
590
|
| 1273 |
+
],
|
| 1274 |
+
"page_idx": 9
|
| 1275 |
+
},
|
| 1276 |
+
{
|
| 1277 |
+
"type": "text",
|
| 1278 |
+
"text": "In Fig. 4, we visualize the routing ratio and the skipped content in both images and the corresponding conversations. The first observation from Fig. 4.(a) is that question, image, and response tokens are routed in a consistent pattern: question tokens are mostly kept, while image tokens are the most redundant, and thus routed the most. In Fig. 4.(b), we visualize the skipped content on images and texts. The gray portions of the images represent tokens that are skipped by the router, indicating that many regions in the images, such as background pixels, are redundant and do not provide critical information for understanding. Routing out these tokens allows the model to focus more on the white portions, which highlight the image regions or text parts that the model pays closer attention to. For example, in the middle of the first row with the IQ test example, the model can concentrate and spend more computations on the arithmetic and geometric aspects of the image.",
|
| 1279 |
+
"bbox": [
|
| 1280 |
+
169,
|
| 1281 |
+
601,
|
| 1282 |
+
826,
|
| 1283 |
+
742
|
| 1284 |
+
],
|
| 1285 |
+
"page_idx": 9
|
| 1286 |
+
},
|
| 1287 |
+
{
|
| 1288 |
+
"type": "text",
|
| 1289 |
+
"text": "6 CONCLUSION",
|
| 1290 |
+
"text_level": 1,
|
| 1291 |
+
"bbox": [
|
| 1292 |
+
171,
|
| 1293 |
+
765,
|
| 1294 |
+
318,
|
| 1295 |
+
780
|
| 1296 |
+
],
|
| 1297 |
+
"page_idx": 9
|
| 1298 |
+
},
|
| 1299 |
+
{
|
| 1300 |
+
"type": "text",
|
| 1301 |
+
"text": "In this paper, we aim to overcome the efficiency problem in multimodal large language models (MLLMs) from the perspective of \"activated token\". In particular, we present $\\gamma$ -MoD, a novel mixture-of-depth adaptation strategy for computationally efficient MLLM. In $\\gamma$ -MoD, an innovative metric is introduced to identify the redundant layers for MoD deployment, namely rank of attention maps (ARank). Moreover, $\\gamma$ -MoD also maximizes its benefit to MLLMs via two designs called shared vision-language router and masked routing learning. With these novel designs, $\\gamma$ -MoD can obviously reduce computational costs of existing MLLMs while maintaining their performance. Extensive experiments on 9 multimodal benchmarks validate the efficiency and effectiveness. Besides, the great generalization ability of $\\gamma$ -MoD is also validated across different MLLMs.",
|
| 1302 |
+
"bbox": [
|
| 1303 |
+
169,
|
| 1304 |
+
797,
|
| 1305 |
+
826,
|
| 1306 |
+
925
|
| 1307 |
+
],
|
| 1308 |
+
"page_idx": 9
|
| 1309 |
+
},
|
| 1310 |
+
{
|
| 1311 |
+
"type": "header",
|
| 1312 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1313 |
+
"bbox": [
|
| 1314 |
+
173,
|
| 1315 |
+
32,
|
| 1316 |
+
478,
|
| 1317 |
+
47
|
| 1318 |
+
],
|
| 1319 |
+
"page_idx": 9
|
| 1320 |
+
},
|
| 1321 |
+
{
|
| 1322 |
+
"type": "page_number",
|
| 1323 |
+
"text": "10",
|
| 1324 |
+
"bbox": [
|
| 1325 |
+
488,
|
| 1326 |
+
946,
|
| 1327 |
+
508,
|
| 1328 |
+
960
|
| 1329 |
+
],
|
| 1330 |
+
"page_idx": 9
|
| 1331 |
+
},
|
| 1332 |
+
{
|
| 1333 |
+
"type": "ref_text",
|
| 1334 |
+
"text": "Acknowledgments. This work was supported by the National Natural Science Foundation of China (No. 623B2088) and the China Postdoctoral Science Foundation (No. 2024M761548).",
|
| 1335 |
+
"bbox": [
|
| 1336 |
+
171,
|
| 1337 |
+
103,
|
| 1338 |
+
823,
|
| 1339 |
+
132
|
| 1340 |
+
],
|
| 1341 |
+
"page_idx": 10
|
| 1342 |
+
},
|
| 1343 |
+
{
|
| 1344 |
+
"type": "text",
|
| 1345 |
+
"text": "REFERENCES",
|
| 1346 |
+
"text_level": 1,
|
| 1347 |
+
"bbox": [
|
| 1348 |
+
171,
|
| 1349 |
+
152,
|
| 1350 |
+
287,
|
| 1351 |
+
167
|
| 1352 |
+
],
|
| 1353 |
+
"page_idx": 10
|
| 1354 |
+
},
|
| 1355 |
+
{
|
| 1356 |
+
"type": "list",
|
| 1357 |
+
"sub_type": "ref_text",
|
| 1358 |
+
"list_items": [
|
| 1359 |
+
"Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024.",
|
| 1360 |
+
"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.",
|
| 1361 |
+
"Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 2022.",
|
| 1362 |
+
"Ebtesam Almazrouei, Hamza Alobeidli, Abdulaziz Alshamsi, Alessandro Cappelli, Ruxandra Cojocaru, Mérouane Debbah, Étienne Goffinet, Daniel Hesslow, Julien Launay, Quentin Malartic, et al. The falcon series of open language models. arXiv preprint arXiv:2311.16867, 2023.",
|
| 1363 |
+
"Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023a.",
|
| 1364 |
+
"Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023b.",
|
| 1365 |
+
"Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts. arXiv preprint arXiv:2407.06204, 2024a.",
|
| 1366 |
+
"Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internlm2 technical report. arXiv preprint arXiv:2403.17297, 2024b.",
|
| 1367 |
+
"Jun Chen, Deyao Zhu, Xiaoqian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478, 2023.",
|
| 1368 |
+
"Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. In European Conference on Computer Vision, pp. 19-35. Springer, 2025.",
|
| 1369 |
+
"Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, Ji Ma, Jiaqi Wang, Xiaoyi Dong, Hang Yan, Hewei Guo, Conghui He, Botian Shi, Zhenjiang Jin, Chao Xu, Bin Wang, Xingjian Wei, Wei Li, Wenjian Zhang, Bo Zhang, Pinlong Cai, Licheng Wen, Xiangchao Yan, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites, 2024a.",
|
| 1370 |
+
"Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 24185-24198, 2024b.",
|
| 1371 |
+
"Damai Dai, Chengqi Deng, Chenggang Zhao, RX Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y Wu, et al. Deepseekmoe: Towards ultimate expert specialization in mixture-of-experts language models. arXiv preprint arXiv:2401.06066, 2024.",
|
| 1372 |
+
"Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023."
|
| 1373 |
+
],
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
171,
|
| 1376 |
+
176,
|
| 1377 |
+
828,
|
| 1378 |
+
924
|
| 1379 |
+
],
|
| 1380 |
+
"page_idx": 10
|
| 1381 |
+
},
|
| 1382 |
+
{
|
| 1383 |
+
"type": "header",
|
| 1384 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1385 |
+
"bbox": [
|
| 1386 |
+
171,
|
| 1387 |
+
32,
|
| 1388 |
+
478,
|
| 1389 |
+
47
|
| 1390 |
+
],
|
| 1391 |
+
"page_idx": 10
|
| 1392 |
+
},
|
| 1393 |
+
{
|
| 1394 |
+
"type": "page_number",
|
| 1395 |
+
"text": "11",
|
| 1396 |
+
"bbox": [
|
| 1397 |
+
488,
|
| 1398 |
+
946,
|
| 1399 |
+
506,
|
| 1400 |
+
960
|
| 1401 |
+
],
|
| 1402 |
+
"page_idx": 10
|
| 1403 |
+
},
|
| 1404 |
+
{
|
| 1405 |
+
"type": "list",
|
| 1406 |
+
"sub_type": "ref_text",
|
| 1407 |
+
"list_items": [
|
| 1408 |
+
"Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023.",
|
| 1409 |
+
"Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, Yunsheng Wu, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models, 2024.",
|
| 1410 |
+
"Yaroslav Fyodorov, Yoad Winter, and Nissim Francez. A natural logic inference system. In Proceedings of the 2nd workshop on inference in computational semantics (ICoS-2), 2000.",
|
| 1411 |
+
"Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model, 2023.",
|
| 1412 |
+
"G. H. Golub and C. Reinsch. Singular value decomposition and least squares solutions. In Handbook for Automatic Computation: Volume II: Linear Algebra, pp. 134-151. Springer, 1971.",
|
| 1413 |
+
"Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering, 2017.",
|
| 1414 |
+
"Danna Gurari, Qing Li, Abigale J. Stangl, Anhong Guo, Chi Lin, Kristen Grauman, Jiebo Luo, and Jeffrey P. Bigham. Vizwiz grand challenge: Answering visual questions from blind people, 2018.",
|
| 1415 |
+
"Drew A. Hudson and Christopher D. Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering, 2019.",
|
| 1416 |
+
"Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024.",
|
| 1417 |
+
"Yizhang Jin, Jian Li, Yexin Liu, Tianjun Gu, Kai Wu, Zhengkai Jiang, Muyang He, Bo Zhao, Xin Tan, Zhenye Gan, et al. Efficient multimodal large language models: A survey. arXiv preprint arXiv:2405.10739, 2024.",
|
| 1418 |
+
"Minchul Kim, Shangqian Gao, Yen-Chang Hsu, Yilin Shen, and Hongxia Jin. Token fusion: Bridging the gap between token pruning and token merging. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1383-1392, 2024.",
|
| 1419 |
+
"Hugo Laurençon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024.",
|
| 1420 |
+
"Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a.",
|
| 1421 |
+
"Yanwei Li, Yuechen Zhang, Chengyao Wang, Zhisheng Zhong, Yixin Chen, Ruihang Chu, Shaoteng Liu, and Jiaya Jia. Mini-gemini: Mining the potential of multi-modality vision language models. arXiv preprint arXiv:2403.18814, 2024b.",
|
| 1422 |
+
"Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models, 2023.",
|
| 1423 |
+
"Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Jina Huang, Junwu Zhang, Yatian Pang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models, 2024a.",
|
| 1424 |
+
"Ji Lin, Jiaming Tang, Haotian Tang, Shang Yang, Wei-Ming Chen, Wei-Chen Wang, Guangxuan Xiao, Xingyu Dang, Chuang Gan, and Song Han. Awq: Activation-aware weight quantization for llm compression and acceleration, 2024b.",
|
| 1425 |
+
"Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 26689-26699, 2024c."
|
| 1426 |
+
],
|
| 1427 |
+
"bbox": [
|
| 1428 |
+
171,
|
| 1429 |
+
102,
|
| 1430 |
+
826,
|
| 1431 |
+
924
|
| 1432 |
+
],
|
| 1433 |
+
"page_idx": 11
|
| 1434 |
+
},
|
| 1435 |
+
{
|
| 1436 |
+
"type": "header",
|
| 1437 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1438 |
+
"bbox": [
|
| 1439 |
+
171,
|
| 1440 |
+
32,
|
| 1441 |
+
478,
|
| 1442 |
+
47
|
| 1443 |
+
],
|
| 1444 |
+
"page_idx": 11
|
| 1445 |
+
},
|
| 1446 |
+
{
|
| 1447 |
+
"type": "page_number",
|
| 1448 |
+
"text": "12",
|
| 1449 |
+
"bbox": [
|
| 1450 |
+
488,
|
| 1451 |
+
946,
|
| 1452 |
+
508,
|
| 1453 |
+
960
|
| 1454 |
+
],
|
| 1455 |
+
"page_idx": 11
|
| 1456 |
+
},
|
| 1457 |
+
{
|
| 1458 |
+
"type": "list",
|
| 1459 |
+
"sub_type": "ref_text",
|
| 1460 |
+
"list_items": [
|
| 1461 |
+
"Mingbao Lin, Rongrong Ji, Yan Wang, Yichen Zhang, Baochang Zhang, Yonghong Tian, and Ling Shao. Hrank: Filter pruning using high-rank feature map. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 1529-1538, 2020.",
|
| 1462 |
+
"Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024a.",
|
| 1463 |
+
"Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 26296-26306, 2024b.",
|
| 1464 |
+
"Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024c.",
|
| 1465 |
+
"Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024d.",
|
| 1466 |
+
"Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, Kai Chen, and Dahua Lin. Mmbench: Is your multi-modal model an all-around player?, 2024e.",
|
| 1467 |
+
"Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering, 2022.",
|
| 1468 |
+
"Gen Luo, Yiyi Zhou, Yuxin Zhang, Xiawu Zheng, Xiaoshuai Sun, and Rongrong Ji. Feast your eyes: Mixture-of-resolution adaptation for multimodal large language models. arXiv preprint arXiv:2403.03003, 2024.",
|
| 1469 |
+
"Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, et al. Mm1: Methods, analysis & insights from multimodal llm pre-training. arXiv preprint arXiv:2403.09611, 2024.",
|
| 1470 |
+
"Denis Paperno, Germán Kruszewski, Angeliki Lazaridou, Quan Ngoc Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernández. The lambada dataset: Word prediction requiring a broad discourse context. arXiv preprint arXiv:1606.06031, 2016.",
|
| 1471 |
+
"David Raposo, Sam Ritter, Blake Richards, Timothy Lillicrap, Peter Conway Humphreys, and Adam Santoro. Mixture-of-depths: Dynamically allocating compute in transformer-based language models. arXiv preprint arXiv:2404.02258, 2024.",
|
| 1472 |
+
"Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad S. Khan. Llava++: Extending visual capabilities with llama-3 and phi-3, 2024.",
|
| 1473 |
+
"Siva Reddy, Danqi Chen, and Christopher D Manning. Coqa: A conversational question answering challenge. Transactions of the Association for Computational Linguistics, 7:249-266, 2019.",
|
| 1474 |
+
"Zhiqiang Shen, Tianhua Tao, Liquun Ma, Willie Neiswanger, Joel Hestness, Natalia Vassilieva, Daria Soboleva, and Eric Xing. Slimpajama-dc: Understanding data combinations for llm training. arXiv preprint arXiv:2309.10818, 2023.",
|
| 1475 |
+
"Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 8317-8326, 2019.",
|
| 1476 |
+
"Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14398-14409, 2024.",
|
| 1477 |
+
"Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024."
|
| 1478 |
+
],
|
| 1479 |
+
"bbox": [
|
| 1480 |
+
171,
|
| 1481 |
+
102,
|
| 1482 |
+
826,
|
| 1483 |
+
924
|
| 1484 |
+
],
|
| 1485 |
+
"page_idx": 12
|
| 1486 |
+
},
|
| 1487 |
+
{
|
| 1488 |
+
"type": "header",
|
| 1489 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1490 |
+
"bbox": [
|
| 1491 |
+
171,
|
| 1492 |
+
32,
|
| 1493 |
+
478,
|
| 1494 |
+
47
|
| 1495 |
+
],
|
| 1496 |
+
"page_idx": 12
|
| 1497 |
+
},
|
| 1498 |
+
{
|
| 1499 |
+
"type": "page_number",
|
| 1500 |
+
"text": "13",
|
| 1501 |
+
"bbox": [
|
| 1502 |
+
488,
|
| 1503 |
+
946,
|
| 1504 |
+
506,
|
| 1505 |
+
959
|
| 1506 |
+
],
|
| 1507 |
+
"page_idx": 12
|
| 1508 |
+
},
|
| 1509 |
+
{
|
| 1510 |
+
"type": "list",
|
| 1511 |
+
"sub_type": "ref_text",
|
| 1512 |
+
"list_items": [
|
| 1513 |
+
"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.",
|
| 1514 |
+
"Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024.",
|
| 1515 |
+
"Fuzhao Xue, Zian Zheng, Yao Fu, Jinjie Ni, Zhangwei Zheng, Wangchunshu Zhou, and Yang You. Openmoe: An early effort on open mixture-of-experts language models. arXiv preprint arXiv:2402.01739, 2024.",
|
| 1516 |
+
"Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023.",
|
| 1517 |
+
"Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities, 2023.",
|
| 1518 |
+
"Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Renliang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi, 2024.",
|
| 1519 |
+
"Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024.",
|
| 1520 |
+
"Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019."
|
| 1521 |
+
],
|
| 1522 |
+
"bbox": [
|
| 1523 |
+
171,
|
| 1524 |
+
102,
|
| 1525 |
+
825,
|
| 1526 |
+
522
|
| 1527 |
+
],
|
| 1528 |
+
"page_idx": 13
|
| 1529 |
+
},
|
| 1530 |
+
{
|
| 1531 |
+
"type": "header",
|
| 1532 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1533 |
+
"bbox": [
|
| 1534 |
+
171,
|
| 1535 |
+
32,
|
| 1536 |
+
478,
|
| 1537 |
+
47
|
| 1538 |
+
],
|
| 1539 |
+
"page_idx": 13
|
| 1540 |
+
},
|
| 1541 |
+
{
|
| 1542 |
+
"type": "page_number",
|
| 1543 |
+
"text": "14",
|
| 1544 |
+
"bbox": [
|
| 1545 |
+
490,
|
| 1546 |
+
946,
|
| 1547 |
+
506,
|
| 1548 |
+
959
|
| 1549 |
+
],
|
| 1550 |
+
"page_idx": 13
|
| 1551 |
+
}
|
| 1552 |
+
]
|
2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/0556a44d-0dc6-414e-954b-026617063a1b_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/0556a44d-0dc6-414e-954b-026617063a1b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:21f092e0e0ac5dc8cba315ba4f029e76991a202e0ae740350ebc358b4794bb97
|
| 3 |
+
size 2349640
|
2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/full.md
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# $\gamma$ -MOD: EXPLORING MIXTURE-OF-DEPTH ADAPTATION FOR MULTIMODAL LARGE LANGUAGE MODELS
|
| 2 |
+
|
| 3 |
+
Yaxin Luo $^{1}$ , Gen Luo $^{2\dagger}$ , Jiayi Ji $^{3,4}$ , Yiyi Zhou $^{3}$ , Xiaoshuai Sun $^{3}$ , Zhiqiang Shen $^{1}$ , Rongrong Ji $^{3}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ MBZUAI $^{2}$ OpenGVLab, Shanghai AI Laboratory
|
| 6 |
+
$^{3}$ Xiamen University $^{4}$ National University of Singapore
|
| 7 |
+
|
| 8 |
+
Project Page: Gamma-MOD
|
| 9 |
+
|
| 10 |
+
# ABSTRACT
|
| 11 |
+
|
| 12 |
+
Despite the significant progress in multimodal large language models (MLLMs), their high computational cost remains a barrier to real-world deployment. Inspired by the mixture of depths (MoDs) in natural language processing, we aim to address this limitation from the perspective of "activated tokens". Our key insight is that if most tokens are redundant for the layer computation, then they can be skipped directly via the MoD layer. However, directly converting the dense layers of MLLMs to MoD layers leads to substantial performance degradation. To address this issue, we propose an innovative MoD adaptation strategy for existing MLLMs called $\gamma$ -MoD. In $\gamma$ -MoD, a novel metric is proposed to guide the deployment of MoDs in the MLLM, namely rank of attention maps (ARank). Through ARank, we can effectively identify which layer is redundant and should be replaced with the MoD layer. Based on ARank, we further propose two novel designs to maximize the computational sparsity of MLLM while maintaining its performance, namely shared vision-language router and masked routing learning. With these designs, more than $90\%$ dense layers of the MLLM can be effectively converted to the MoD ones. To validate our method, we apply it to three popular MLLMs, and conduct extensive experiments on 9 benchmark datasets. Experimental results not only validate the significant efficiency benefit of $\gamma$ -MoD to existing MLLMs but also confirm its generalization ability on various MLLMs. For example, with a minor performance drop, i.e., $-0.9\%$ , $\gamma$ -MoD can reduce the training and inference time of LLaVA-HR by $31.0\%$ and $53.2\%$ , respectively.
|
| 13 |
+
|
| 14 |
+
# 1 INTRODUCTION
|
| 15 |
+
|
| 16 |
+
Recent years have witnessed the great success of large language models (LLMs) in natural language processing (NLP) (Achiam et al., 2023; Touvron et al., 2023; Cai et al., 2024b), which attracts increasing attentions in extending LLMs to vision-language (VL) tasks. Despite the progress, recent multimodal large language models (MLLMs) (Liu et al., 2024d;c; Chen et al., 2024a; Alayrac et al., 2022) are often criticized by their expensive computational costs. For example, the inference speed of existing MLLMs like LLaVA-HR (Luo et al., 2024) is still far from practical requirements, e.g., 4.7 samples per second. Driven by the progress of NLP, recent advances have employed the mixture-of-experts (MoEs) (Lin et al., 2024a; Jiang et al., 2024) to MLLMs to reduce the "activated parameters", thus achieving trade-off between efficiency and performance.
|
| 17 |
+
|
| 18 |
+
Orthogonal to MoEs, we aim to tackle the efficiency bottleneck of MLLMs from the perspective of "activated tokens". As shown in Fig. 1 (a), a large number of tokens are less important in the computation, such as visual background and prepositional words. However, existing MoEs still allocate the same experts to all input tokens, leading to redundant computational costs. A promising solution to this issue is the recently proposed mixture-of-depths (MoDs) in NLP (Raposo et al., 2024), which equips each token with a router to determine whether a module should be computed.
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
(a) Attention maps of different layers in LLaVA-HR
|
| 22 |
+
Figure 1: Visualization of attention maps in the MLLM and comparison of MoE with MoD. (a) Lower-rank layers often exhibit redundancy in their attention computation. (b) Different from MoE, MoD achieves the computational sparsity from the perspective of "activated token", where the computational budget is dynamically allocated to each token.
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
(b) Comparison of MoE and MoD
|
| 26 |
+
|
| 27 |
+
However, recent MoDs (Raposo et al., 2024) typically require pre-training LLMs from scratch, and their employment on MLLMs still remains under-explored.
|
| 28 |
+
|
| 29 |
+
In this paper, we focus on the efficient adaptation of MoDs to existing MLLMs. In particular, our goal is to maximize the computational sparsity of MLLMs while maintaining competitive performance. However, directly converting all dense layers of MLLMs to MoD layers leads to significant performance degradation, e.g., $-33.3\%$ of LLaVA-HR (Luo et al., 2024) on TextVQA (Singh et al., 2019). In practice, we observe that such issue is mainly caused by two aspects. Firstly, the deployment of MoDs lacks a practical guidance to measure the layer redundancy, thus undermining the necessary dense layers. As illustrated in Fig. 1 (a), attention patterns vary significantly across layers, and some layers exhibit less redundancy. Additionally, the setting of MLLMs, e.g., input modality, differs substantially from that of LLMs, making the direct adaptation of MoDs suboptimal.
|
| 30 |
+
|
| 31 |
+
To overcome these limitations, we first propose a novel metric to guide the deployment of MoDs in MLLMs, called the rank of attention maps (ARank). Our key insight is that lower-rank attention maps indicate that fewer tokens are necessary for computation. As shown in Fig. 1 (a), most tokens of Layer-4 are assigned small attention weights, contributing minimally to the final output. This provides a valuable hint for us to replace the redundant layer with the MoD one under the guidance of ARank. In practice, the calculation of ARank is both efficient and flexible. Empirically, we find that the average ARank remains largely consistent across different samples. Therefore, randomly sampling a small amount of data can already accurately estimate the ARanks.
|
| 32 |
+
|
| 33 |
+
Based on the ARank, we propose an innovative MoD adaptation strategy for existing MLLMs, called $\gamma$ -MoD. Specifically, $\gamma$ -MoD is a plug-and-play adaptation approach that can be seamlessly integrated into current MLLMs via instruction tuning. In $\gamma$ -MoD, two novel designs are adopted to maximize its benefits to MLLMs, namely shared vision-language router and masked routing learning. The shared vision-language router performs routing on the entire multimodal sequence and uses a weight-sharing strategy to facilitate optimization. Then, masked routing learning is introduced to prevent critical tokens from being skipped during training, i.e., instruction tokens. With these designs, over $90\%$ of dense layers can be converted to MoD layers with minimal performance sacrifice, resulting in even larger computational sparsity than the native MoD-based LLM (Raposo et al., 2024).
|
| 34 |
+
|
| 35 |
+
To validate $\gamma$ -MoD, we apply it to two popular MLLMs and conduct extensive experiments on 9 vision-language benchmarks. Experimental results show that $\gamma$ -MoD significantly improves the training and inference efficiency of existing MLLMs while keeping their performance competitive. For example, $\gamma$ -MoD reduces $51.6\%$ Flops, $31\%$ training time and $53.2\%$ inference time for LLaVA-HR (Luo et al., 2024), but its average performance decline is only $-1.5\%$ . More importantly, the great generalization ability of $\gamma$ -MoD is also witnessed on different MLLM structures and parameter sizes. Overall, the contribution of the paper can be summarized in three folds:
|
| 36 |
+
|
| 37 |
+
- We present a novel mixture-of-depth (MoD) framework for the sparse computation of existing MLLMs, namely $\gamma$ -MoD, which can seamlessly convert most dense layers in MLLMs to the sparse MoD layers.
|
| 38 |
+
- We propose an innovative metric to measure the layer redundancy, namely rank of attention maps (ARank). With ARank, we can determine which dense layer should be converted to the MoD one.
|
| 39 |
+
- We carefully explore the design of $\gamma$ -MoD in existing MLLMs, including the shared vision-language router and the masked routing learning, which can achieve up to $51.6\%$ computational sparsity with minor performance sacrifice. Extensive experiments also confirm the generalization ability of $\gamma$ -MoD.
|
| 40 |
+
|
| 41 |
+
# 2 RELATED WORK
|
| 42 |
+
|
| 43 |
+
# 2.1 MULTIMODAL LARGE LANGUAGE MODELS
|
| 44 |
+
|
| 45 |
+
Large language models (LLMs) (Achiam et al., 2023; Touvron et al., 2023; Jiang et al., 2024; Almazrouei et al., 2023; Cai et al., 2024b; Abdin et al., 2024; Shen et al., 2023) have proven their strong capabilities in various natural language processing tasks (Paperno et al., 2016; Fyodorov et al., 2000; Reddy et al., 2019; Ziegler et al., 2019). Motivated by this, numerous efforts (Liu et al., 2024d; Bai et al., 2023a; Ye et al., 2023; Dai et al., 2023; Chen et al., 2024b; Li et al., 2024b; Tong et al., 2024; Rasheed et al., 2024; Dong et al., 2023; Xie et al., 2024; Zhou et al., 2024; Chen et al., 2023; Alayrac et al., 2022; Sun et al., 2024) have been devoted into extending LLMs to multimodal large language models (MLLMs). Among them, the most representative work is LLaVA (Liu et al., 2024d), which uses a lightweight projector to connect a visual encoder and an LLM. This simple framework has now become the de-facto paradigm in the community, empowering a set of MLLMs like Mini-Gemini (Li et al., 2024b) and InternVL (Chen et al., 2024b). Recently, researchers have shifted their attention to high-resolution MLLMs. For example, LLaVA-NexT (Liu et al., 2024c) and InternVL-1.5 (Chen et al., 2024a) adopt the dynamic image slicing strategy for high-resolution adaptation. LLaVA-HR (Luo et al., 2024) further proposes a dual-branch structure to reduce the cost of high-resolution MLLMs. Despite the effectiveness, existing high-resolution MLLMs (Liu et al., 2024c; Li et al., 2024a) will produce much longer input token sequences, resulting in prohibitively expensive computational costs. In this paper, the proposed $\gamma$ -MoD can greatly overcome the efficiency bottleneck of existing MLLMs, which is significant for their practical applications.
|
| 46 |
+
|
| 47 |
+
# 2.2 SPARSE COMPUTATION FOR LLMS
|
| 48 |
+
|
| 49 |
+
Recently, increasing attention has been focused on the sparse computation of LLMs. Specifically, the mixture of experts (MoEs) are the most popular technology in the community (McKinzie et al., 2024; Cai et al., 2024a; Xue et al., 2024), which dynamically activates part of expert networks for each token, thereby achieving trade-offs between capability and efficiency. For instance, MoE-LLaVA (Lin et al., 2024a) proposed a novel approach to convert a dense MLLM to a mixture-of-expert structure. However, these methods often require additional training costs to realize the adaptation to MLLMs. Orthogonal to MoE, Raposo et al. (2024) proposed the mixture of depths (MoDs) to dynamically allocate computations for each token. Compared to MoE, the main principle of MoD is to reduce the "activated tokens" instead of the "activated parameters". This paradigm has shown great potentials for the sparse computation of LLMs, but its potential on MLLM is still under exploration. Recently, token-based pruning methods have emerged as a new promising solution. The most representative one is the FastV (Chen et al., 2025), which directly deletes the unimportant visual tokens according to their attention scores, thus achieving significant computational savings without compromising performance. Orthogonal to these works, we are the first to explore MoDs on MLLMs, which can seamlessly realize sparse computations of existing MLLMs on both visual and textual tokens.
|
| 50 |
+
|
| 51 |
+
# 3 PRELIMINARIES
|
| 52 |
+
|
| 53 |
+
We first recap the mechanism of Mixture of Experts (MoEs) and Mixture of Depths (MoDs).
|
| 54 |
+
|
| 55 |
+

|
| 56 |
+
Stage-1: Vision-Language Alignment
|
| 57 |
+
Stage-2: Instruction Tuning
|
| 58 |
+
Figure 2: Illustration of our $\gamma$ -MoD adaptation on LLaVA-HR. $\gamma$ -MoD is a plug-and-play approach that can be directly applied in existing MLLMs. After vision-language alignment, $\gamma$ -MoD can replace most redundant layers with MoD ones via the rank-based redundancy estimation.
|
| 59 |
+
|
| 60 |
+
Mixture of experts. In particular, the main principle of MoE is to reduce the "activated parameters" in dense models. Existing MoE-based LLMs (Dai et al., 2024; Liu et al., 2024a; Lin et al., 2024a; Jiang et al., 2024) and MLLMs (Luo et al., 2024; Chen et al., 2024a; Liu et al., 2024d) often contain multiple FFN modules in their layers, also termed experts. During training and inference, only few experts are activated to participate in computations, thus retaining the trade-offs between performance and efficiency. Given input features $x \in \mathbb{R}^{l \times d}$ , MoE mechanism can be defined by
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
x = x + \sum_ {j = 1} ^ {k} \mathcal {D} _ {j} (x) R _ {j} (x). \tag {1}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
Here, $\mathcal{D}(\cdot)$ denotes the expert layer, i.e., FFN. $k$ is the number of activated experts, and $R_{j}(\cdot)$ is the corresponding routing function. In practice, top-k experts are selected according to their routing scores, where $k$ is much smaller than the total number of experts $K$ .
|
| 67 |
+
|
| 68 |
+
Mixture of depths. Different from MoEs, MoDs aim to improve the model efficiency via the reduction of "activated tokens". Compared to MoEs, the routing mechanism of MoDs performs on input tokens, and most tokens will directly skip the dense layer in MLLMs. Thus, MoDs can be written as
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
x _ {j} = \left\{ \begin{array}{l l} x _ {j} + \mathcal {D} \left(x _ {j}\right) R \left(x _ {j}\right) & \text {i f} R \left(x _ {j}\right) \geq \delta_ {s}, \\ x _ {j} & \text {i f} R \left(x _ {j}\right) < \delta_ {s}, \end{array} \right. \tag {2}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
where $x_{j} \in \mathbb{R}^{d}$ denotes the token vector in $x$ , and $\delta_{s}$ is a routing threshold. As defined in Eq. 2, inactive tokens will directly skip the layer $\mathcal{D}(\cdot)$ to save the computational cost.
|
| 75 |
+
|
| 76 |
+
Discussion. In existing MLLMs (Lin et al., 2024a), MoE is typically used to efficiently scale up the model size, while its computations are not directly reduced. In contrast, MoD can perform as a plug-and-play module to save the cost of a common dense layer, which is more significant to the efficient scenario. Unfortunately, the adaptation of MoD to existing MLLMs is still under-explored, and its practical use in LLMs also requires expensive pretraining.
|
| 77 |
+
|
| 78 |
+
# 4 METHOD
|
| 79 |
+
|
| 80 |
+
# 4.1 OVERVIEW
|
| 81 |
+
|
| 82 |
+
In this paper, we propose a novel method to efficiently deploy MoDs to existing MLLMs, namely $\gamma$ -MoD. The core principle of $\gamma$ -MoD is to identify redundant MLLM layers via a novel metric called
|
| 83 |
+
|
| 84 |
+

|
| 85 |
+
Figure 3: Visualization of ARank based on different tasks (left) and sample sizes (right). The horizontal axis represents the layer index of LLaVA-HR. The darker color indicates the larger ARank.
|
| 86 |
+
|
| 87 |
+

|
| 88 |
+
|
| 89 |
+
rank of attention maps (ARank) and replace them with the proposed MoD layer. Therefore, the deployment of $\gamma$ -MoD in the given MLLM, i.e., $\mathcal{F}_{\mathrm{MLLM}}(\cdot)$ , can be formulated by
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\mathcal {F} _ {\mathrm {M L L M}} = \mathcal {G} _ {0} \circ \mathcal {G} _ {1} \circ \mathcal {G} _ {2} \dots \circ \mathcal {G} _ {n},
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
where $\mathcal{G}_i = \left\{ \begin{array}{ll}\mathcal{D}_i & \text{if}\tau (\mathcal{D}_i)\geq \delta_\tau ,\\ \mathcal{S}_i & \text{if}\tau (\mathcal{D}_i) < \delta_\tau . \end{array} \right.$ (3)
|
| 96 |
+
|
| 97 |
+
Here, $\mathcal{G}(\cdot)$ denotes the layer of the MLLM, where $\mathcal{D}(\cdot)$ and $\mathcal{S}(\cdot)$ indicate the dense layer and its MoD alternative, respectively. $\tau (\cdot)$ is a function to estimate the redundancy of the given dense layer $\mathcal{D}_i$ and $\delta_{\tau}$ is a threshold. Given the architecture in Eq. 3, $\gamma$ -MoD aims to maximize the sparsity while maintaining the performance. Thus, the optimization objective of $\gamma$ -MoD can be written as:
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\arg \min _ {\theta , \theta_ {r}} \mathcal {L} _ {o b j} \left(\mathcal {F} _ {\mathrm {M L L M}} \left(x ^ {0}; \theta\right)\right) + \sum_ {i = 1} ^ {k} \mathcal {L} _ {a u g} \left(R \left(x ^ {i}; \theta_ {r}\right)\right),
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
s.t. $\frac{1}{k\cdot d}\sum_{i = 1}^{k}\sum_{j = 1}^{d}\mathbb{I}_{R(x_j^i) < \delta_s} = \alpha .$ (4)
|
| 104 |
+
|
| 105 |
+
Here, $\mathcal{L}_{obj}$ and $\mathcal{L}_{aug}$ denote the auto-regressive loss and the routing loss for the router $R(\cdot)$ , respectively. $x^{i}$ is the input tokens of $i$ -th layer, and $\alpha$ is the pre-defined sparse target. $\mathbb{I}_{R(x_{j}^{i}) < \delta_{s}} \to \{0,1\}$ is the indicator function, which is equal to 1 when $R(x_{j}^{i}) < \delta_{s}$ . And $k$ is the number of layers, $d$ denotes the number of tokens per layer.
|
| 106 |
+
|
| 107 |
+
# 4.2 RANK-BASED REDUNDANCY ESTIMATION
|
| 108 |
+
|
| 109 |
+
The key challenge of $\gamma$ -MoD is how to identify the dense layer that should be converted to the MoD one. The original MoD-based LLM (Raposo et al., 2024) overcomes this issue by the hand-craft attempt, which is still sub-optimal and time-consuming. However, in existing MLLMs, the LLM is already pre-trained on large scale of corpus, which can intuitively provide sufficient knowledge to achieve the process automatically.
|
| 110 |
+
|
| 111 |
+
Motivated by this, we propose an innovative metric to estimate the token-wise redundancy of a layer in MLLM, namely rank of attention maps (ARank). In particular, given tokens $x^{i} \in \mathbb{R}^{l \times d}$ of $i$ -th layer, ARank is defined by the average rank of attention maps:
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\tau \left(x ^ {i}, \mathcal {D} _ {i}\right) = \frac {1}{n _ {h}} \sum_ {h = 1} ^ {n _ {h}} \operatorname {r a n k} \left(A _ {h}\right), \tag {5}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
where $A_{h} = (x^{i}W_{Q}^{h})(x^{i}W_{K}^{h})^{T}$
|
| 118 |
+
|
| 119 |
+
Here, $\mathrm{rank}(\cdot)$ denotes the rank calculation. $n_h$ is the number of attention heads. $A_h \in \mathbb{R}^{l \times l}$ is the attention map in $h$ -th head, and $W_Q^h \in \mathbb{R}^{d \times \frac{d}{h}}$ and $W_K^h \in \mathbb{R}^{d \times \frac{d}{h}}$ are the corresponding weights.
|
| 120 |
+
|
| 121 |
+
Theoretical analysis of ARank. In Eq. 5, attention map $A_{h}$ can well reflect the contribution of different tokens. Thus, $A_{h}$ with a low rank suggests that most tokens are less informative. To validate this, we conduct a SVD (G.H.Goulb & C.Reinsch, 1971) analysis for $A_{h}$ , which is written as
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
A _ {h} = \sum_ {i = 1} ^ {r} \sigma_ {i} u _ {i} v _ {i} ^ {T} = \sum_ {i = 1} ^ {r ^ {\prime}} \sigma_ {i} u _ {i} v _ {i} ^ {T} + \sum_ {i = r ^ {\prime} + 1} ^ {r} \sigma_ {i} u _ {i} v _ {i} ^ {T}, \tag {6}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
where $r$ is the rank of $A_{h}$ and $r' \ll r$ is a constant value. $\sigma_{i}, u_{i}$ and $v_{i}$ denote the $i$ -th singular value, left singular vector and right singular vector of $A_{h}$ , respectively. As shown in Eq. 6, $A_{h}$ can be decomposed into a matrix of rank $r'$ and additional information, i.e., $\sum_{i=r'+1}^{r} \sigma_{i} u_{i} v_{i}^{T}$ . Therefore, a lower-rank attention map suggests higher redundancy, which implies that MoD can be deployed to skip most tokens.
|
| 128 |
+
|
| 129 |
+
Practical calculation of ARank. As defined in Eq. 5, it is still challenging to accurately calculate the ARank due to the variance of individual samples. Inspired by HRank (Lin et al., 2020), we estimate ARank using its expectation on a batch of samples. Different from HRank, we aim to estimate the layer redundancy by the rank of their attention maps, thus guiding the deployment of MoD. Specifically, ARank estimates layer redundancy based on the rank of attention maps, enabling its use in guiding the deployment of MoD. As shown in Fig. 3, we visualize the average ARank values of LLaVA-HR (Luo et al., 2024) across different input samples. These results demonstrate that the expected ARank remains largely consistent across tasks, indicating that a small batch size is sufficient for reliable computation. In our experiments, we set the sample size to 50 to balance computational efficiency and accuracy.
|
| 130 |
+
|
| 131 |
+
# 4.3 MIXTURE-OF-DEPTH ADAPTATION
|
| 132 |
+
|
| 133 |
+
To maximize the effectiveness of MoDs to existing MLLMs, we carefully investigate the micro design of MoDs, including the shared vision-language router and the masked routing learning.
|
| 134 |
+
|
| 135 |
+
Shared vision-language router. Conventional MoDs (Raposo et al., 2024) are designed for LLMs, so their routing is only performed on textual tokens. In MLLMs, such a strategy is sub-optimal due to the large redundancy of visual tokens (Jin et al., 2024; Kim et al., 2024). Therefore, the router of $\gamma$ -MoD, i.e., $R(\cdot)$ , aims to skip both visual and textual tokens, which is defined by
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
R (x) = \operatorname {s o f t m a x} \left(x W _ {R} + b _ {R}\right), \tag {7}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where $x = \{q, a, t\}$ denotes the vision-language tokens, which consist of question tokens $q \in \mathbb{R}^{l_q \times d}$ , image tokens $a \in \mathbb{R}^{l_a \times d}$ and textual response tokens $t \in \mathbb{R}^{l_t \times d}$ . $W_R \in \mathbb{R}^{d \times 2}$ and $b_R \in \mathbb{R}^2$ are the weights and bias, respectively. Notably, we use a binary softmax function to produce the routing probability, where $R(x)^0$ denotes the probability of skipping. Based on Eq. 7, we further share the router parameters for all MoD layers, which is significant for the stable optimization. To explain, the shared router receives more gradients from different layers, greatly facilitating its convergence at the beginning of training.
|
| 142 |
+
|
| 143 |
+
Masked routing learning. During VL training, not all tokens contribute equally to the optimizing process. In particular, the skip of key tokens in the question, e.g., subject, will greatly hurt the generative training as the answer relies on these conditional elements. Therefore, we introduce a masked routing learning strategy to prevent these tokens from being dropped during training. In this case, the objective of the routing learning can be defined by
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
\mathcal {L} _ {a u g} (x) = \log \left(R (x) ^ {1} \cdot M _ {q}\right) \hat {R} + \log \left(1 - R (x) ^ {0} \cdot M _ {q}\right) (1 - \hat {R}). \tag {8}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
Here, $M_q \in \mathbb{R}^{l \times 1}$ denotes the binary mask, where the question tokens are assigned to 0. $\hat{R} \in \mathbb{R}^{l}$ is the one-hot vector, where the positions with top-k routing scores are assigned to 1.
|
| 150 |
+
|
| 151 |
+
The training scheme. Typically, MLLM training is divided into two stages: vision-language (VL) alignment and instruction tuning. $\gamma$ -MoD is a plug-and-play adaptation method, which is deployed in the instruction tuning stage. Therefore, we can skip the VL alignment by directly using the well pre-trained projector. Then, $\gamma$ -MoD evaluates layer redundancy using the ARank metric and replaces redundant layers with MoD layers. During instruction tuning, the routing parameters are jointly optimized via the routing and task objectives. Importantly, all other training configurations can remain consistent with the original MLLM setup, ensuring seamless integration of $\gamma$ -MoD.
|
| 152 |
+
|
| 153 |
+
# 5 EXPERIMENTS
|
| 154 |
+
|
| 155 |
+
# 5.1 DATASETS AND METRICS
|
| 156 |
+
|
| 157 |
+
We evaluate our $\gamma$ -MoD on five MLLM benchmarks, which includes POPE (Li et al., 2023), MME (Fu et al., 2024), MMB (Liu et al., 2024e), MMMU (Yue et al., 2024) and MM-Vet (Yu et al., 2023). We
|
| 158 |
+
|
| 159 |
+
Table 1: Comparison of different $\gamma$ -MoD configurations on LLaVA-HR. The default setting used in the table is colored in gray. "Q" and "A" refer to question and answer tokens.
|
| 160 |
+
|
| 161 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="2">GQA</td><td colspan="2">SQA</td><td colspan="2">MMMU</td><td colspan="2">TextVQA</td><td colspan="2">Average</td></tr><tr><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>TFlops</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td></tr><tr><td>MoD layer:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>All layers</td><td>45.9</td><td>38.2%</td><td>42.6</td><td>33.7%</td><td>25.9</td><td>32.8%</td><td>33.8</td><td>34.1%</td><td>37.1</td><td>12.3</td></tr><tr><td>1 MoD per 2 layers</td><td>57.8</td><td>19.1%</td><td>52.3</td><td>16.5%</td><td>26.9</td><td>16.6%</td><td>54.0</td><td>17.9%</td><td>47.8</td><td>16.1</td></tr><tr><td>2 MoDs per 3 layers</td><td>38.1</td><td>26.8%</td><td>46.5</td><td>24.6%</td><td>24.3</td><td>24.4%</td><td>42.1</td><td>24.9%</td><td>37.8</td><td>15.9</td></tr><tr><td>ARank-based deployment</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td></tr><tr><td>Masked token:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>None</td><td>63.2</td><td>52.0%</td><td>66.8</td><td>46.9%</td><td>33.9</td><td>47.0%</td><td>64.7</td><td>49.8%</td><td>57.2</td><td>10.7</td></tr><tr><td>Q</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td></tr><tr><td>Q + A</td><td>62.8</td><td>38.8%</td><td>68.6</td><td>30.5%</td><td>34.7</td><td>35.4%</td><td>62.0</td><td>37.2%</td><td>57.0</td><td>13.0</td></tr><tr><td>Shared router:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Not 
Share</td><td>60.6</td><td>55.8%</td><td>64.5</td><td>48.2%</td><td>32.1</td><td>48.9%</td><td>58.4</td><td>52.9%</td><td>53.9</td><td>10.3</td></tr><tr><td>Share</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td></tr><tr><td>Routing ratio:</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>17%</td><td>63.6</td><td>18.9%</td><td>68.9</td><td>15.5%</td><td>34.7</td><td>14.7%</td><td>66.1</td><td>16.5%</td><td>58.3</td><td>16.3</td></tr><tr><td>34%</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td></tr><tr><td>51%</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td></tr><tr><td>68%</td><td>59.1</td><td>77.8%</td><td>70.1</td><td>73.5%</td><td>33.7</td><td>71.8%</td><td>58.4</td><td>74.1%</td><td>55.3</td><td>6.5</td></tr></table>
|
| 162 |
+
|
| 163 |
+
Table 2: Ablation study of $\gamma$ -MoD on LLaVA-HR. "Param", "Acc." and "Skip" indicate the parameter, accuracy, and skip ratio, respectively.
|
| 164 |
+
|
| 165 |
+
<table><tr><td rowspan="2">Methods</td><td rowspan="2">Param</td><td colspan="2">GQA</td><td colspan="2">SQA</td><td colspan="2">MMMU</td><td colspan="2">TextVQA</td><td rowspan="2">Acc.</td><td rowspan="2">Average TFlops</td><td rowspan="2">Skip</td></tr><tr><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>7.4B</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td><td>0%</td></tr><tr><td>+ Default MoD (Raposo et al., 2024)</td><td>7.4B</td><td>45.9</td><td>38.2%</td><td>42.6</td><td>33.7%</td><td>25.9</td><td>32.8%</td><td>33.8</td><td>34.1%</td><td>37.1</td><td>12.3</td><td>34.7%</td></tr><tr><td>+ ARank-based deployment (ours)</td><td>7.4B</td><td>63.2</td><td>52.0%</td><td>66.8</td><td>46.9%</td><td>33.9</td><td>47.0%</td><td>64.7</td><td>49.8%</td><td>57.2</td><td>10.7</td><td>48.9%</td></tr><tr><td>+ Masked routing learning (ours)</td><td>7.4B</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td><td>57.7%</td></tr></table>
|
| 166 |
+
|
| 167 |
+
report all the results in their default settings. In addition, we evaluate $\gamma$ -MoD on six image question answering benchmarks: VQAv2 (Goyal et al., 2017), VizWiz (Gurari et al., 2018), TextVQA (Singh et al., 2019), SQA (Lu et al., 2022), GQA (Hudson & Manning, 2019) and SEED (Ge et al., 2023). We report all the results in their default settings. For MME, we report the perception score.
|
| 168 |
+
|
| 169 |
+
# 5.2 IMPLEMENTATION DETAILS
|
| 170 |
+
|
| 171 |
+
For all models, pre-training is conducted on LCS-558K dataset (Liu et al., 2024b), which includes high-quality 558k image-text pairs. For instruction tuning, we follow LLaVA-1.5 (Liu et al., 2024b) to use 665k vision-language instruction data. To deploy $\gamma$ -MoD to MLLMs, ARank is calculated to identify redundant layers after the pre-training stage. For all models, the fourth largest ARank value is used as the threshold for converting dense layers to MoD ones. During instruction tuning, the coefficient for the routing loss is set to 0.01. The remaining settings are kept the same with LLaVA-HR (Luo et al., 2024) and LLaVA (Liu et al., 2024b), including learning rate, training epochs, optimizer and datasets, etc.
|
| 172 |
+
|
| 173 |
+
# 5.3 EXPERIMENTAL RESULTS
|
| 174 |
+
|
| 175 |
+
# 5.3.1 QUANTITATIVE ANALYSIS
|
| 176 |
+
|
| 177 |
+
Comparison with different MoD configurations. In Tab. 1, we first compare different settings of MoD on LLaVA-HR (Luo et al., 2024). From this table, the first observation is that directly converting all layers to MoD ones leads to worse results, e.g., $33.8\%$ on TextVQA. Besides, although the hand-craft strategy performs much better, its performance declines are still obvious, e.g., $-10.7\%$ of 1 MoD per 2 layers on average. These results confirm the challenges of adopting MoDs to MLLMs.
|
| 178 |
+
|
| 179 |
+
Table 3: Results of $\gamma$ -MoD on different MLLM architectures and model scales. $\gamma$ -MoD-0.3 and $\gamma$ -MoD-0.5 denote the routing ratio of $30\%$ and $50\%$ , respectively.
|
| 180 |
+
|
| 181 |
+
<table><tr><td rowspan="2">Methods</td><td rowspan="2">Param</td><td colspan="2">GQA</td><td colspan="2">SQA</td><td colspan="2">MMMU</td><td colspan="2">TextVQA</td><td rowspan="2">Acc.</td><td rowspan="2">Average TFlops</td><td rowspan="2">Skip</td></tr><tr><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td><td>Acc.</td><td>Skip</td></tr><tr><td colspan="13">MLLM architecture:</td></tr><tr><td>LLaVA</td><td>7B</td><td>62.0</td><td>0%</td><td>66.8</td><td>0%</td><td>34.3</td><td>0%</td><td>58.2</td><td>0%</td><td>55.3</td><td>10.7</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>61.1</td><td>34.1%</td><td>64.7</td><td>29.4%</td><td>35.4</td><td>29.8%</td><td>56.3</td><td>30.7%</td><td>54.4</td><td>7.7</td><td>31.0%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>41.4</td><td>60.9%</td><td>62.3</td><td>54.8%</td><td>31.0</td><td>53.6%</td><td>42.9</td><td>56.2%</td><td>44.4</td><td>5.3</td><td>56.4%</td></tr><tr><td>LLaVA-HR</td><td>7B</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td><td>37.9%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td><td>57.7%</td></tr><tr><td>Mini-Gemini-HD</td><td>7B</td><td>62.9</td><td>0%</td><td>69.6</td><td>0%</td><td>36.8</td><td>0%</td><td>66.5</td><td>0%</td><td>59.0</td><td>60.2</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>62.1</td><td>37.1%</td><td>69.0</td><td>34.6%</td><td>34.1</td><td>36.4%</td><td>66.4</td><td>36.6%</td><td>57.9</td><td>39.4</td><td>36.2%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>62.2</td><td>59.2%</td><td>70.4</td><td>56.8%</td><td>33.9</td><td>58.6%</td><td>67.0</td><td>57.7%
</td><td>58.4</td><td>27.8</td><td>58.1%</td></tr><tr><td colspan="13">Model scales:</td></tr><tr><td>LLaVA-HR</td><td>7B</td><td>64.2</td><td>0%</td><td>67.9</td><td>0%</td><td>34.6</td><td>0%</td><td>67.1</td><td>0%</td><td>58.5</td><td>19.2</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>7B</td><td>63.7</td><td>40.7%</td><td>68.5</td><td>35.9%</td><td>35.6</td><td>36.8%</td><td>65.3</td><td>38.2%</td><td>58.3</td><td>12.6</td><td>37.9%</td></tr><tr><td>+γ-MoD-0.5</td><td>7B</td><td>63.1</td><td>60.3%</td><td>67.9</td><td>56.9%</td><td>34.7</td><td>56.6%</td><td>64.9</td><td>57.1%</td><td>57.6</td><td>9.3</td><td>57.1%</td></tr><tr><td>LLaVA-HR</td><td>13B</td><td>64.8</td><td>0%</td><td>68.1</td><td>0%</td><td>36.7</td><td>0%</td><td>68.1</td><td>0%</td><td>59.4</td><td>37.1</td><td>0%</td></tr><tr><td>+γ-MoD-0.3</td><td>13B</td><td>64.5</td><td>38.1%</td><td>70.5</td><td>33.1%</td><td>37.8</td><td>32.5%</td><td>67.0</td><td>36.0%</td><td>60.0</td><td>25.1</td><td>34.9%</td></tr><tr><td>+γ-MoD-0.5</td><td>13B</td><td>64.8</td><td>58.8%</td><td>69.5</td><td>52.2%</td><td>35.8</td><td>53.8%</td><td>66.8</td><td>55.4%</td><td>59.2</td><td>18.4</td><td>55.1%</td></tr></table>
|
| 182 |
+
|
| 183 |
+
Table 4: Training and inference efficiency of $\gamma$ -MoD on LLaVA-HR. The inference efficiency is tested on an NVIDIA A100 GPU, which is the average value of GQA, SQA, MMMU, and TextVQA.
|
| 184 |
+
|
| 185 |
+
<table><tr><td>Methods</td><td>Training Time ↓</td><td>Inference Throughput ↑</td><td>Inference Memory ↓</td><td>Inference TFlops ↓</td><td>Avg. Acc. ↑</td></tr><tr><td>LLaVA-HR</td><td>20.7 h</td><td>4.7 samples/s</td><td>19 G</td><td>19.2</td><td>58.5</td></tr><tr><td>+γ-MoD-0.3</td><td>15.4 h</td><td>5.9 samples/s</td><td>15 G</td><td>12.6</td><td>58.3</td></tr><tr><td>+γ-MoD-0.5</td><td>14.3 h</td><td>7.2 samples/s</td><td>14 G</td><td>9.3</td><td>57.6</td></tr><tr><td>Gains</td><td>-31.0%</td><td>+53.2%</td><td>-26.3%</td><td>-51.6%</td><td>-0.9%</td></tr></table>
|
| 186 |
+
|
| 187 |
+
However, after employing our ARank-based strategy, the efficiency of LLaVA-HR is greatly increased while the performance is well maintained.
|
| 188 |
+
|
| 189 |
+
In Tab. 1, we also validate different micro-designs for deploying MoD on MLLM, including the masked routing learning, the shared router and the routing ratio. From these comparisons, we first see that the masked learning strategy is much beneficial to the optimization of $\gamma$ -MoD, providing up to $+1.7\%$ gains on SQA. In addition, we also find that the router sharing strategy plays a significant role in $\gamma$ -MoD. After removing this strategy, model performance will obviously drop on TextVQA by $-6.5\%$ . For routing threshold, we observe that the adaptive thresholds perform better while the default one is more efficient. Finally, we validate the impact of different routing ratio on LLaVA-HR. From results we can see that model performance can be retained under relatively small routing ratios, i.e., $17\%$ and $34\%$ . When routing ratio is increased to $51\%$ , model performance drops slightly from $58.3\%$ to $57.6\%$ on average. However, the benefit of efficiency is still notable, i.e., $-51.5\%$ Flops.
|
| 190 |
+
|
| 191 |
+
Ablation studies. To validate the contributions of each design in $\gamma$ -MoD, we conduct an ablation study in Tab. 2. From this table, we can see that the default MoD will cause obvious performance degeneration, resulting in up to $-25.3\%$ on SQA. In stark contrast, with our ARank-based deployment, the average performance of LLaVA-HR is improved from $37.1\%$ to $57.2\%$ , and the computational sparsity also boosts from $34.7\%$ to $48.9\%$ . Such comparison confirms that not all layers can be converted to MoD layers, and ARank is critical to identify the redundant ones. In addition, the use of masked routing learning can further benefit the model training, providing $+0.8\%$ on MMMU and $+0.2\%$ on TextVQA, respectively. These results further confirm the effectiveness of $\gamma$ -MoD.
|
| 192 |
+
|
| 193 |
+
# 5.3.2 COMPARISON WITH EXISTING MLLMS
|
| 194 |
+
|
| 195 |
+
Generalizations of $\gamma$ -MoD on different MLLMs. In Tab. 3, we also evaluate the generalization capability of $\gamma$ -MoD across different MLLM architectures and model scales. In particular, $\gamma$ -MoD with $30\%$ routing ratio demonstrates great trade-off between performance and efficiency on LLaVA. When
|
| 196 |
+
|
| 197 |
+
Table 5: Comparison with quantization and pruning methods. "Speed", "Prefilling" and "Next-token" indicate the throughput (samples/s), prefilling time (seconds) and next-token time (seconds), respectively. For MMMU, models predict one option without the need of the next-token time.
|
| 198 |
+
|
| 199 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="3">MMMU</td><td colspan="4">MM-Vet</td></tr><tr><td>Acc.</td><td>Speed</td><td>Prefilling</td><td>Acc.</td><td>Speed</td><td>Prefilling</td><td>Next-token</td></tr><tr><td>LLaVA-v1.5-7B (Liu et al., 2024b)</td><td>34.3</td><td>9.1</td><td>0.11</td><td>30.5</td><td>0.53</td><td>0.20</td><td>1.8</td></tr><tr><td>+ AWQ-4bit (Lin et al., 2024b)</td><td>34.8</td><td>11.1</td><td>0.09</td><td>26.5</td><td>0.57</td><td>0.16</td><td>1.6</td></tr><tr><td>+ FastV(K=2,R=50%) (Chen et al., 2025)</td><td>33.9</td><td>11.6</td><td>0.09</td><td>28.8</td><td>0.68</td><td>0.17</td><td>1.3</td></tr><tr><td>+ γ-MoD-0.3</td><td>35.4</td><td>12.5</td><td>0.08</td><td>29.1</td><td>0.76</td><td>0.19</td><td>1.1</td></tr></table>
|
| 200 |
+
|
| 201 |
+
Table 6: Comparison with existing dense and sparse MLLMs on 9 benchmarks. Speed is the average samples per second of GQA, SQA, MMMU, and TextVQA.
|
| 202 |
+
|
| 203 |
+
<table><tr><td rowspan="2">Methods</td><td rowspan="2">Param.</td><td colspan="4">Image Question Answering</td><td colspan="5">Benchmark Toolkit</td><td rowspan="2">Speed</td></tr><tr><td>TextVQA</td><td>$VQA^{v2}$</td><td>GQA</td><td>$SQA^I$</td><td>POPE</td><td>MME</td><td>MMB</td><td>MMMU</td><td>MM-Vet</td></tr><tr><td colspan="12">Dense Model:</td></tr><tr><td>I-80B (Laurençon et al., 2024)</td><td>65B</td><td>-</td><td>60.0</td><td>45.2</td><td>-</td><td>-</td><td>-</td><td>54.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>InstructBLIP (Dai et al., 2023)</td><td>14B</td><td>50.7</td><td>-</td><td>49.5</td><td>63.1</td><td>78.9</td><td>1212.8</td><td>-</td><td>-</td><td>25.6</td><td>-</td></tr><tr><td>VILA (Lin et al., 2024c)</td><td>7B</td><td>64.4</td><td>79.9</td><td>62.3</td><td>68.2</td><td>85.5</td><td>1533.0</td><td>68.9</td><td>-</td><td>34.9</td><td>-</td></tr><tr><td>Qwen-VL (Bai et al., 2023b)</td><td>10B</td><td>63.8</td><td>78.8</td><td>59.3</td><td>67.1</td><td>-</td><td>1487.6</td><td>38.2</td><td>-</td><td>-</td><td>4.6</td></tr><tr><td>LLaVA-1.5 (Liu et al., 2024b)</td><td>7B</td><td>58.2</td><td>78.5</td><td>62.0</td><td>66.8</td><td>85.9</td><td>1510.7</td><td>64.3</td><td>34.3</td><td>30.5</td><td>8.1</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>7B</td><td>67.1</td><td>81.9</td><td>64.2</td><td>67.9</td><td>87.6</td><td>1554.9</td><td>66.8</td><td>35.2</td><td>31.2</td><td>4.7</td></tr><tr><td>LLaVA-HR (Luo et al., 2024)</td><td>13B</td><td>68.1</td><td>82.3</td><td>64.8</td><td>68.1</td><td>87.8</td><td>1540.9</td><td>64.5</td><td>36.3</td><td>34.8</td><td>3.1</td></tr><tr><td colspan="12">Sparse Model:</td></tr><tr><td>MoE-LLaVA (Lin et al., 2024a)</td><td>3B</td><td>50.1</td><td>76.7</td><td>60.3</td><td>62.6</td><td>85.7</td><td>1318.2</td><td>60.2</td><td>-</td><td>26.9</td><td>8.5</td></tr><tr><td>MoE-LLaVA (Lin et al., 
2024a)</td><td>5B</td><td>51.4</td><td>77.6</td><td>61.4</td><td>68.5</td><td>86.3</td><td>1423.0</td><td>65.2</td><td>-</td><td>34.3</td><td>5.6</td></tr><tr><td>γ-MoD-LLaVA(ours)</td><td>7B</td><td>56.3</td><td>77.6</td><td>61.1</td><td>64.7</td><td>86.0</td><td>1342.1</td><td>59.4</td><td>35.4</td><td>29.8</td><td>10.3</td></tr><tr><td>γ-MoD-LLaVA-HR(ours)</td><td>7B</td><td>64.9</td><td>80.6</td><td>63.1</td><td>67.9</td><td>87.3</td><td>1516.0</td><td>63.4</td><td>34.7</td><td>31.5</td><td>7.2</td></tr><tr><td>γ-MoD-LLaVA-HR(ours)</td><td>13B</td><td>66.8</td><td>82.0</td><td>64.8</td><td>69.5</td><td>86.7</td><td>1515.4</td><td>65.2</td><td>35.8</td><td>34.0</td><td>4.8</td></tr></table>
|
| 204 |
+
|
| 205 |
+
the routing ratio increases to $51\%$ , the performance of LLaVA decreases significantly, suggesting its relatively low tolerance to high routing ratios. For LLaVA-HR, the $\gamma$ -MoD-0.3 configuration maintains high accuracy ($63.7\%$ on GQA and $65.3\%$ on TextVQA) while reducing TFlops by $34\%$ and skipping $37.9\%$ of tokens. When the routing ratio increases to $51\%$ , the token skip rate improves to $57.7\%$ , though a slight drop in accuracy is observed, e.g., $-0.6\%$ on GQA. These comparisons also reflect that high-resolution MLLMs often have a higher token redundancy than low-resolution ones. Similar observations can also be witnessed on Mini-Gemini-HD Li et al. (2024b). When scaling to larger models, such as the LLaVA-HR-13B, our method continues to perform strongly. The $\gamma$ -MoD-0.3 configuration yields a $38.1\%$ skip rate and 25.1 TFlops with minimal accuracy loss, suggesting that larger models are better suited to handle higher skip rates while maintaining performance. Even when increasing the routing ratio to $51\%$ , competitive accuracy is still maintained, e.g., $64.8\%$ on GQA and $66.8\%$ on TextVQA.
|
| 206 |
+
|
| 207 |
+
Efficiency analysis. In Tab. 4, we compare the training and inference efficiency of $\gamma$ -MoD on LLaVA-HR. From these results, we observe comprehensive advantages of $\gamma$ -MoD in terms of both training and inference. In particular, $\gamma$ -MoD-0.3 already achieves an obvious improvement in efficiency, i.e., $-26\%$ training time and $-35\%$ TFlops. However, the performance drops of $\gamma$ -MoD-0.3 are almost negligible, i.e., $-0.2\%$ average accuracy. When increasing the routing ratio to $50\%$ tokens, the inference throughput of $\gamma$ -MoD-0.5 further improves by up to $+53.2\%$ . Despite the significant efficiency gains, the performance drop of $\gamma$ -MoD is still acceptable, i.e., $-0.9\%$ average accuracy. These results well validate the obvious benefits of $\gamma$ -MoD in efficiency.
|
| 208 |
+
|
| 209 |
+
Comparison with existing methods. In Tab. 6, we compare MLLMs deployed by $\gamma$ -MoD with both dense and sparse models on 9 benchmarks. From it we can see $\gamma$ -MoD can maintain the competitive performance on all benchmarks, while achieving significant efficiency gains on LLaVA and LLaVA-HR. Specifically, $\gamma$ -MoD-LLaVA-HR (13B) can reach similar inference speed as LLaVA-HR (7B) while outperforming the latter on multiple benchmarks, e.g., $+3.0\%$ on MMVet. In addition, compared to existing sparse models, i.e., MoE-LLaVA (Lin et al., 2024a), our approaches also achieve better trade-off between performance and efficiency. In particular, $\gamma$ -MoD-LLaVA-HR (7B) outperforms MoE-LLaVA (5B) on 5 of 8 benchmarks, e.g., $+93$ scores on MME, while still maintaining better efficiency, i.e., $+28\%$ gains on inference speed. It is worth noting that although the parameter scale of MoE-LLaVA is smaller, its routing calculation often leads to higher latency.
|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
Figure 4: Visualization of routing results for different MoD layers. "Q", "I" and "A" denote the question, image and response, respectively. The skipped tokens in sub-figure (b) are colored in gray.
|
| 213 |
+
|
| 214 |
+
In Tab. 5, we also compare $\gamma$ -MoD with common inference-time acceleration methods Chen et al. (2025); Lin et al. (2024b). Compared to these methods, $\gamma$ -MoD can better maintain the model performance on MMMU and MMVet. In terms of efficiency, $\gamma$ -MoD shows greater advantages in accelerating the next-token generation for MLLMs, providing up to $+31\%$ speedups on MMVet. Overall, these comparisons further confirm the effectiveness and efficiency of $\gamma$ -MoD.
|
| 215 |
+
|
| 216 |
+
# 5.3.3 QUALITATIVE ANALYSIS
|
| 217 |
+
|
| 218 |
+
In Fig. 4, we visualize the routing ratio and the skipped content in both images and the corresponding conversations. The first observation from Fig. 4.(a) is that question, image, and response tokens are routed in a consistent pattern: question tokens are mostly kept, while image tokens are the most redundant, and thus routed the most. In Fig. 4.(b), we visualize the skipped content on images and texts. The gray portions of the images represent tokens that are skipped by the router, indicating that many regions in the images, such as background pixels, are redundant and do not provide critical information for understanding. Routing out these tokens allows the model to focus more on the white portions, which highlight the image regions or text parts that the model pays closer attention to. For example, in the middle of the first row with the IQ test example, the model can concentrate and spend more computation on the arithmetic and geometric aspects of the image.
|
| 219 |
+
|
| 220 |
+
# 6 CONCLUSION
|
| 221 |
+
|
| 222 |
+
In this paper, we aim to overcome the efficiency problem in multimodal large language models (MLLMs) from the perspective of "activated token". In particular, we present $\gamma$ -MoD, a novel mixture-of-depth adaptation strategy for computationally efficient MLLM. In $\gamma$ -MoD, an innovative metric is introduced to identify the redundant layers for MoD deployment, namely rank of attention maps (ARank). Moreover, $\gamma$ -MoD also maximizes its benefit to MLLMs via two designs called shared vision-language router and masked routing learning. With these novel designs, $\gamma$ -MoD can obviously reduce computational costs of existing MLLMs while maintaining their performance. Extensive experiments on 9 multimodal benchmarks validate the efficiency and effectiveness. Besides, the great generalization ability of $\gamma$ -MoD is also validated across different MLLMs.
|
| 223 |
+
|
| 224 |
+
Acknowledgments. This work was supported by the National Natural Science Foundation of China (No. 623B2088) and the China Postdoctoral Science Foundation (No. 2024M761548).
|
| 225 |
+
|
| 226 |
+
# REFERENCES
|
| 227 |
+
|
| 228 |
+
Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024.
|
| 229 |
+
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
|
| 230 |
+
Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 2022.
|
| 231 |
+
Ebtesam Almazrouei, Hamza Alobeidli, Abdulaziz Alshamsi, Alessandro Cappelli, Ruxandra Cojocaru, Mérouane Debbah, Étienne Goffinet, Daniel Hesslow, Julien Launay, Quentin Malartic, et al. The falcon series of open language models. arXiv preprint arXiv:2311.16867, 2023.
|
| 232 |
+
Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023a.
|
| 233 |
+
Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023b.
|
| 234 |
+
Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts. arXiv preprint arXiv:2407.06204, 2024a.
|
| 235 |
+
Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internlm2 technical report. arXiv preprint arXiv:2403.17297, 2024b.
|
| 236 |
+
Jun Chen, Deyao Zhu, Xiaoqian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. Minigpt-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478, 2023.
|
| 237 |
+
Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. In European Conference on Computer Vision, pp. 19-35. Springer, 2025.
|
| 238 |
+
Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, Ji Ma, Jiaqi Wang, Xiaoyi Dong, Hang Yan, Hewei Guo, Conghui He, Botian Shi, Zhenjiang Jin, Chao Xu, Bin Wang, Xingjian Wei, Wei Li, Wenjian Zhang, Bo Zhang, Pinlong Cai, Licheng Wen, Xiangchao Yan, Min Dou, Lewei Lu, Xizhou Zhu, Tong Lu, Dahua Lin, Yu Qiao, Jifeng Dai, and Wenhai Wang. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites, 2024a.
|
| 239 |
+
Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 24185-24198, 2024b.
|
| 240 |
+
Damai Dai, Chengqi Deng, Chenggang Zhao, RX Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y Wu, et al. Deepseekmoe: Towards ultimate expert specialization in mixture-of-experts language models. arXiv preprint arXiv:2401.06066, 2024.
|
| 241 |
+
Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023.
|
| 242 |
+
|
| 243 |
+
Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023.
|
| 244 |
+
Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, Yunsheng Wu, and Rongrong Ji. Mme: A comprehensive evaluation benchmark for multimodal large language models, 2024.
|
| 245 |
+
Yaroslav Fyodorov, Yoad Winter, and Nissim Francez. A natural logic inference system. In Proceedings of the 2nd workshop on inference in computational semantics (ICoS-2), 2000.
|
| 246 |
+
Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model, 2023.
|
| 247 |
+
G. H. Golub and C. Reinsch. Singular value decomposition and least squares solutions. In Handbook for Automatic Computation: Volume II: Linear Algebra, pp. 134-151. Springer, 1971.
|
| 248 |
+
Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering, 2017.
|
| 249 |
+
Danna Gurari, Qing Li, Abigale J. Stangl, Anhong Guo, Chi Lin, Kristen Grauman, Jiebo Luo, and Jeffrey P. Bigham. Vizwiz grand challenge: Answering visual questions from blind people, 2018.
|
| 250 |
+
Drew A. Hudson and Christopher D. Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering, 2019.
|
| 251 |
+
Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024.
|
| 252 |
+
Yizhang Jin, Jian Li, Yexin Liu, Tianjun Gu, Kai Wu, Zhengkai Jiang, Muyang He, Bo Zhao, Xin Tan, Zhenye Gan, et al. Efficient multimodal large language models: A survey. arXiv preprint arXiv:2405.10739, 2024.
|
| 253 |
+
Minchul Kim, Shangqian Gao, Yen-Chang Hsu, Yilin Shen, and Hongxia Jin. Token fusion: Bridging the gap between token pruning and token merging. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1383-1392, 2024.
|
| 254 |
+
Hugo Laurençon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander Rush, Douwe Kiela, et al. Obelics: An open web-scale filtered dataset of interleaved image-text documents. Advances in Neural Information Processing Systems, 36, 2024.
|
| 255 |
+
Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a.
|
| 256 |
+
Yanwei Li, Yuechen Zhang, Chengyao Wang, Zhisheng Zhong, Yixin Chen, Ruihang Chu, Shaoteng Liu, and Jiaya Jia. Mini-gemini: Mining the potential of multi-modality vision language models. arXiv preprint arXiv:2403.18814, 2024b.
|
| 257 |
+
Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models, 2023.
|
| 258 |
+
Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Jinfa Huang, Junwu Zhang, Yatian Pang, Munan Ning, and Li Yuan. Moe-llava: Mixture of experts for large vision-language models, 2024a.
|
| 259 |
+
Ji Lin, Jiaming Tang, Haotian Tang, Shang Yang, Wei-Ming Chen, Wei-Chen Wang, Guangxuan Xiao, Xingyu Dang, Chuang Gan, and Song Han. Awq: Activation-aware weight quantization for llm compression and acceleration, 2024b.
|
| 260 |
+
Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 26689-26699, 2024c.
|
| 261 |
+
|
| 262 |
+
Mingbao Lin, Rongrong Ji, Yan Wang, Yichen Zhang, Baochang Zhang, Yonghong Tian, and Ling Shao. Hrank: Filter pruning using high-rank feature map. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 1529-1538, 2020.
|
| 263 |
+
Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024a.
|
| 264 |
+
Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 26296-26306, 2024b.
|
| 265 |
+
Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024c.
|
| 266 |
+
Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024d.
|
| 267 |
+
Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, Kai Chen, and Dahua Lin. Mmbench: Is your multi-modal model an all-around player?, 2024e.
|
| 268 |
+
Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering, 2022.
|
| 269 |
+
Gen Luo, Yiyi Zhou, Yuxin Zhang, Xiawu Zheng, Xiaoshuai Sun, and Rongrong Ji. Feast your eyes: Mixture-of-resolution adaptation for multimodal large language models. arXiv preprint arXiv:2403.03003, 2024.
|
| 270 |
+
Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, et al. Mm1: Methods, analysis & insights from multimodal llm pre-training. arXiv preprint arXiv:2403.09611, 2024.
|
| 271 |
+
Denis Paperno, Germán Kruszewski, Angeliki Lazaridou, Quan Ngoc Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernández. The lambada dataset: Word prediction requiring a broad discourse context. arXiv preprint arXiv:1606.06031, 2016.
|
| 272 |
+
David Raposo, Sam Ritter, Blake Richards, Timothy Lillicrap, Peter Conway Humphreys, and Adam Santoro. Mixture-of-depths: Dynamically allocating compute in transformer-based language models. arXiv preprint arXiv:2404.02258, 2024.
|
| 273 |
+
Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad S. Khan. Llava++: Extending visual capabilities with llama-3 and phi-3, 2024.
|
| 274 |
+
Siva Reddy, Danqi Chen, and Christopher D Manning. Coqa: A conversational question answering challenge. Transactions of the Association for Computational Linguistics, 7:249-266, 2019.
|
| 275 |
+
Zhiqiang Shen, Tianhua Tao, Liqun Ma, Willie Neiswanger, Joel Hestness, Natalia Vassilieva, Daria Soboleva, and Eric Xing. Slimpajama-dc: Understanding data combinations for llm training. arXiv preprint arXiv:2309.10818, 2023.
|
| 276 |
+
Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 8317-8326, 2019.
|
| 277 |
+
Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14398-14409, 2024.
|
| 278 |
+
Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024.
|
| 279 |
+
|
| 280 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
|
| 281 |
+
Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024.
|
| 282 |
+
Fuzhao Xue, Zian Zheng, Yao Fu, Jinjie Ni, Zhangwei Zheng, Wangchunshu Zhou, and Yang You. Openmoe: An early effort on open mixture-of-experts language models. arXiv preprint arXiv:2402.01739, 2024.
|
| 283 |
+
Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023.
|
| 284 |
+
Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities, 2023.
|
| 285 |
+
Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Renliang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi, 2024.
|
| 286 |
+
Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024.
|
| 287 |
+
Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019.
|
2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0665c0689dcc596db183b809c3e24d25aa009c5ed0bd5c1e095d730334eba7a5
|
| 3 |
+
size 869336
|
2025/$_gamma-$MoD_ Exploring Mixture-of-Depth Adaptation for Multimodal Large Language Models/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/3c396736-487a-4b9c-ac0f-42da9c476758_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90be1019b84619a6bb02a2d7dba516010ee34202989d1e3d58176a0e24cb7c88
|
| 3 |
+
size 10179159
|
2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/full.md
ADDED
|
@@ -0,0 +1,529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# X-SAMPLE CONTRASTIVE LOSS: IMPROVING CONTRASTIVE LEARNING WITH SAMPLE SIMILARITY GRAPHS
|
| 2 |
+
|
| 3 |
+
Vlad Sobal $^{1,2}$ Mark Ibrahim $^{1}$ Randall Balestriero $^{3}$ Vivien Cabannes $^{1}$ Diane Bouchacourt $^{1}$ Pietro Astolfi $^{1}$ Kyunghyun Cho $^{2,4,5}$ Yann LeCun $^{1,2}$ Meta FAIR $^{2}$ New York University Brown University Genentech CIFAR
|
| 4 |
+
|
| 5 |
+
# ABSTRACT
|
| 6 |
+
|
| 7 |
+
Learning good representations involves capturing the diverse ways in which data samples relate. Contrastive loss—an objective matching related samples—underlies methods from self-supervised to multimodal learning. Contrastive losses, however, can be viewed more broadly as modifying a similarity graph to indicate how samples should relate in the embedding space. This view reveals a shortcoming in contrastive learning: the similarity graph is binary, as only one sample is the related positive sample. Crucially, similarities across samples are ignored. Based on this observation, we revise the standard contrastive loss to explicitly encode how a sample relates to others. We experiment with this new objective, called $\mathbb{X}$ -Sample Contrastive, to train vision models based on similarities in class or text caption descriptions. Our study spans three scales: ImageNet-1k with 1 million, CC3M with 3 million, and CC12M with 12 million samples. The representations learned via our objective outperform both contrastive self-supervised and vision-language models trained on the same data across a range of tasks. When training on CC12M, we outperform CLIP by $0.6\%$ on both ImageNet and ImageNet Real. Our objective appears to work particularly well in lower-data regimes, with gains over CLIP of $17.2\%$ on ImageNet and $18.0\%$ on ImageNet Real when training with CC3M. Finally, our objective encourages the model to learn representations that separate objects from their attributes and backgrounds, with gains of $3.3 - 5.6\%$ over CLIP on ImageNet9. The proposed method takes a step towards developing richer learning objectives for understanding sample relations in foundation models.
|
| 8 |
+
|
| 9 |
+
# 1 INTRODUCTION
|
| 10 |
+
|
| 11 |
+
Contrastive loss underlies methods from self-supervised learning (SSL) to multimodal learning (Radford et al., 2021; Chen et al., 2020; Oord et al., 2018). In SSL, contrastive learning encourages the model to associate a sample with another view of the sample created using hand-crafted data augmentation—this related view is the positive sample. Other samples are then pushed away as negative, unrelated samples in the models' representation space. Contrastive losses also play a crucial role in multimodal models such as CLIP (Radford et al., 2021), where the model associates an image with its text caption in representation space. Here contrastive learning designates the caption and image representations as positives while all other text-image pairs are designated as negatives.
|
| 12 |
+
|
| 13 |
+
More broadly, contrastive losses can be seen as constructing a similarity graph to indicate how samples should relate in the model's representation space (Cabannes et al., 2023; Zhang et al., 2023; HaoChen et al., 2021; Wang et al., 2023; 2022). This view reveals a shortcoming in contrastive learning: the similarity graph is binary, as only one sample is the related positive sample. Crucially, similarities across samples which contain precious signals about how aspects of one sample may relate to another, are ignored, thereby limiting the quality of the learned representations. For the example shown in fig. 1a, contrastive learning treats each image independently, without explicitly encoding similarities between the images depicting different pets. While prior work showed that data samples can be implicitly linked by multi-step connections in the augmentations graph when using contrastive learning (Wang et al., 2022; 2023), we explore how to capture such similarities explicitly by modifying the standard contrastive objective. We show that, in contrast to modeling
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
(a)
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
(b)
|
| 20 |
+
Figure 1: a) The diagram of $\mathbb{X}$ -CLR. $\mathbb{X}$ -CLR objective learns representations of images with the help of a soft relationship graph. The graph can be built based on accompanying data, e.g. taxonomy for biological data. In our experiments, we use captioned images, and build similarities based on caption similarities. b) Python-style pseudo-code of $\mathbb{X}$ -CLR with similarity based on text captions.
|
| 21 |
+
|
| 22 |
+
those connections implicitly, explicitly incorporating similarities between visually different but semantically related samples makes representations more robust.
|
| 23 |
+
|
| 24 |
+
To account for similarities across samples, we first remove the binary negative vs. positive designations in standard contrastive loss. We introduce instead a similarity graph with continuous scalars capturing the extent to which two samples are related. Consider the example in fig. 1, where the two dog images have a high similarity while the dog and cat images have a more moderate similarity. We build our objective by incorporating the soft similarity targets into the InfoNCE objective (Oord et al., 2018) in a manner similar to objectives used for distillation (Hinton et al., 2015; Wu et al., 2023). In contrast to distillation, we do not focus on training a model using outputs of another model, but instead focus on building a meaningful similarities graph and incorporating it into the objective. The proposed target similarity graph does not have to come from another model, and can be inferred from any additional metadata. We experiment with this new objective, called X-Sample Contrastive Learning (X-CLR), by training vision models using a graph of similarities inferred from class or text caption descriptions found in common datasets. Our study spans three training dataset scales from 1 million samples with high-quality labels from ImageNet (Deng et al., 2009) to 3 and 12 million noisy image-text caption pairs from CC3M and CC12M (Sharma et al., 2018).
|
| 25 |
+
|
| 26 |
+
We find that compared to contrastive baseline methods trained on the same data, representations trained using $\mathbb{X}$ -CLR outperform contrastive training on a range of tasks from standard classification to tasks involving the decomposition of objects from their attributes and backgrounds. When training on CC12M, we outperform CLIP by $0.6\%$ on both ImageNet and ImageNet Real (Beyer et al., 2020). Furthermore, $\mathbb{X}$ -CLR yields representations that separate objects from their attributes and backgrounds well, with gains of $3.4 - 4.9\%$ over CLIP on ImageNet9 (Xiao et al., 2020). We also find for fine-grained disambiguation of object attributes, the quality of labels used to infer the similarity graph is much more important than the data quantity. Compared to noisier web caption data, we find $\mathbb{X}$ -CLR trained on 1 million higher quality class labels outperforms representations learned via standard contrastive CLIP trained $12\times$ more data. Finally, we find $\mathbb{X}$ -CLR appears to work particularly well in lower-data regimes, with gains over CLIP of $16.8\%$ on ImageNet and $18.1\%$ on ImageNet Real when training with CC3M. In short, we find representations learned using $\mathbb{X}$ -CLR generalize better, decompose objects from their attributes and backgrounds, and are more data-efficient.
|
| 27 |
+
|
| 28 |
+
# Our contributions are:
|
| 29 |
+
|
| 30 |
+
1. We revisit the graph similarity perspective of contrastive losses, revealing that standard losses encode a sparse similarity matrix that treats other possibly related samples as negatives;
|
| 31 |
+
2. We propose a new $\mathbb{X}$ -CLR loss that explicitly accounts for soft similarities across samples;
|
| 32 |
+
3. We experiment with this objective across three levels of data scale from 1-12 million samples, and find that the representations learned via $\mathbb{X}$ -CLR:
|
| 33 |
+
(a) Generalize better on standard classification tasks with consistent gains over contrastive baselines trained on the same data. For example, when training on CC12M we outperform CLIP by $0.6\%$ on both ImageNet and ImageNet Real.
|
| 34 |
+
|
| 35 |
+
(b) Disambiguate aspects of images such as attributes and backgrounds more reliably, with gains of $3.3 - 5.6\%$ over CLIP on background robustness benchmarks for ImageNet.
|
| 36 |
+
(c) Finally, we find $\mathbb{X}$ -CLR learns more efficiently when data is scarce, with gains of $17.2\%$ on ImageNet and $18.0\%$ on ImageNet Real when pretraining on the smaller 3 million sample CC3M dataset.
|
| 37 |
+
|
| 38 |
+
The proposed solution takes a step towards developing richer learning objectives for understanding sample relations in foundation models to encode richer, more generalizable representations.
|
| 39 |
+
|
| 40 |
+
# 2 RELATED WORK
|
| 41 |
+
|
| 42 |
+
Contrastive learning Various contrastive objectives have been proposed over the years (Chopra et al., 2005; Schroff et al., 2015). More recently, the InfoNCE objective (Oord et al., 2018) has been the most popular choice for self-supervised methods, e.g. SimCLR (Chen et al., 2020) and MoCo (He et al., 2020). InfoNCE objective has also been successfully used to learn vision-language models using CLIP (Radford et al., 2021). The basis of those objectives is to make positive pairs have similar representations, while the negatives, which typically are just all other elements in a batch, should have a different representation. In its original form, InfoNCE is binary, meaning it only works with positive and negative pairs, and does not support degrees of similarity. The positive pairs are usually two augmentations of the same sample, which makes well-tuned augmentations crucial for good performance (Ryali et al., 2021). Dwibedi et al. (2021) estimate positives using nearest neighbors in the latent space instead and therefore can use weaker augmentations, while Caron et al. (2020) use cluster assignment. A few methods have proposed modifications wherein multiple positive pairs are supported, e.g., Khosla et al. (2020) groups positive by class labels, Hoffmann et al. (2022) propose using WordNet (Fellbaum, 1998) hierarchy to define ranked positive samples, and Tian et al. (2024) uses a generative model to obtain multiple positives for the same concept. HaoChen et al. (2021) also look at contrastive learning through the lens of graphs, and propose a novel spectral objective. Wang et al. (2023) draw connections between contrastive learning and message passing on the augmentation graph, while Wang et al. (2022) show that aggressive data-augmentations like cropping can connect samples of the same class. Zhang et al. 
(2023) show that contrastive learning objective implicitly learns the graph in which the samples are connected via augmentations in the case of SimCLR or via captions in the case of CLIP. However, in that paradigm only visually similar samples or samples with a common caption get connected in the graph, while in our method the samples are connected based on provided graph, which can connect visually different yet semantically related samples.
|
| 43 |
+
|
| 44 |
+
Soft targets Using soft targets provides more learning signal to the model, possibly making it learn better and faster. This has been explored with distillation by Hinton et al. (2015). Soft targets have also been used with InfoNCE in the context of distillation in ReSSL (Zheng et al., 2021) and SCE (Denize et al., 2023), where the target cross-sample similarity comes from the teacher model. (Feng and Patras, 2023) use soft targets from self-distillation to train an image encoder with coarse labels. Similarly, Fini et al. (2023a) compute soft targets via latent clustering and apply it to semi-supervised learning. Shen et al. (2023) use patch-mixing to train ViT image encoders to model inter-sample relationships. Andonian et al. (2022) proposes to use soft targets for CLIP (Radford et al., 2021) training, and calculates the targets via self-distillation. Wu et al. (2023) use a similar objective to ours to distill the CLIP model into a smaller one. Further soft CLIP objectives are explored by Fini et al. (2023b), who apply label smoothing to obtain soft targets, and Gao et al. (2024), who estimate soft targets by comparing fine-grained image information. Finally, Huang et al. (2024) train CLIP with non-zero cross-sample similarities computed based on pre-trained uni-modal models for text and vision. In this study, we build on the work of Cabannes et al. (2023) who propose a unifying framework to view SSL and supervised learning objectives as learning with different underlying similarity graphs. We take inspiration from the soft targets literature and propose using a soft graph. Unlike distillation methods, where the soft targets typically come from a teacher model, the targets in $\mathbb{X}$ -CLR can originate from any source, not necessarily another model. We try different similarity graph sources, including ones not based on the outputs of another model, and show that we can build a better graph than ones commonly used in contrastive learning (see table 2).
|
| 45 |
+
|
| 46 |
+

|
| 47 |
+
Figure 2: Sample similarity adjacency matrices of existing methods vs. our $\mathbb{X}$ -Sample Contrastive similarity loss (right). We show pairwise similarities of 20 samples belonging to 4 classes. Similarity of 1 means the samples are identical, 0 - they are completely unrelated. In case of self-supervised learning, none of the inter-sample relationships are modelled (left). Supervised learning relies on the labels to group samples of the same class together (center). $\mathbb{X}$ -CLR models inter-class relationships by associating cats with dogs and pianos with guitars.
|
| 48 |
+
|
| 49 |
+
# 3 UNDERSTANDING CONTRASTIVE LOSSES VIA SIMILARITY GRAPHS
|
| 50 |
+
|
| 51 |
+
# 3.1 X-SAMPLE GRAPHS
|
| 52 |
+
|
| 53 |
+
Throughout this study, a similarity graph denotes a graph in which the nodes represent data samples, and edges - similarity relationships. Given the number of data samples in the dataset $N$ , a graph is expressed through its symmetric adjacency matrix $\pmb{G} \in \mathbb{R}^{N \times N}$ , the semantic relation between inputs $i$ and $j$ being encoded in the real entry $\pmb{G}_{i,j}$ . In fig. 2, we show graphs of different learning paradigms. SSL does not rely on labels, but on positive pairs/tuples/views generated at each epoch. Let us denote by $V$ the number of positive views generated, commonly $V = 2$ for positive pairs, and denote by $E$ the training epochs. In that case, the original $N$ input samples are transformed into $N \times V \times E$ "augmented" samples
|
| 54 |
+
|
| 55 |
+
$$
|
| 56 |
+
\boldsymbol{X}^{(A)} \triangleq [\underbrace{\mathcal{T}(\boldsymbol{x}_1), \ldots, \mathcal{T}(\boldsymbol{x}_1)}_{\text{repeated } V \times E \text{ times}}, \ldots, \mathcal{T}(\boldsymbol{x}_N), \ldots, \mathcal{T}(\boldsymbol{x}_N)]^{\top},
|
| 57 |
+
$$
|
| 58 |
+
|
| 59 |
+
where each $\mathcal{T}$ is a random input transformation with its own random parameters. The corresponding graph is given by:
|
| 60 |
+
|
| 61 |
+
$$
|
| 62 |
+
\boldsymbol{G}_{i,j}^{(\mathrm{ssl})} = \mathbf{1}_{\{\lfloor i / V E \rfloor = \lfloor j / V E \rfloor\}}, \tag{1}
|
| 63 |
+
$$
|
| 64 |
+
|
| 65 |
+
where the associated similarity graph captures if two samples were generated as augmentations of the same original input. Such graphs $\pmb{G}$ , as defined by eq. (1), are the ones used as targets in common SSL methods, as formalized below denoting $Z \triangleq f_{\theta}(X) \in \mathbb{R}^{N \times D}$ . Here, $f_{\theta}$ is the encoder, and $D$ is the encoding dimension.
|
| 66 |
+
|
| 67 |
+
Theorem 1 ((Cabannes et al., 2023)). VICReg (Bardes et al., 2021), SimCLR (Chen et al., 2020), and BarlowTwins (Zbontar et al., 2021) losses can be expressed in terms of the graph $G$ (1)
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
\mathcal {L} _ {\mathrm {V I C} ^ {2}} (\boldsymbol {Z}; \boldsymbol {G}) = \| \boldsymbol {Z} \boldsymbol {Z} ^ {T} - \boldsymbol {G} \| _ {F} ^ {2},
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
$$
|
| 74 |
+
\mathcal{L}_{\mathrm{SimCLR}}(\boldsymbol{Z}; \boldsymbol{G}) = -\sum_{i, j \in [N]} \boldsymbol{G}_{i,j} \log\left(\frac{\exp(\tilde{\boldsymbol{z}}_i^{\top} \tilde{\boldsymbol{z}}_j)}{\sum_{k \in [N]} \exp(\tilde{\boldsymbol{z}}_i^{\top} \tilde{\boldsymbol{z}}_k)}\right),
|
| 75 |
+
$$
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\mathcal {L} _ {\mathrm {B T}} (\boldsymbol {Z}; \boldsymbol {G}) = \left\| \tilde {\boldsymbol {Z}} ^ {\top} \boldsymbol {G} \tilde {\boldsymbol {Z}} - I \right\| ^ {2},
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
where $\tilde{z} \triangleq z / \|z\|$ and $\tilde{Z}$ is the column-normalized $Z$, so that each column has unit norm.
|
| 82 |
+
|
| 83 |
+
In our study, we will focus on contrastive learning, i.e., SimCLR family of losses. We will demonstrate how to move away from the ad-hoc graph $G$ from eq. (1).
|
| 84 |
+
|
| 85 |
+
# 3.2 REVISITING CONTRASTIVE LOSSES WITH SIMILARITY GRAPHS: $\mathbb{X}$ -CLR
|
| 86 |
+
|
| 87 |
+
We introduce the soft cross-sample similarity to the widely used InfoNCE objective (Oord et al., 2018). We note that the proposed framework isn't necessarily limited to InfoNCE-based methods and can potentially be integrated into non-contrastive objectives such as BYOL, SimSiam, or VICReg (Grill et al., 2020; Chen and He, 2020; Bardes et al., 2021), although we leave the extensions to future work. In SimCLR (Chen et al., 2020), given a batch of $N_{b}$ images, each image is augmented twice, so each sample has a true positive. The $2N_{b}$ images are then encoded into representations $z$ . Then:
|
| 88 |
+
|
| 89 |
+
$$
|
| 90 |
+
p_{i,j} = \frac{\exp(\operatorname{sim}(z_i, z_j) / \tau)}{\sum_{k=1}^{2 N_b} \mathbf{1}_{k \neq i} \exp(\operatorname{sim}(z_i, z_k) / \tau)}
|
| 91 |
+
$$
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
\mathcal{L}_{\mathrm{SimCLR}} = \frac{1}{2 N_b} \sum_{i=1}^{2 N_b} H(\mathbb{1}_{i^{\prime}}, p_i)
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
where $H$ is the cross-entropy, and $\mathbb{1}_{i'}$ is the one-hot distribution where all the probability mass is assigned to the index of the positive sample corresponding to $i$ , and sim is the cosine similarity. Intuitively, we are training the model to classify positive examples in a batch, so the similarity $p$ should be high only for the true positive. We introduce the soft objective by replacing the hard positive distribution $\mathbb{1}_{i'}$ with a distribution $s_i$ . Or, in terms of graphs, we replace the graph from the eq. (1) with a soft graph where connection strengths can be any number in [0, 1]; similarly, the distribution $s_i$ does not have to be one-hot. Considering the example of fig. 1, we want a photo of a dog to have a representation similar to that of another photo of a dog, somewhat similar to the representation of a cat photo, and different from the representation of a photo of a mug. Given that distribution $s$ , we can plug it in directly:
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\mathcal{L}_{\mathbb{X}\text{-}\mathrm{CLR}} = \frac{1}{2 N_b} \sum_{i=1}^{2 N_b} H\left(s_i, p_i\right)
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
There are many possible ways to obtain this distribution $s$ . We could use the meta-data associated with the dataset. In our particular case, we utilize a trained text encoder $f_{\mathrm{text}}$ , and encode the text provided with each image to obtain a representation, which is then used to calculate similarity between samples $i$ and $j$ using the cosine similarity. Those pairwise similarities describe the soft graph:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\boldsymbol{G}_{i,j}^{(\mathrm{soft})} = \operatorname{sim}(f_{\mathrm{text}}(c_i), f_{\mathrm{text}}(c_j))
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
Where $c_{i}$ is the caption associated with the $i$ -th sample. Note that the similarities $G^{(\mathrm{soft})}$ do not have to come from another model, and can be inferred based on any additional information available, making this type of objective applicable to other datasets and modalities. The last step before plugging the similarities into the objective is converting them to a valid probability distribution using softmax:
|
| 110 |
+
|
| 111 |
+
$$
|
| 112 |
+
s_{i,j} = \frac{\exp(G_{i,j}^{(\mathrm{soft})} / \tau_s)}{\sum_{k=1}^{2 N_b} \exp(G_{i,k}^{(\mathrm{soft})} / \tau_s)}
|
| 113 |
+
$$
|
| 114 |
+
|
| 115 |
+
$\tau_{s}$ is a separate hyperparameter from $\tau$ in the softmax to calculate the learned similarities. Higher values of $\tau_{s}$ put more weight on the 'soft' positives, while lower values in the limit recover the original SimCLR objective. For more details about this, see appendix A.9.
|
| 116 |
+
|
| 117 |
+
# 4 EXPERIMENTS
|
| 118 |
+
|
| 119 |
+
# 4.1 EXPERIMENTAL SETUP
|
| 120 |
+
|
| 121 |
+
We test $\mathbb{X}$ -CLR on three datasets of varying scale: ImageNet (Deng et al., 2009) (1M), and Conceptual Captions 3M and 12M (Sharma et al., 2018). We blur faces in all datasets before training our models. We use the Sentence Transformer (Reimers and Gurevych, 2019) as the text encoder to construct similarities unless stated otherwise. For ImageNet experiments, we generate captions by using the template "a photo of a _" to generate captions out of class names. In our experiments with the conceptual captions dataset (Sharma et al., 2018), we use the captions as is. For experiments on
|
| 122 |
+
|
| 123 |
+
Table 1: Accuracy of X-CLR and a range of baselines on a set of classification benchmarks. We freeze the encoder, and train a linear layer on top to probe the quality of representations. We test on ImageNet classification, as well as on a set of robustness benchmarks.
|
| 124 |
+
|
| 125 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Soft loss</td><td rowspan="2">Uses labels</td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="2">Background Decomp.</td><td rowspan="2">ObjectNet</td><td colspan="2">MIT States</td></tr><tr><td>Same Class</td><td>Mixed</td><td>Objects</td><td>Attributes</td></tr><tr><td>SCE</td><td>✓</td><td>✗</td><td>71.3</td><td>78.7</td><td>61.7</td><td>58.4</td><td>20.2</td><td>44.5</td><td>31.0</td></tr><tr><td>ReSSL (1 crop)</td><td>✓</td><td>✗</td><td>69.4</td><td>76.9</td><td>56.3</td><td>53.2</td><td>18.3</td><td>44.5</td><td>31.2</td></tr><tr><td>VICReg</td><td>✗</td><td>✗</td><td>72.4</td><td>79.0</td><td>60.8</td><td>56.8</td><td>20.5</td><td>43.5</td><td>26.9</td></tr><tr><td>Barlow Twins</td><td>✗</td><td>✗</td><td>72.8</td><td>80.0</td><td>62.7</td><td>59.4</td><td>21.6</td><td>45.9</td><td>31.7</td></tr><tr><td>SimCLR</td><td>✗</td><td>✗</td><td>63.4</td><td>67.8</td><td>44.7</td><td>38.9</td><td>12.1</td><td>40.9</td><td>29.1</td></tr><tr><td>SupCon</td><td>✗</td><td>✓</td><td>74.3</td><td>79.7</td><td>64.0</td><td>59.1</td><td>24.4</td><td>45.6</td><td>30.8</td></tr><tr><td>X-CLR</td><td>✓</td><td>✓</td><td>75.6</td><td>81.5</td><td>66.6</td><td>62.7</td><td>27.5</td><td>45.9</td><td>31.1</td></tr></table>
|
| 126 |
+
|
| 127 |
+
ImageNet, we follow SupCon (Khosla et al., 2020) and use AutoAugment (Cubuk et al., 2018). All experiments on the ImageNet dataset were run for 100 epochs with 1024 batch size. The learning rate was set to 0.075 for ImageNet models. For experiments on CC3M and CC12M, we used the standard SimCLR augmentations, and a learning rate of 0.1. The rest of the settings were kept the same. Although SimCLR and SupCon both benefit from longer training, we haven't experimented with training for more epochs due to computational constraints. For more details, see appendix A.7.
|
| 128 |
+
|
| 129 |
+
In all our experiments, to isolate the effect of our learning objective, we fix the backbone architecture to be a ResNet-50 (He et al., 2015) model as this is the most widely studied, with optimized hyperparameters, for standard contrastive self-supervised learning (Chen et al., 2020). We use the same architecture for CLIP's vision encoder and take advantage of already optimized publicly available checkpoints provided by OpenCLIP (Ilharco et al., 2021) for CC12M. Since no comparable public checkpoint is available for CC3M, we train our own model, see appendix A.6.
|
| 130 |
+
|
| 131 |
+
We evaluate all models across a suite of benchmarks to gauge how well representations generalize in terms of classification performance. We test on ImageNet classification with standard as well as with ImageNet Real labels, on ImageNet-9 to test robustness to background change (we refer to this as 'Background Decomposition' in our results), on ObjectNet to test robustness to context and view change, and on MIT-States objects and attributes classification to test how well model captures object states. We use linear probing on top of frozen representations for all evaluations, including CLIP. For more details and dataset examples see appendix A.8.
|
| 132 |
+
|
| 133 |
+
# 4.2 X-SAMPLE CONTRASTIVE WITH WELL-LABELED SAMPLES
|
| 134 |
+
|
| 135 |
+
We first experiment with $\mathbb{X}$ -Sample Contrastive using the well-labeled ImageNet dataset to understand the effect of incorporating similarities across samples in the training objective. We compare $\mathbb{X}$ -Sample Contrastive (X-CLR) to a range of baselines: SupCon (Khosla et al., 2020) which uses labels; SCE (Denize et al., 2023) and ReSSL (Zheng et al., 2021) which use self-distillation with soft targets; and to SimCLR (Chen et al., 2020), VICReg (Bardes et al., 2021) and BarlowTwins (Zbontar et al., 2021), which are SSL methods for learning from images.
|
| 136 |
+
|
| 137 |
+
We find in table 1 representations learned via $\mathbb{X}$ -CLR with Sentence Transformer similarities (Reimers and Gurevych, 2019) improve on standard classification performance, with gains of $12.2\%$ relative to SimCLR and $1.3\%$ relative to Supervised Contrastive on ImageNet. We find similar gains when evaluated on revised labels from ImageNet Real of $13.7\%$ and $1.8\%$ , respectively. Finally, we find by capturing similarities across samples, representations learned via $\mathbb{X}$ -CLR are more capable of disambiguating objects from backgrounds and attributes with gains on ImageNet-9 (Xiao et al., 2020) and ObjectNet (Barbu et al., 2019). To confirm statistical significance of the results, we report standard deviations over 5 seeds for the models we trained in table 7 (SimCLR, SupCon, and $\mathbb{X}$ -CLR). For the remaining ImageNet models, we took published pre-trained encoders, and are limited to one seed.
|
| 138 |
+
|
| 139 |
+
Effect of the similarity graph. We investigate how the choice of similarity graph affects the performance. To calculate the similarity between samples, we use similarity of the captions embedding when using various text encoders: CLIP (Radford et al., 2021), LLama2 (Touvron et al., 2023), Sentence Transformer (Reimers and Gurevych, 2019). We also experiment with the similarity defined by the distance in WordNet hierarchy (Fellbaum, 1998). Baselines include SimCLR Chen et al. (2020), where the similarity graph is the binary augmentation graph, and Supervised Contrastive (Khosla
|
| 140 |
+
|
| 141 |
+
Table 2: The effect of the similarity source on the model performance. We train X-CLR with similarities coming from a variety of sources, and evaluate the representations with linear probing.
|
| 142 |
+
|
| 143 |
+
<table><tr><td rowspan="2">Similarity source</td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="3">Background Decomposition</td></tr><tr><td>Same Class</td><td>Mixed</td><td>ObjectNet</td></tr><tr><td>Augmentation graph (SimCLR)</td><td>63.4</td><td>67.8</td><td>44.7</td><td>38.9</td><td>12.1</td></tr><tr><td>True class graph (SupCon)</td><td>74.3</td><td>79.7</td><td>64.0</td><td>59.1</td><td>24.4</td></tr><tr><td>Sentence Transformer (X-CLR)</td><td>75.6</td><td>81.5</td><td>66.6</td><td>62.7</td><td>27.5</td></tr><tr><td>CLIP text encoder</td><td>74.4</td><td>80.6</td><td>67.5</td><td>64.2</td><td>24.5</td></tr><tr><td>LLama2 text encoder</td><td>40.9</td><td>45.8</td><td>38.3</td><td>36.0</td><td>4.3</td></tr><tr><td>Distance in WordNet hierarchy</td><td>68.3</td><td>74.9</td><td>55.7</td><td>52.1</td><td>21.2</td></tr></table>
|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
(a)
|
| 147 |
+
|
| 148 |
+

|
| 149 |
+
(b)
|
| 150 |
+
Figure 3: (a) X-Sample Contrastive learning is data efficient with ImageNet pretraining. We outperform SimCLR in low data regimes and match Supervised Contrastive trained on ground truth labels at varying levels of data scarcity. (b) KNN performance on ImageNet. X-CLR outperforms other methods with KNN probing for a range of values of K. (c) Sensitivity of X-Sample Contrastive to temperature. We benchmark our method trained with different values of temperature $\tau_{s}$ on ImageNet.
|
| 151 |
+
|
| 152 |
+

|
| 153 |
+
(c)
|
| 154 |
+
|
| 155 |
+
et al., 2020), where the similarity graph has connections only between samples of the same class. We emphasize that $\mathbb{X}$ -CLR is not a model distillation method (Wu et al., 2023) as the similarities can come from any source, including any metadata, e.g., WordNet hierarchy in our case or taxonomy of organisms for biological data. For more details regarding this experiment, see appendix A.1. We show results in table 2. Overall, we find that using Sentence Transformer is the best option for classification performance, and use this similarity in all the following experiments.
|
| 156 |
+
|
| 157 |
+
Can we improve contrastive learning under data scarcity? To answer this question, we train all three models SimCLR, SupCon, and $\mathbb{X}$ -CLR by varying the number of samples seen for each class in ImageNet. We find $\mathbb{X}$ -CLR, by incorporating class labels and how they relate, is able to learn representations with comparable performance to SupCon trained with ground truth class labels and outperform SimCLR even when few training samples are available per class as shown in fig. 3a.
|
| 158 |
+
|
| 159 |
+
# 4.3 X-SAMPLE CONTRASTIVE LEARNING WITH NOISY MULTIMODAL SAMPLES
|
| 160 |
+
|
| 161 |
+
Contrastive loss also plays a pivotal role in multimodal vision-language models such as CLIP. The contrastive training objective matches noisy caption-image pairs. Here we experiment with $\mathbb{X}$ -Sample Contrastive loss by using the noisy captions to learn similarities across samples. We compare both SimCLR as a standard contrastive model and CLIP trained on the same caption-image data across two levels of scale: 3 and 12 million samples from CC3M and CC12M.
|
| 162 |
+
|
| 163 |
+
We find incorporating $\mathbb{X}$ -CLR loss leads to representations with higher classification accuracy and disambiguation of objects from their attributes and backgrounds. With CC12M training shown in table 3, $\mathbb{X}$ -Sample Contrastive learning outperforms SimCLR by $0.5\%$ and CLIP by $0.6\%$ with CC12M with similar gains for ImageNet Real. We also find $\mathbb{X}$ -CLR training can better disambiguate object foreground from backgrounds, with gains of $0.6 - 1.5\%$ over SimCLR and $3.3 - 5.6\%$ over CLIP.
|
| 164 |
+
|
| 165 |
+
We find learning similarities across samples with $\mathbb{X}$ -CLR leads to more considerable gains when less data is available. When trained on CC3M, $\mathbb{X}$ -CLR outperforms SimCLR by $1.2\%$ and CLIP by $17.2\%$ on ImageNet, with similar gains on ImageNet Real as shown in table 3. We find $\mathbb{X}$ -CLR training can
|
| 166 |
+
|
| 167 |
+
Table 3: X-Sample Contrastive learning performance with CC3M and CC12M training. We train X-CLR on conceptual caption images with text caption similarities, and compare to CLIP and SimCLR. SimCLR does not use text data and is only trained on images.
|
| 168 |
+
|
| 169 |
+
<table><tr><td rowspan="2">Data</td><td rowspan="2">Method</td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="3">Background Decomposition</td></tr><tr><td>Same Class</td><td>Mixed</td><td>ObjectNet</td></tr><tr><td rowspan="3">CC3M</td><td>SimCLR</td><td>57.0</td><td>64.0</td><td>24.4</td><td>18.9</td><td>10.8</td></tr><tr><td>CLIP</td><td>41.0</td><td>47.6</td><td>12.5</td><td>10.6</td><td>7.8</td></tr><tr><td>X-CLR</td><td>58.2</td><td>65.6</td><td>26.7</td><td>20.3</td><td>11.5</td></tr><tr><td rowspan="3">CC12M</td><td>SimCLR</td><td>58.9</td><td>66</td><td>24.6</td><td>19.8</td><td>12.7</td></tr><tr><td>CLIP</td><td>58.8</td><td>66.1</td><td>20.5</td><td>17.1</td><td>11.9</td></tr><tr><td>X-CLR</td><td>59.4</td><td>66.7</td><td>26.1</td><td>20.4</td><td>13.4</td></tr></table>
|
| 170 |
+
|
| 171 |
+
Table 4: X-CLR performance when used to finetune pretrained models. We take a SimCLR checkpoint and fine-tune it using X-CLR objective for 10 epochs. We then evaluate the original checkpoint and the fine-tuned one using linear probing.
|
| 172 |
+
|
| 173 |
+
<table><tr><td rowspan="2"></td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="2">Background Decomposition</td><td rowspan="2">ObjectNet</td></tr><tr><td>Same Class</td><td>Mixed</td></tr><tr><td>SimCLR</td><td>63.4</td><td>67.8</td><td>44.7</td><td>38.9</td><td>12.1</td></tr><tr><td>+ X-CLR finetuning</td><td>66.5</td><td>74.4</td><td>53.9</td><td>50.0</td><td>17.4</td></tr></table>
|
| 174 |
+
|
| 175 |
+
more effectively disambiguate object foregrounds from backgrounds compared to CLIP when less training data is available, with gains of $10.3 - 14.2\%$ over CLIP.
|
| 176 |
+
|
| 177 |
+
# 4.4 X-SAMPLE CONTRASTIVE CAN BE USED TO FINETUNE PRETRAINED BACKBONES
|
| 178 |
+
|
| 179 |
+
We validate whether $\mathbb{X}$ -CLR can be used as a finetuning objective for pretrained backbones, given the growing abundance of publicly available model checkpoints. Here, we evaluate a pretrained SimCLR model by finetuning for 10 epochs on ImageNet with $\mathbb{X}$ -CLR instead of the original SimCLR contrastive objective. We see in table 4 finetuning with $\mathbb{X}$ -CLR improves classification performance on ImageNet by $3.1\%$ and on ImageNet Real by $6.6\%$ . Furthermore, we see by relating samples during the finetuning stage, $\mathbb{X}$ -CLR can disambiguate object foregrounds from backgrounds with gains of $9.2 - 11.1\%$ on ImageNet-9 as well as improvements on natural object transformations from ObjectNet with a gain of $5.3\%$ after finetuning.
|
| 180 |
+
|
| 181 |
+
# 4.5 X-SAMPLE CONTRASTIVE OBJECTIVE INTRODUCES ONLY MINIMAL COMPUTATIONAL OVERHEAD
|
| 182 |
+
|
| 183 |
+
Both for ImageNet and conceptual captions datasets, we don't run the text encoder for each sample we see, and instead precompute the similarity values. For more details, see appendix A.7. Avoiding running the text encoder during model training avoids the extra overhead at the price of some preprocessing. Pre-processing takes less than 2 hours for CC12M when using one GPU, about 30 minutes for CC3M, and less than 5 minutes for ImageNet. To further analyze how much overhead there is, we compare the average time it takes to process one batch for SimCLR and $\mathbb{X}$ -CLR. The results are shown in table 5. Overall, we didn't notice any significant difference in the amount of time it takes to train models with the $\mathbb{X}$ -CLR objective compared to the regular contrastive objective. To train on ImageNet, we used 8 Nvidia V100s, and each run took about 30 hours. With the same setup, CC3M runs took about 50 hours, and CC12M runs took roughly 9 days.
|
| 184 |
+
|
| 185 |
+
# 5 ANALYZING REPRESENTATIONS LEARNED WITH $\mathbb{X}$ -CLR
|
| 186 |
+
|
| 187 |
+
KNN Clustering. To confirm the representations learned via $\mathbb{X}$ -CLR also work well for downstream tasks with non-linear decision boundaries, we perform evaluation using the common K-nearest neighbor (KNN) protocol. The results shown in fig. 3b demonstrate $\mathbb{X}$ -CLR outperforms both
|
| 188 |
+
|
| 189 |
+
Table 5: Analyzing the computation overhead of the X-Sample Contrastive objective during training. We measure the mean and standard deviation of the time needed to process one batch update for X-CLR and SimCLR. X-CLR introduces nearly no computational overhead.
|
| 190 |
+
|
| 191 |
+
<table><tr><td>Method</td><td>Seconds per batch ImageNet</td><td>Seconds per batch CC</td></tr><tr><td>SimCLR</td><td>0.866 ± 0.008</td><td>0.874 ± 0.034</td></tr><tr><td>X-CLR</td><td>0.866 ± 0.010</td><td>0.877 ± 0.032</td></tr></table>
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
Figure 4: Visualizing pairwise similarities. SupCon (Khosla et al., 2020) objective does not encourage non-zero similarity between samples of different classes (left), while $\mathbb{X}$ -CLR target similarities take into account semantic closeness within categories such as dogs or types of balls (center). On the right, we see that the trained model successfully learns the soft similarity. For more graphs, see fig. 6.
|
| 195 |
+
|
| 196 |
+
SimCLR and SupCon baselines across a range of choices for $K$ . We also show KNN results for models trained on conceptual captions in appendix A.4.
|
| 197 |
+
|
| 198 |
+
Analyzing the learned graph from $\mathbb{X}$ -Sample Contrastive representations. Here we examine whether the learned representations from $\mathbb{X}$ -Sample Contrastive capture semantically meaningful similarities. To do so, we select four groups of three ImageNet classes: felines, dogs, types of balls, and musical instruments. For each pair of classes, we then compare the representation similarities using cosine similarity. A higher average pairwise similarity indicates the model's latent representations encode the classes similarly. In fig. 4 we show the graph of similarities learned after training with $\mathbb{X}$ -CLR on ImageNet. We find that the image encoder successfully captures the similarity within the class groups. Additionally, we repeat the analysis of the learned similarity graph in (Zhang et al., 2023). In that work, the authors also view contrastive learning through the lens of graphs, and introduce metrics to analyze the quality of the learned graph. In appendix A.3, table 9, we find that $\mathbb{X}$ -CLR learns a better structured graph compared to CLIP, SimCLR, and SupCon, with samples better connected within the class, and less connected for unrelated classes. We also plot T-SNE (Van der Maaten and Hinton, 2008) projections of $\mathbb{X}$ -CLR, SimCLR, and SupCon representations trained on ImageNet in fig. 13 and find that $\mathbb{X}$ -CLR learns well-structured embeddings.
|
| 199 |
+
|
| 200 |
+
The effect of softmax temperature, and inferred similarity graph. We show the sensitivity of $\mathbb{X}$ -CLR to temperature $\tau_{s}$ in fig. 3c on ImageNet. In the limit, when temperature goes to 0, we recover Supervised Contrastive method for ImageNet, or SimCLR in case of conceptual captions, see appendix A.9 for a more detailed explanation. With low temperature, the similarity is 1 only if the captions are exactly the same. As the temperature increases, more weight is put on the soft positives compared to the true positives (i.e. augmentations of the same sample). With high temperature, our method is unstable as too much emphasis is put on the soft positive examples compared to the true positives. We find that the value of 0.1 strikes the optimal balance and provides an improvement over pure Supervised Contrastive objective, while still emphasizing true positives enough. We show how $\tau_{s}$ changes the objective in fig. 5b. We see that more probability mass is put on the true positive for low temperatures, with ImageNet on average having lower true positive weight than CC3M samples. In fig. 5a, we see that on average, similarity between samples in ImageNet is much higher than in CC3M, which may be caused by inadequate caption quality in CC3M.
|
| 201 |
+
|
| 202 |
+

|
| 203 |
+
(a)
|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
Figure 5: (a) Histograms of the similarities calculated using Sentence Transformer on ImageNet and CC3M. While for ImageNet the average similarity is around 0.35, it is much lower on CC3M, signifying that the graph contains less information for CC3M. (b) Effect of the temperature and batch size on the weight assigned to the true positive.
|
| 207 |
+
|
| 208 |
+

|
| 209 |
+
(b)
|
| 210 |
+
|
| 211 |
+
Table 6: The effect of label quality matters for fine-grained attribute disambiguation. We evaluate X-CLR trained on high-quality ImageNet labels and on noisier CC labels, and find that smaller but better quality ImageNet dataset yields better representations for attribute disambiguation on MIT states. For more details on the benchmark, see A.8.3.
|
| 212 |
+
|
| 213 |
+
<table><tr><td>Pretraining</td><td>Data Size</td><td>Quality</td><td>MIT States Attributes</td><td>MIT States Objects</td></tr><tr><td>CLIP CC3M</td><td>3M</td><td>Noisy</td><td>27.0</td><td>40.1</td></tr><tr><td>CLIP CC12M</td><td>12M</td><td>Noisy</td><td>23.3</td><td>36.9</td></tr><tr><td>X-CLR CC3M</td><td>3M</td><td>Noisy</td><td>29.5</td><td>40.7</td></tr><tr><td>X-CLR CC12M</td><td>12M</td><td>Noisy</td><td>30.1</td><td>42.1</td></tr><tr><td>X-CLR ImageNet</td><td>1M</td><td>High</td><td>30.9</td><td>45.8</td></tr></table>
|
| 214 |
+
|
| 215 |
+
The impact of label quality for fine-grained attribute disambiguation We show in table 6 how label quality can impact downstream performance on finer-grained attribute disambiguation. We find labels from noisy captions degrade performance for fine-grained object attributes in MIT States (Isola et al., 2015) for both $\mathbb{X}$ -CLR and CLIP. We find $\mathbb{X}$ -CLR with high quality labels from ImageNet, can outperform models trained on much larger noisier data. Compared to CLIP trained on $12 \times$ more data, $\mathbb{X}$ -CLR achieves $30.9\%$ vs. $23.3\%$ for CLIP on attribute classification and $45.8\%$ vs. $36.9\%$ for CLIP on object classification on MIT States benchmark (see appendix A.7 for more details).
|
| 216 |
+
|
| 217 |
+
# 6 DISCUSSION
|
| 218 |
+
|
| 219 |
+
We revisited the graph view on the commonly used contrastive learning methods and developed a better learning objective, $\mathbb{X}$ -CLR, by integrating a soft similarity graph. The adjacency matrix of the proposed graph contains not just 0 and 1, but also any values between, capturing the degree of similarity across samples. We experiment with different ways of constructing the graph, and find that indeed we can build a soft graph that improves over the existing binary graph of contrastive methods. However, we believe that there are better ways of constructing the graph than what we found, particularly for the conceptual captions dataset where the captions are quite noisy. A better graph can possibly be built using other metadata, such as location or time. We also believe that ideas from $\mathbb{X}$ -CLR can be used with other modalities where extra metadata is available, e.g. biological data with the associated taxonomy. The soft graph can also be used to enhance non-contrastive objectives such as BYOL (Grill et al., 2020) or VICReg (Bardes et al., 2021).
|
| 220 |
+
|
| 221 |
+
Limitations. The main limitation of the present work is that constructing the cross-sample similarity graph requires extra data, as well as some extra memory to store it. When the extra data is not available, the only options remaining are to build the graph using the augmentations, self-distillation, or other pre-trained models. The resulting method is also highly dependent on the quality of the graph, as we have seen with conceptual captions datasets.
|
| 222 |
+
|
| 223 |
+
# 7 ACKNOWLEDGMENTS
|
| 224 |
+
|
| 225 |
+
This material is based upon work supported by the National Science Foundation under NSF Award 1922658.
|
| 226 |
+
|
| 227 |
+
# REFERENCES
|
| 228 |
+
|
| 229 |
+
Alex Andonian, Shixing Chen, and Raffay Hamid. Robust cross-modal representation learning with progressive self-distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16430-16441, 2022.
|
| 230 |
+
Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. Objectnet: A large-scale bias-controlled dataset for pushing the limits of object recognition models. Advances in neural information processing systems, 32, 2019.
|
| 231 |
+
Adrien Bardes, Jean Ponce, and Yann LeCun. Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021.
|
| 232 |
+
Lucas Beyer, Olivier J Hénaff, Alexander Kolesnikov, Xiaohua Zhai, and Aäron van den Oord. Are we done with imagenet? arXiv preprint arXiv:2006.07159, 2020.
|
| 233 |
+
Steven Bird, Edward Loper, and Ewan Klein. Natural Language Processing with Python. O'Reilly Media Inc., 2009.
|
| 234 |
+
Vivien Cabannes, Leon Bottou, Yann Lecun, and Randall Balestriero. Active self-supervised learning: A few low-cost relationships are all you need. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16274-16283, 2023.
|
| 235 |
+
Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. Advances in neural information processing systems, 33:9912-9924, 2020.
|
| 236 |
+
Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020.
|
| 237 |
+
Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15745-15753, 2020.
|
| 238 |
+
S. Chopra, R. Hadsell, and Y. LeCun. Learning a similarity metric discriminatively, with application to face verification. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), volume 1, pages 539-546 vol. 1, 2005. doi: 10.1109/CVPR.2005.202.
|
| 239 |
+
Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation policies from data. arXiv preprint arXiv:1805.09501, 2018.
|
| 240 |
+
Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. doi: 10.1109/CVPR.2009.5206848.
|
| 241 |
+
Julien Denize, Jaonary Rabarisoa, Astrid Orcesi, Romain Hérault, and Stephane Canu. Similarity contrastive estimation for self-supervised soft contrastive learning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2706-2716, 2023.
|
| 242 |
+
Debidatta Dwibedi, Yusuf Aytar, Jonathan Tompson, Pierre Sermanet, and Andrew Zisserman. With a little help from my friends: Nearest-neighbor contrastive learning of visual representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9588-9597, 2021.
|
| 243 |
+
Logan Engstrom, Andrew Ilyas, Hadi Salman, Shibani Santurkar, and Dimitris Tsipras. Robustness (python library), 2019. URL https://github.com/MadryLab/robustness.
|
| 244 |
+
Christiane Fellbaum. WordNet: An Electronic Lexical Database. Bradford Books, 1998. URL: https://mitpress.mit.edu/9780262561167/.
|
| 245 |
+
Chen Feng and Ioannis Patras. Maskcon: Masked contrastive learning for coarse-labelled dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19913-19922, 2023.
|
| 246 |
+
|
| 247 |
+
Enrico Fini, Pietro Astolfi, Karteek Alahari, Xavier Alameda-Pineda, Julien Mairal, Moin Nabi, and Elisa Ricci. Semi-supervised learning made simple with self-supervised clustering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3187-3197, 2023a.
|
| 248 |
+
Enrico Fini, Pietro Astolfi, Adriana Romero-Soriano, Jakob Verbeek, and Michal Drozdzal. Improved baselines for vision-language pre-training. Transactions on Machine Learning Research, 2023b. ISSN 2835-8856. URL https://openreview.net/forum?id=a7nvXxNmdV. Featured Certification.
|
| 249 |
+
Yuting Gao, Jinfeng Liu, Zihan Xu, Tong Wu, Enwei Zhang, Ke Li, Jie Yang, Wei Liu, and Xing Sun. Softclip: Softer cross-modal alignment makes clip stronger. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 1860-1868, 2024.
|
| 250 |
+
Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 33:21271-21284, 2020.
|
| 251 |
+
Jeff Z HaoChen, Colin Wei, Adrien Gaidon, and Tengyu Ma. Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34:5000-5011, 2021.
|
| 252 |
+
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, abs/1512.03385, 2015.
|
| 253 |
+
Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020.
|
| 254 |
+
Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015.
|
| 255 |
+
David T Hoffmann, Nadine Behrmann, Juergen Gall, Thomas Brox, and Mehdi Noroozi. Ranking info noise contrastive estimation: Boosting contrastive learning via ranked positives. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 897-905, 2022.
|
| 256 |
+
Hailang Huang, Zhijie Nie, Ziqiao Wang, and Ziyu Shang. Cross-modal and uni-modal soft-label alignment for image-text retrieval. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 18298-18306, 2024.
|
| 257 |
+
Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, July 2021. URL https://doi.org/10.5281/zenodo.5143773. If you use this software, please cite it as below.
|
| 258 |
+
Phillip Isola, Joseph J Lim, and Edward H Adelson. Discovering states and transformations in image collections. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1383-1391, 2015.
|
| 259 |
+
Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. Supervised contrastive learning. Advances in neural information processing systems, 33:18661-18673, 2020.
|
| 260 |
+
Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.
|
| 261 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.
|
| 262 |
+
Nils Reimers and Iryna Gurevych. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084, 2019.
|
| 263 |
+
|
| 264 |
+
Chaitanya K Ryali, David J Schwab, and Ari S Morcos. Characterizing and improving the robustness of self-supervised learning through background augmentations. arXiv preprint arXiv:2103.12719, 2021.
|
| 265 |
+
Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 815-823, 2015.
|
| 266 |
+
Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2556-2565, 2018.
|
| 267 |
+
Chengchao Shen, Dawei Liu, Hao Tang, Zhe Qu, and Jianxin Wang. Inter-instance similarity modeling for contrastive learning. arXiv preprint arXiv:2306.12243, 2023.
|
| 268 |
+
Yonglong Tian, Lijie Fan, Phillip Isola, Huiwen Chang, and Dilip Krishnan. Stablerep: Synthetic images from text-to-image models make strong visual representation learners. Advances in Neural Information Processing Systems, 36, 2024.
|
| 269 |
+
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.
|
| 270 |
+
Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008.
|
| 271 |
+
Yifei Wang, Qi Zhang, Yisen Wang, Jiansheng Yang, and Zhouchen Lin. Chaos is a ladder: A new theoretical understanding of contrastive learning via augmentation overlap. arXiv preprint arXiv:2203.13457, 2022.
|
| 272 |
+
Yifei Wang, Qi Zhang, Tianqi Du, Jiansheng Yang, Zhouchen Lin, and Yisen Wang. A message passing perspective on learning dynamics of contrastive learning. arXiv preprint arXiv:2303.04435, 2023.
|
| 273 |
+
Kan Wu, Houwen Peng, Zhenghong Zhou, Bin Xiao, Mengchen Liu, Lu Yuan, Hong Xuan, Michael Valenzuela, Xi Stephen Chen, Xinggang Wang, et al. Tinyclip: Clip distillation via affinity mimicking and weight inheritance. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 21970-21980, 2023.
|
| 274 |
+
Zhibiao Wu and Martha Palmer. Verb semantics and lexical selection. In Proceedings of the 32nd annual meeting of the Association for Computational Linguistics, pages 133-138, 1994.
|
| 275 |
+
Kai Xiao, Logan Engstrom, Andrew Ilyas, and Aleksander Madry. Noise or signal: The role of image backgrounds in object recognition. arXiv preprint arXiv:2006.09994, 2020.
|
| 276 |
+
Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017.
|
| 277 |
+
Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stéphane Deny. Barlow twins: Self-supervised learning via redundancy reduction. International Conference on Machine Learning, 2021.
|
| 278 |
+
Qi Zhang, Yifei Wang, and Yisen Wang. On the generalization of multi-modal contrastive learning. In International Conference on Machine Learning, pages 41677-41693. PMLR, 2023.
|
| 279 |
+
Mingkai Zheng, Shan You, Fei Wang, Chen Qian, Changshui Zhang, Xiaogang Wang, and Chang Xu. Ressl: Relational self-supervised learning with weak augmentation. Advances in Neural Information Processing Systems, 34:2543-2555, 2021.
|
| 280 |
+
|
| 281 |
+
Table 7: Analyzing statistical significance of ImageNet results. Each experiment is ran with 5 seeds, we report the mean and standard deviation.
|
| 282 |
+
|
| 283 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="2">Background Decomposition</td><td rowspan="2">ObjectNet</td><td colspan="2">MIT States</td></tr><tr><td>Same Class</td><td>Mixed</td><td>Objects</td><td>Attributes</td></tr><tr><td>SimCLR</td><td>63.43 ± 0.12</td><td>67.75 ± 0.27</td><td>38.88 ± 0.43</td><td>44.67 ± 0.60</td><td>12.07 ± 0.33</td><td>40.92 ± 0.26</td><td>29.08 ± 0.17</td></tr><tr><td>SupCon</td><td>74.30 ± 0.16</td><td>79.66 ± 0.12</td><td>59.08 ± 0.44</td><td>64.00 ± 0.62</td><td>24.42 ± 0.25</td><td>45.56 ± 0.16</td><td>30.83 ± 0.20</td></tr><tr><td>X-CLR</td><td>75.56 ± 0.09</td><td>81.54 ± 0.13</td><td>62.74 ± 0.27</td><td>66.59 ± 0.25</td><td>27.53 ± 0.13</td><td>45.86 ± 0.15</td><td>31.10 ± 0.18</td></tr></table>
|
| 284 |
+
|
| 285 |
+
# A APPENDIX / SUPPLEMENTAL MATERIAL
|
| 286 |
+
|
| 287 |
+
# A.1 MORE LEARNED SIMILARITIES COMPARISONS
|
| 288 |
+
|
| 289 |
+
We experiment with building the graph in the following ways:
|
| 290 |
+
|
| 291 |
+
- Graph with connections only between samples of the same class (SupCon);
|
| 292 |
+
- Graph with connections only between augmentations of the same image (SimCLR);
|
| 293 |
+
- Graph where soft similarity is inferred by comparing representations of the sample captions. The representations are computed using the sentence transformer (Reimers and Gurevych, 2019), CLIP text encoder (Radford et al., 2021), LLama2 encoder (Touvron et al., 2023). For LLama2, we averaged the output tokens;
|
| 294 |
+
- Graph where the connection strength is defined by the distance in WordNet (Fellbaum, 1998) hierarchy. We used the NLTK library Bird et al. (2009), and used the Wu-Palmer similarity (Wu and Palmer, 1994) between the class synsets;
|
| 295 |
+
- We also experiment with random graphs where the cross-sample connections' strengths are fully random. The connections are either random per sample pair, or random per class pair (akin to using a random text encoder for captions).
|
| 296 |
+
|
| 297 |
+
The results are shown in table 8. We find that overall, the Sentence Transformer graph performs the best, although the CLIP text encoder achieves good performance as well. Interestingly, we find that using WordNet hierarchy distance did not work well. Additionally, random per class pair performs quite well, particularly on background decomposition. We hypothesize that it's due to a regularizing effect of the random similarities. Nevertheless, the performance of ImageNet, ImageNet Real and Object Net is lower than when using Sentence Transformer. We visualize learned and target similarities for SupCon graph and for the graph built using CLIP text encoder in fig. 6.
|
| 298 |
+
|
| 299 |
+
Visualising similarities In fig. 4, to visualize learned similarities, for each class we pick 100 examples from the dataset and encode them. Then, to calculate the average learned similarity between two classes, we take the 100 examples for each of the two classes, and calculate the Cartesian product, yielding 10,000 similarities. We take the mean over those 10,000 similarities to represent the average learned similarity for a class pair.
|
| 300 |
+
|
| 301 |
+
Similarities when training on CC datasets In appendix A.2, we show the similarities learned by X-CLR on CC3M and CC12M datasets.
|
| 302 |
+
|
| 303 |
+
# A.2 ANALYZING STATISTICAL SIGNIFICANCE OF THE RESULTS
|
| 304 |
+
|
| 305 |
+
To make sure the difference in performance we observe is statistically significant, we run X-CLR, SimCLR, and SupCon pretraining with 5 different seeds. We report the results of the evaluations in table 7.
|
| 306 |
+
|
| 307 |
+
# A.3 ANALYZING THE LEARNED GRAPH
|
| 308 |
+
|
| 309 |
+
We follow the analysis of Zhang et al. (2023) and show results in table 9. The analysis studies two values: label error which measures how similar samples of different classes are on average, and intraclass connectivity, which measures the similarity of the samples within the class relative to those
|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
(a) SupCon target and learned similarities
|
| 315 |
+
(b) CLIP target and learned similarities
|
| 316 |
+
|
| 317 |
+

|
| 318 |
+
Figure 6: Target and learned similarities for different graphs.
|
| 319 |
+
(a) CC3M similarities
|
| 320 |
+
Figure 7: X-CLR Learned similarities when trained on a) CC3M and b) CC12M.
|
| 321 |
+
|
| 322 |
+

|
| 323 |
+
(b) CC12M similarities
|
| 324 |
+
|
| 325 |
+
Table 8: The effect of the similarity source on the model performance.
|
| 326 |
+
|
| 327 |
+
<table><tr><td rowspan="2">Similarity source</td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="3">Background Decomposition</td></tr><tr><td>Same Class</td><td>Mixed</td><td>ObjectNet</td></tr><tr><td>Augmentation graph (SimCLR)</td><td>63.4</td><td>67.8</td><td>44.7</td><td>38.9</td><td>12.1</td></tr><tr><td>True class graph (SupCon)</td><td>74.3</td><td>79.7</td><td>64.0</td><td>59.1</td><td>24.4</td></tr><tr><td>Sentence Transformer (X-CLR)</td><td>75.6</td><td>81.5</td><td>66.6</td><td>62.7</td><td>27.5</td></tr><tr><td>CLIP text encoder</td><td>74.4</td><td>80.6</td><td>67.5</td><td>64.2</td><td>24.5</td></tr><tr><td>LLama2 text encoder</td><td>40.9</td><td>45.8</td><td>38.3</td><td>36.0</td><td>4.3</td></tr><tr><td>Random per class pair</td><td>74.5</td><td>80.8</td><td>71.0</td><td>68.0</td><td>26.6</td></tr><tr><td>Random per sample pair</td><td>0.1</td><td>0.1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>Distance in WordNet hierarchy</td><td>68.3</td><td>74.9</td><td>55.7</td><td>52.1</td><td>21.2</td></tr></table>
|
| 328 |
+
|
| 329 |
+
Table 9: Analyzing the learned representations' connectivity
|
| 330 |
+
|
| 331 |
+
<table><tr><td>Metric</td><td>CLIP</td><td>SimCLR</td><td>SupCon</td><td>X-CLR</td></tr><tr><td>Label error (↓)</td><td>0.550</td><td>0.250</td><td>0.250</td><td>0.223</td></tr><tr><td>Intra-class connectivity (↑)</td><td>1.233</td><td>1.700</td><td>2.005</td><td>2.193</td></tr></table>
|
| 332 |
+
|
| 333 |
+
from different classes. This allows us to determine how well the learned graph captures the class relationships in the data. Since the open-source repository of that paper did not contain the code for analysis, we re-implemented it to the best of our ability.
|
| 334 |
+
|
| 335 |
+
Here, $\mathbb{X}$ -CLR, SimCLR, and SupCon are trained on ImageNet, while CLIP is trained on CC12M. According to these metrics, $\mathbb{X}$ -CLR representation is the best among the baselines, meaning that we learn a better structured graph. We note that our SimCLR numbers are much better than in the original paper. We suspect that it's due to the fact that the authors train SimCLR with the batch size of 512, while we use 2048. ImageNet classification performance of our SimCLR model is also higher, at 63.4, compared to 61.2.
|
| 336 |
+
|
| 337 |
+
We also note that label error, which is the measure of average similarity between instances of different classes, is lower for our method, although the loss itself encourages it to be higher for related samples. This is due to the fact that in this analysis, we use the first 10 classes from ImageNet (replicating the original procedure), and those classes are not related to each other.
|
| 338 |
+
|
| 339 |
+
# A.4 KNN EVALUATION
|
| 340 |
+
|
| 341 |
+
Apart from testing the models trained on ImageNet using KNN, we also evaluate the models trained on CC3M and CC12M. The results are shown in fig. 8. We see that $\mathbb{X}$ -CLR performs better on CC3M, and comparatively with SimCLR when trained on CC12M.
|
| 342 |
+
|
| 343 |
+
# A.5 IMAGENET-9 DETAILS
|
| 344 |
+
|
| 345 |
+
# A.6 CLIP DETAILS
|
| 346 |
+
|
| 347 |
+
In CC3M experiments, we train the model from scratch, as OpenCLIP didn't have a checkpoint trained on that dataset. We trained both for 32 and 100 epochs, and found that the model trained for 100 epochs performs better. Since 32 epochs is the default CLIP number of epochs, we also report results for 32 epochs. The results are shown in table 10.
|
| 348 |
+
|
| 349 |
+
# A.7 MORE TRAINING DETAILS
|
| 350 |
+
|
| 351 |
+
We train SimCLR, SupCon and $\mathbb{X}$ -CLR using the LARS optimizer (You et al., 2017). In all cases, we use the same ResNet-50, with a two layer projector on top. The output dimension of the projector is 128. We note that SupCon benefits from bigger batch sizes and longer training as was shown in (Khosla et al., 2020), but in our experiments we did not explore training for more than 100 epochs or
|
| 352 |
+
|
| 353 |
+

|
| 354 |
+
Figure 8: Results of models trained on ImageNet, CC3M, CC12M on ImageNet validation when using KNN classifier.
|
| 355 |
+
|
| 356 |
+

|
| 357 |
+
|
| 358 |
+

|
| 359 |
+
|
| 360 |
+
Table 10: CLIP on CC3M We train our own models on CC3M and find that training longer improves the performance. Nevertheless, CLIP struggles with small datasets.
|
| 361 |
+
|
| 362 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">ImageNet</td><td rowspan="2">ImageNet Real</td><td colspan="2">Background Decomposition</td><td rowspan="2">ObjectNet</td></tr><tr><td>Same Class</td><td>Mixed</td></tr><tr><td>CLIP 100 epochs</td><td>41.0</td><td>47.6</td><td>12.5</td><td>10.6</td><td>7.8</td></tr><tr><td>CLIP 32 epochs</td><td>36.8</td><td>42.0</td><td>11.5</td><td>9.8</td><td>6.0</td></tr></table>
|
| 363 |
+
|
| 364 |
+
with bigger batch sizes. For fair comparison, we keep the number of epochs to 100 and batch size to 1024 across all methods we train on ImageNet.
|
| 365 |
+
|
| 366 |
+
**Fetching similarities** For ImageNet, since the number of classes is known, we pre-compute the similarity matrix of dimension $1000 \times 1000$ , and retrieve elements from it depending on the associated class labels for a given sample pair to obtain the similarity value. For conceptual captions, we run the text encoder on the full dataset and save the encodings to disk. Then, when loading an image from disk, we also load the associated encoding of the corresponding caption. The similarity matrix for a given batch is then obtained by calculating the Cartesian product of those encodings.
|
| 367 |
+
|
| 368 |
+
MIT States In order to evaluate on this dataset using linear probing, we split the dataset randomly into two even parts, one used for training the linear layer, the other for evaluation. We train separately to classify objects and attributes.
|
| 369 |
+
|
| 370 |
+
# A.8 EVALUATION DETAILS
|
| 371 |
+
|
| 372 |
+
All evaluations are done with linear probing. We use the learning rate of 1 and plain SGD optimizer for all evaluations. We do not use zero-shot classification for CLIP, or any other models. Since $\mathbb{X}$ -CLR doesn't align the representations of text and images, we cannot utilize zero-shot classification to evaluate. For ImageNet models we train ourselves, we do not train the linear probe separately; we train it along with the rest of the model with detached gradients.
|
| 373 |
+
|
| 374 |
+
# A.8.1 IMAGENET REAL
|
| 375 |
+
|
| 376 |
+
ImageNet Real evaluation uses improved ImageNet labels from (Beyer et al., 2020). The improved evaluation allows for multiple labels for scenes where labels are ambiguous, and corrects some mistakes in annotations.
|
| 377 |
+
|
| 378 |
+
# A.8.2 BACKGROUND DECOMPOSITION WITH IMAGENET-9
|
| 379 |
+
|
| 380 |
+
ImageNet-9 (Xiao et al., 2020) proposes multiple benchmarks to test model robustness to the background perturbation. The benchmark is created by taking samples from ImageNet, segmenting
|
| 381 |
+
|
| 382 |
+

|
| 383 |
+
(a) Mixed
|
| 384 |
+
Figure 9: The two settings we use for ImageNet-9 (Xiao et al., 2020) benchmark. In mixed setting in a), the background is swapped with that of a sample belonging to another class. In a), this results in an image of a butterfly on underwater background. Performing well in this setting is hard if the model learns spurious correlations with the background. In same class setting in b), the background is swapped with that of a sample belonging to the same class. In b), we see a butterfly pasted onto the background of another butterfly sample, in this case a tree branch on green background. This lets us see how much swapping the background affects the performance and serves as a baseline for 'mixed'.
|
| 385 |
+
|
| 386 |
+

|
| 387 |
+
(b) Same class
|
| 388 |
+
|
| 389 |
+

|
| 390 |
+
pureed tomato
|
| 391 |
+
|
| 392 |
+

|
| 393 |
+
fresh tomato
|
| 394 |
+
|
| 395 |
+

|
| 396 |
+
peeled tomato
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
diced tomato
|
| 400 |
+
|
| 401 |
+

|
| 402 |
+
pureed tomato
|
| 403 |
+
|
| 404 |
+

|
| 405 |
+
typical flower
|
| 406 |
+
Figure 10: MIT States (Isola et al., 2015) samples from two classes: tomato and flower. Each sample has an associated class name and attribute. We show tomatoes and flowers classes with different attributes. For example, the attributes we show for tomato are "pureed", "fresh", "peeled", and "diced". We train our models to classify class names and attributes. When predicting attributes, we do not condition on class names and vice versa.
|
| 407 |
+
|
| 408 |
+

|
| 409 |
+
fresh flower
|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
fresh flower
|
| 413 |
+
|
| 414 |
+

|
| 415 |
+
crumpled flower
|
| 416 |
+
|
| 417 |
+

|
| 418 |
+
crinkled flower
|
| 419 |
+
|
| 420 |
+
the object in the scene, and swapping out the background. Since the benchmark uses the same classes as ImageNet, we do not retrain the ImageNet classifier. We consider two setups for evaluation:
|
| 421 |
+
|
| 422 |
+
- where the object is seen with backgrounds from the same class, which we label "same class";
|
| 423 |
+
- where the objects are seen with backgrounds from other classes, which we label "mixed".
|
| 424 |
+
|
| 425 |
+
These evaluations isolate how well the model can classify an object without relying on spurious associations from the background. We show an example in fig. 9.
|
| 426 |
+
|
| 427 |
+
# A.8.3 MIT STATES
|
| 428 |
+
|
| 429 |
+
Here we measure object classification accuracy as the object attributes vary as well as the models' ability to identify the object attributes such as color, texture, shape. We show a few examples from the dataset in fig. 10. Because this benchmark uses a different set of classes from ImageNet, we need to train a classifier separately.
|
| 430 |
+
|
| 431 |
+

|
| 432 |
+
Figure 11: Examples from ObjectNet dataset. We take the figure from Barbu et al. (2019). As opposed to ImageNet samples, ObjectNet shows objects in an unexpected contexts and positions. For example, a chair lying on the bathroom floor or a teapot on its side near a bathroom sink. This benchmark tests robustness of the models to these unexpected context changes.
|
| 433 |
+
|
| 434 |
+
# A.8.4 OBJECTNET
|
| 435 |
+
|
| 436 |
+
ObjectNet contains real-world objects with varying poses, backgrounds, and viewpoints. The benchmark uses the same classes as ImageNet, and measures how robust the model is to seeing the objects in unexpected contexts. Like for ImageNet-9, since the benchmark uses the same classes as ImageNet, we do not retrain the ImageNet classifier. We show examples in fig. 11.
|
| 437 |
+
|
| 438 |
+
# A.9 CONNECTION BETWEEN SUPERVISED CONTRASTIVE LEARNING AND X-CLR
|
| 439 |
+
|
| 440 |
+
Here, we will outline how as the temperature $\tau_{s}$ approaches 0, $\mathbb{X}$ -CLR becomes SupCon. Supervised Contrastive Learning (Khosla et al., 2020) also uses image augmentations, and augments each image twice, to obtain what they call "a multiviewed batch". Then, in equation 2, they propose the loss:
|
| 441 |
+
|
| 442 |
+
$$
|
| 443 |
+
\mathcal{L}_{\text{out}}^{\sup} = \sum_{i \in I} \mathcal{L}_{\text{out}, i}^{\sup} = \sum_{i \in I} \frac{-1}{|P(i)|} \sum_{p \in P(i)} \log p_{i, p}
|
| 444 |
+
$$
|
| 445 |
+
|
| 446 |
+
where $p_{i,j}$ is defined as follows:
|
| 447 |
+
|
| 448 |
+
$$
|
| 449 |
+
p_{i, j} = \frac{\exp(\mathrm{sim}(z_i, z_j) / \tau)}{\sum_{k = 1}^{2 N_b} \mathbf{1}_{[k \neq i]} \exp(\mathrm{sim}(z_i, z_k) / \tau)}
|
| 450 |
+
$$
|
| 451 |
+
|
| 452 |
+
However, $|P(i)|$ is exactly the number of positive samples, and $p_{i,p}$ is the probability of $i$ and $p$ being a positive pair according to the model. We set $s_i^{\mathrm{supcon}}$ to be a distribution over $2N_{b} - 1$ candidates for positive pairs and define it as follows:
|
| 453 |
+
|
| 454 |
+
$$
|
| 455 |
+
s_{i, j}^{\mathrm{supcon}} = \begin{cases} \frac{1}{|P(i)|}, & \text{if } j \in P(i) \\ 0, & \text{otherwise} \end{cases}
|
| 456 |
+
$$
|
| 457 |
+
|
| 458 |
+
Then, we can write down the original loss as:
|
| 459 |
+
|
| 460 |
+
$$
|
| 461 |
+
\mathcal{L}_{\text{out}, i}^{\sup} = H\left(s_i^{\mathrm{supcon}}, p_i\right)
|
| 462 |
+
$$
|
| 463 |
+
|
| 464 |
+
where $\mathrm{H}$ is the cross-entropy. This looks exactly like the $\mathbb{X}$ -CLR objective. We can recover the SupCon objective if we decrease the temperature $\tau_{s}$ toward 0; in the limit, the resulting distribution $s_i$ will be equal to $s_i^{\mathrm{supcon}}$ .
|
| 465 |
+
|
| 466 |
+
```python
|
| 467 |
+
def training_step(images, sims):
|
| 468 |
+
    # encode images with the model we are training
|
| 469 |
+
    img_emb = img_enc(images) # N × D_img
|
| 470 |
+
    # convert targets to distributions
|
| 471 |
+
    tgt_sims = softmax(sims / tau_s, dim=1)
|
| 472 |
+
    # calculate similarities
|
| 473 |
+
    img_sims = softmax(img_emb @ img_emb.T / tau, dim=1)
|
| 474 |
+
    # the loss is the cross-entropy
|
| 475 |
+
    return CE(tgt_sims, img_sims, dim=1).mean()
|
| 476 |
+
```
|
| 477 |
+
|
| 478 |
+
Figure 12: Pseudocode for the $\mathbb{X}$ -CLR loss with general target similarities, which do not necessarily come from a text encoder.
|
| 479 |
+
|
| 480 |
+
# A.10 CONNECTION BETWEEN CLIP AND $\mathbb{X}$ -CLR
|
| 481 |
+
|
| 482 |
+
X-CLR and CLIP are similar because they both use images and text data. That said, X-CLR trains only the image representation, and the text is used only to calculate target similarities. The similarities do not have to come from text, as we showed in table 2. CLIP, on the other hand, trains both the image and text encoders. CLIP loss can be expressed as follows:
|
| 483 |
+
|
| 484 |
+
$$
|
| 485 |
+
\mathcal{L}_{\mathrm{CLIP}} = \mathcal{L}_{\text{text}} + \mathcal{L}_{\text{image}}
|
| 486 |
+
$$
|
| 487 |
+
|
| 488 |
+
$$
|
| 489 |
+
p_{i, j}^{\text{text-image}} = \frac{\exp(\mathrm{sim}(z_i^{\text{text}}, z_j^{\text{image}}) / \tau)}{\sum_{k = 1}^{N_b} \exp(\mathrm{sim}(z_i^{\text{text}}, z_k^{\text{image}}) / \tau)}
|
| 490 |
+
$$
|
| 491 |
+
|
| 492 |
+
$$
|
| 493 |
+
\mathcal{L}_{\text{text}} = \frac{1}{N_b} \sum_{i = 1}^{N_b} H\left(\mathbb{1}_i, p_i^{\text{text-image}}\right)
|
| 494 |
+
$$
|
| 495 |
+
|
| 496 |
+
$$
|
| 497 |
+
p_{i, j}^{\text{image-text}} = \frac{\exp(\mathrm{sim}(z_i^{\text{image}}, z_j^{\text{text}}) / \tau)}{\sum_{k = 1}^{N_b} \exp(\mathrm{sim}(z_i^{\text{image}}, z_k^{\text{text}}) / \tau)}
|
| 498 |
+
$$
|
| 499 |
+
|
| 500 |
+
$$
|
| 501 |
+
\mathcal{L}_{\text{image}} = \frac{1}{N_b} \sum_{i = 1}^{N_b} H\left(\mathbb{1}_i, p_i^{\text{image-text}}\right)
|
| 502 |
+
$$
|
| 503 |
+
|
| 504 |
+
Here, $N_{b}$ is the batch size, $D$ is the dimension of the encodings, $\mathbf{Z}^{\mathrm{image}}\in \mathbb{R}^{N_b\times D}$ is the batch of image encodings coming from the visual encoder, $\mathbf{Z}^{\mathrm{text}}\in \mathbb{R}^{N_b\times D}$ is the batch of text encodings coming from the text encoder model. $\mathbb{1}_i$ is a one-hot distribution on $N_{b}$ elements, with all the probability on $i$ -th element.
|
| 505 |
+
|
| 506 |
+
We can see that as opposed to $\mathbb{X}$ -CLR loss, CLIP loss has two major differences:
|
| 507 |
+
|
| 508 |
+
- CLIP loss is pushing similarities of images and text encodings together for positive pairs, while X-CLR loss only pushes the images representations to each other. X-CLR only works with image encodings, and beyond the target similarities, doesn't deal with text encodings at all;
|
| 509 |
+
- $\mathbb{X}$ -CLR loss uses a soft target distribution in cross entropy, while CLIP uses a one-hot distribution as target.
|
| 510 |
+
|
| 511 |
+
Due to these fundamental differences, $\mathbb{X}$ -CLR is different from CLIP even when the target similarities are coming from a frozen pre-trained CLIP text encoder. We hypothesize that CLIP could also benefit from soft target similarities, but leave that extension for future work.
|
| 512 |
+
|
| 513 |
+
# A.11 PSEUDOCODE FOR THE GENERAL SIMILARITIES
|
| 514 |
+
|
| 515 |
+
As we showed in table 2, the similarities do not have to come from a text encoder. In fig. 12, we show pseudocode for how the loss function would look for general target similarities provided in the variable sims.
|
| 516 |
+
|
| 517 |
+

|
| 518 |
+
(a) $\mathbb{X}$ -CLR
|
| 519 |
+
|
| 520 |
+

|
| 521 |
+
(b) SimCLR
|
| 522 |
+
|
| 523 |
+

|
| 524 |
+
(c) SupCon
|
| 525 |
+
Figure 13: X-CLR and SupCon representations fall into well-defined clusters, whereas SimCLR representations are less structured.
|
| 526 |
+
|
| 527 |
+
# A.12 T-SNE OF THE LEARNED REPRESENTATIONS
|
| 528 |
+
|
| 529 |
+
In fig. 13, we show T-SNE plots of representations of a few superclasses from ImageNet. We used the 'living 9' set of classes from (Engstrom et al., 2019).
|
2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eab8e7c912f194776dec126e54846b49070c3fca8168409e9f5ac91547046cc9
|
| 3 |
+
size 1229227
|
2025/$_mathbb{X}$-Sample Contrastive Loss_ Improving Contrastive Learning with Sample Similarity Graphs/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/c98e433f-25fa-4049-854e-e3fdde6ceb89_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:78fa133085a309a10fb2429274cd384cf8b79f0772d3c002856e1c513e14d1a5
|
| 3 |
+
size 813897
|
2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0418835849bb80f93a91375935754ad712dcfc1254b5d9228bbf3e6ab201af07
|
| 3 |
+
size 1940259
|
2025/$_phi$-Update_ A Class of Policy Update Methods with Policy Convergence Guarantee/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/3820601e-9eb2-4b22-a325-26893c36ef95_content_list.json
ADDED
|
@@ -0,0 +1,1459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "$\\sigma$ -ZERO: GRADIENT-BASED OPTIMIZATION OF $\\ell_0$ -NORM ADVERSARIAL EXAMPLES",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
169,
|
| 8 |
+
99,
|
| 9 |
+
733,
|
| 10 |
+
148
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Antonio Emanuele Cina $^{1}$ \nBattista Biggio $^{2}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
181,
|
| 19 |
+
171,
|
| 20 |
+
359,
|
| 21 |
+
200
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Francesco Villani",
|
| 28 |
+
"bbox": [
|
| 29 |
+
401,
|
| 30 |
+
172,
|
| 31 |
+
531,
|
| 32 |
+
186
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Maura Pintor²",
|
| 39 |
+
"bbox": [
|
| 40 |
+
573,
|
| 41 |
+
172,
|
| 42 |
+
679,
|
| 43 |
+
186
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Lea Schonherr<sup>3</sup>",
|
| 50 |
+
"bbox": [
|
| 51 |
+
720,
|
| 52 |
+
172,
|
| 53 |
+
834,
|
| 54 |
+
186
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "list",
|
| 60 |
+
"sub_type": "text",
|
| 61 |
+
"list_items": [
|
| 62 |
+
"$^{1}$ Department of Computer Science, Bioengineering, Robotics and Systems, University of Genoa, Italy",
|
| 63 |
+
"$^{2}$ Department of Electrical and Electronic Engineering, University of Cagliari, Italy",
|
| 64 |
+
"$^{3}$ CISPA Helmholtz Center for Information Security, Germany",
|
| 65 |
+
"<sup>4</sup>Department of Environmental Sciences, Informatics and Statistics, Ca' Foscari University of Venice, Italy \nantonio.cina@unige.it francesco.villani@edu.unige.it maura.pintor@unica.it \nschoenherr@cispa.de battista.biggio@unica.it pelillo@unive.it"
|
| 66 |
+
],
|
| 67 |
+
"bbox": [
|
| 68 |
+
183,
|
| 69 |
+
203,
|
| 70 |
+
880,
|
| 71 |
+
289
|
| 72 |
+
],
|
| 73 |
+
"page_idx": 0
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"type": "text",
|
| 77 |
+
"text": "ABSTRACT",
|
| 78 |
+
"text_level": 1,
|
| 79 |
+
"bbox": [
|
| 80 |
+
450,
|
| 81 |
+
325,
|
| 82 |
+
545,
|
| 83 |
+
339
|
| 84 |
+
],
|
| 85 |
+
"page_idx": 0
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"type": "text",
|
| 89 |
+
"text": "Evaluating the adversarial robustness of deep networks to gradient-based attacks is challenging. While most attacks consider $\\ell_{2}$ - and $\\ell_{\\infty}$ -norm constraints to craft input perturbations, only a few investigate sparse $\\ell_{1}$ - and $\\ell_{0}$ -norm attacks. In particular, $\\ell_{0}$ -norm attacks remain the least studied due to the inherent complexity of optimizing over a non-convex and non-differentiable constraint. However, evaluating adversarial robustness under these attacks could reveal weaknesses otherwise left untested with more conventional $\\ell_{2}$ - and $\\ell_{\\infty}$ -norm attacks. In this work, we propose a novel $\\ell_{0}$ -norm attack, called $\\sigma$ -zero, which leverages a differentiable approximation of the $\\ell_{0}$ norm to facilitate gradient-based optimization, and an adaptive projection operator to dynamically adjust the trade-off between loss minimization and perturbation sparsity. Extensive evaluations using MNIST, CIFAR10, and ImageNet datasets, involving robust and non-robust models, show that $\\sigma$ -zero finds minimum $\\ell_{0}$ -norm adversarial examples without requiring any time-consuming hyperparameter tuning, and that it outperforms all competing sparse attacks in terms of success rate, perturbation size, and efficiency.",
|
| 90 |
+
"bbox": [
|
| 91 |
+
228,
|
| 92 |
+
357,
|
| 93 |
+
769,
|
| 94 |
+
566
|
| 95 |
+
],
|
| 96 |
+
"page_idx": 0
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"type": "text",
|
| 100 |
+
"text": "1 INTRODUCTION",
|
| 101 |
+
"text_level": 1,
|
| 102 |
+
"bbox": [
|
| 103 |
+
173,
|
| 104 |
+
595,
|
| 105 |
+
336,
|
| 106 |
+
609
|
| 107 |
+
],
|
| 108 |
+
"page_idx": 0
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"type": "text",
|
| 112 |
+
"text": "Early research has revealed that machine learning models are fooled by adversarial examples, i.e., slightly-perturbed inputs optimized to cause misclassifications (Biggio et al., 2013; Szegedy et al., 2014). The discovery of this phenomenon has, in turn, demanded a more careful evaluation of the robustness of such models, especially when deployed in security-sensitive and safety-critical applications. Most of the gradient-based attacks proposed to evaluate the adversarial robustness of Deep Neural Networks (DNNs) optimize adversarial examples under different $\\ell_p$ -norm constraints. In particular, while convex $\\ell_1$ , $\\ell_2$ , and $\\ell_{\\infty}$ norms have been widely studied (Chen et al., 2018; Croce & Hein, 2021), only a few $\\ell_0$ -norm attacks have been considered to date. The main reason is that finding minimum $\\ell_0$ -norm solutions is known to be an NP-hard problem (Davis et al., 1997), and thus ad-hoc approximations must be adopted to overcome issues related to the non-convexity and non-differentiability of such (pseudo) norm. Although this is a challenging task, attacks based on the $\\ell_0$ norm have the potential to uncover issues in DNNs that may not be evident when considering other attacks (Carlini & Wagner, 2017b; Croce & Hein, 2021). In particular, $\\ell_0$ -norm attacks, known to perturb a minimal fraction of input values, can be used to determine the most sensitive characteristics that influence the model's decision-making process, offering a different and relevant threat model to benchmark existing defenses and a different understanding of the model's inner workings.",
|
| 113 |
+
"bbox": [
|
| 114 |
+
169,
|
| 115 |
+
627,
|
| 116 |
+
826,
|
| 117 |
+
851
|
| 118 |
+
],
|
| 119 |
+
"page_idx": 0
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"type": "text",
|
| 123 |
+
"text": "Unfortunately, current $\\ell_0$ -norm attacks exhibit a largely suboptimal trade-off between their success rate and efficiency, i.e., they are either accurate but slow or fast but inaccurate. In particular, the accurate ones use complex projections and advanced initialization strategies (e.g., adversarial",
|
| 124 |
+
"bbox": [
|
| 125 |
+
169,
|
| 126 |
+
856,
|
| 127 |
+
826,
|
| 128 |
+
900
|
| 129 |
+
],
|
| 130 |
+
"page_idx": 0
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"type": "header",
|
| 134 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 135 |
+
"bbox": [
|
| 136 |
+
171,
|
| 137 |
+
32,
|
| 138 |
+
478,
|
| 139 |
+
47
|
| 140 |
+
],
|
| 141 |
+
"page_idx": 0
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"type": "page_footnote",
|
| 145 |
+
"text": "Code is available at https://github.com/sigma0-advx/sigma-zero.",
|
| 146 |
+
"bbox": [
|
| 147 |
+
196,
|
| 148 |
+
910,
|
| 149 |
+
687,
|
| 150 |
+
924
|
| 151 |
+
],
|
| 152 |
+
"page_idx": 0
|
| 153 |
+
},
|
| 154 |
+
{
|
| 155 |
+
"type": "page_number",
|
| 156 |
+
"text": "1",
|
| 157 |
+
"bbox": [
|
| 158 |
+
493,
|
| 159 |
+
948,
|
| 160 |
+
503,
|
| 161 |
+
959
|
| 162 |
+
],
|
| 163 |
+
"page_idx": 0
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"type": "image",
|
| 167 |
+
"img_path": "images/4581badad52aca59f90c0029fd2cdb99695772e8a08490b6ef747713affd64b6.jpg",
|
| 168 |
+
"image_caption": [
|
| 169 |
+
"Figure 1: The leftmost plot shows the execution of $\\sigma$ -zero on a two-dimensional problem. The initial point $\\mathbf{x}$ (red dot) is updated via gradient descent to find the adversarial example $\\mathbf{x}^{\\star}$ (green star) while minimizing the number of perturbed features (i.e., the $\\ell_0$ norm of the perturbation). The gray lines surrounding $\\mathbf{x}$ demarcate regions where the $\\ell_0$ norm is minimized. The rightmost plot shows the adversarial images (top row) and the corresponding perturbations (bottom row) found by $\\sigma$ -zero during the three steps highlighted in the leftmost plot, along with their prediction and $\\ell_0$ norm."
|
| 170 |
+
],
|
| 171 |
+
"image_footnote": [],
|
| 172 |
+
"bbox": [
|
| 173 |
+
181,
|
| 174 |
+
108,
|
| 175 |
+
424,
|
| 176 |
+
250
|
| 177 |
+
],
|
| 178 |
+
"page_idx": 1
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"type": "image",
|
| 182 |
+
"img_path": "images/2b5543101447f943dfb452a23206bc82da673c1a6500bcf707bc97378d440920.jpg",
|
| 183 |
+
"image_caption": [
|
| 184 |
+
"Frog $\\rightarrow$ Frog $\\ell_0:2813$"
|
| 185 |
+
],
|
| 186 |
+
"image_footnote": [],
|
| 187 |
+
"bbox": [
|
| 188 |
+
447,
|
| 189 |
+
109,
|
| 190 |
+
557,
|
| 191 |
+
178
|
| 192 |
+
],
|
| 193 |
+
"page_idx": 1
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"type": "image",
|
| 197 |
+
"img_path": "images/6d12959fcefbd23801381b3132e4288096271595dc2bc619e48cfc3c8ca14154.jpg",
|
| 198 |
+
"image_caption": [],
|
| 199 |
+
"image_footnote": [],
|
| 200 |
+
"bbox": [
|
| 201 |
+
449,
|
| 202 |
+
180,
|
| 203 |
+
557,
|
| 204 |
+
250
|
| 205 |
+
],
|
| 206 |
+
"page_idx": 1
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"type": "image",
|
| 210 |
+
"img_path": "images/3f5323f241a838d55f46ba86c08026117b6493fab9666721666ff09609937b15.jpg",
|
| 211 |
+
"image_caption": [
|
| 212 |
+
"Frog $\\rightarrow$ Chameleon $\\ell_0:1381$"
|
| 213 |
+
],
|
| 214 |
+
"image_footnote": [],
|
| 215 |
+
"bbox": [
|
| 216 |
+
576,
|
| 217 |
+
111,
|
| 218 |
+
683,
|
| 219 |
+
178
|
| 220 |
+
],
|
| 221 |
+
"page_idx": 1
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "image",
|
| 225 |
+
"img_path": "images/cece0dd336b58e1c0b067e8f2be113defec8e7e4ad150d6e5cffffbe9ff79c4b.jpg",
|
| 226 |
+
"image_caption": [],
|
| 227 |
+
"image_footnote": [],
|
| 228 |
+
"bbox": [
|
| 229 |
+
576,
|
| 230 |
+
180,
|
| 231 |
+
683,
|
| 232 |
+
250
|
| 233 |
+
],
|
| 234 |
+
"page_idx": 1
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "image",
|
| 238 |
+
"img_path": "images/e78dfc977cfa1da6dbfc041ec1b0feb47a2c90e32a6bb42a77a29abc47661c7f.jpg",
|
| 239 |
+
"image_caption": [
|
| 240 |
+
"Frog $\\rightarrow$ Chameleon $\\ell_0:52$"
|
| 241 |
+
],
|
| 242 |
+
"image_footnote": [],
|
| 243 |
+
"bbox": [
|
| 244 |
+
702,
|
| 245 |
+
109,
|
| 246 |
+
808,
|
| 247 |
+
178
|
| 248 |
+
],
|
| 249 |
+
"page_idx": 1
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"type": "image",
|
| 253 |
+
"img_path": "images/f7aeb5c8a8ee5944b22f1d3c7fea8cfe120695406f71ec6c09e5e4bfa7662647.jpg",
|
| 254 |
+
"image_caption": [],
|
| 255 |
+
"image_footnote": [],
|
| 256 |
+
"bbox": [
|
| 257 |
+
702,
|
| 258 |
+
180,
|
| 259 |
+
808,
|
| 260 |
+
250
|
| 261 |
+
],
|
| 262 |
+
"page_idx": 1
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"type": "text",
|
| 266 |
+
"text": "initialization) to find smaller input perturbations but suffer from time or memory limitations, hindering their scalability to larger networks or high-dimensional data (Brendel et al., 2019a; Césaire et al., 2021). Other attacks execute faster, but their returned solution is typically less accurate and largely suboptimal (Matyasko & Chau, 2021; Pintor et al., 2021). This results in overestimating adversarial robustness and, in turn, contributes to spreading a false sense of security, hindering the development of effective defense mechanisms (Carlini et al., 2019; Pintor et al., 2022). Developing a reliable, scalable, and compelling method to assess the robustness of DNN models against sparse perturbations with minimum $\\ell_0$ norm remains thus a relevant and challenging open problem.",
|
| 267 |
+
"bbox": [
|
| 268 |
+
169,
|
| 269 |
+
375,
|
| 270 |
+
826,
|
| 271 |
+
488
|
| 272 |
+
],
|
| 273 |
+
"page_idx": 1
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"type": "text",
|
| 277 |
+
"text": "In this work, we propose a novel $\\ell_0$ -norm attack, named $\\sigma$ -zero, which iteratively promotes the sparsity of the adversarial perturbation by minimizing its $\\ell_0$ norm (see Figure 1 and Sect. 2). To overcome the limitations of previous approaches, our attack leverages two main technical contributions: (i) a smooth, differentiable approximation of the $\\ell_0$ norm to enable the minimization of the attack loss via gradient descent; and (ii) an adaptive projection operator that dynamically increases sparsity to further reduce the perturbation size while keeping the perturbed sample in the adversarial region.",
|
| 278 |
+
"bbox": [
|
| 279 |
+
169,
|
| 280 |
+
493,
|
| 281 |
+
823,
|
| 282 |
+
578
|
| 283 |
+
],
|
| 284 |
+
"page_idx": 1
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"type": "text",
|
| 288 |
+
"text": "Our experiments (Sect. 3) provide compelling evidence of the remarkable performance of $\\sigma$ -zero. We evaluate it on 3 well-known benchmark datasets (i.e., MNIST, CIFAR10, and ImageNet), using 22 different models from Robustbench (Croce et al. 2021) and the corresponding official repositories. We compare the performance of $\\sigma$ -zero against more than 10 competing attacks, totaling almost 450 different comparisons. Our analysis shows that $\\sigma$ -zero outperforms state-of-the-art attacks in terms of both attack success rate and perturbation size (lower $\\ell_0$ norm), while being also significantly faster (i.e., requiring fewer queries and lower runtime). Our attack also provides some additional advantages: (i) it does not require any sophisticated, time-consuming hyperparameter tuning; (ii) it does not require being initialized from an adversarial input; (iii) it is less likely to fail, i.e., it consistently achieves an attack success rate of $100\\%$ for sufficiently-large perturbation budgets, thereby enabling more reliable robustness evaluations (Carlini et al., 2019). We thus believe that $\\sigma$ -zero will foster significant advancements in the development of better robustness evaluation tools and more robust models against sparse attacks. We conclude the paper by discussing related work (Sect. 4), along with the main contributions and future research directions (Sect. 5).",
|
| 289 |
+
"bbox": [
|
| 290 |
+
169,
|
| 291 |
+
583,
|
| 292 |
+
826,
|
| 293 |
+
780
|
| 294 |
+
],
|
| 295 |
+
"page_idx": 1
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"type": "text",
|
| 299 |
+
"text": "2 $\\sigma$ -ZERO: MINIMUM $\\ell_0$ -NORM ATTACKS",
|
| 300 |
+
"text_level": 1,
|
| 301 |
+
"bbox": [
|
| 302 |
+
171,
|
| 303 |
+
800,
|
| 304 |
+
535,
|
| 305 |
+
816
|
| 306 |
+
],
|
| 307 |
+
"page_idx": 1
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"type": "text",
|
| 311 |
+
"text": "We present here $\\sigma$ -zero, a gradient-based attack that finds minimum $\\ell_0$ -norm adversarial examples.",
|
| 312 |
+
"bbox": [
|
| 313 |
+
169,
|
| 314 |
+
832,
|
| 315 |
+
823,
|
| 316 |
+
848
|
| 317 |
+
],
|
| 318 |
+
"page_idx": 1
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"type": "text",
|
| 322 |
+
"text": "Threat Model. We assume that the attacker has complete access to the target model, including its architecture and trained parameters, and exploits its gradient for staging white-box untargeted attacks (Carlini & Wagner, 2017b; Biggio & Roli, 2018). This setting is useful for worst-case evaluation of the adversarial robustness of DNNs, providing an empirical assessment of the performance degradation that may be incurred under attack. Note that this is the standard setting adopted",
|
| 323 |
+
"bbox": [
|
| 324 |
+
169,
|
| 325 |
+
854,
|
| 326 |
+
825,
|
| 327 |
+
925
|
| 328 |
+
],
|
| 329 |
+
"page_idx": 1
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"type": "header",
|
| 333 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 334 |
+
"bbox": [
|
| 335 |
+
171,
|
| 336 |
+
32,
|
| 337 |
+
478,
|
| 338 |
+
47
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 1
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "page_number",
|
| 344 |
+
"text": "2",
|
| 345 |
+
"bbox": [
|
| 346 |
+
493,
|
| 347 |
+
948,
|
| 348 |
+
503,
|
| 349 |
+
959
|
| 350 |
+
],
|
| 351 |
+
"page_idx": 1
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"type": "text",
|
| 355 |
+
"text": "in previous work for gradient-based adversarial robustness evaluations (Carlini & Wagner, 2017b; Brendel et al., 2019b; Croce et al., 2021; Pintor et al., 2021).",
|
| 356 |
+
"bbox": [
|
| 357 |
+
168,
|
| 358 |
+
103,
|
| 359 |
+
823,
|
| 360 |
+
133
|
| 361 |
+
],
|
| 362 |
+
"page_idx": 2
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"type": "text",
|
| 366 |
+
"text": "Problem Formulation. In this work, we seek untargeted minimum $\\ell_0$ -norm adversarial perturbations that steer the model's decision towards misclassification (Carlini & Wagner 2017b). To this end, let $\\mathbf{x} \\in \\mathcal{X} = [0,1]^d$ be a $d$ -dimensional input sample, $y \\in \\mathcal{Y} = \\{1,\\dots,l\\}$ its associated true label, and $f: \\mathcal{X} \\times \\Theta \\mapsto \\mathcal{Y}$ the target model, parameterized by $\\theta \\in \\Theta$ . While $f$ outputs the predicted label, we will also use $f_k$ to denote the continuous-valued output (logit) for class $k \\in \\mathcal{Y}$ . The goal of our attack is to find the minimum $\\ell_0$ -norm adversarial perturbation $\\delta^\\star$ such that the corresponding adversarial example $\\mathbf{x}^\\star = \\mathbf{x} + \\delta^\\star$ is misclassified by $f$ . This can be formalized as:",
|
| 367 |
+
"bbox": [
|
| 368 |
+
169,
|
| 369 |
+
138,
|
| 370 |
+
826,
|
| 371 |
+
237
|
| 372 |
+
],
|
| 373 |
+
"page_idx": 2
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"type": "equation",
|
| 377 |
+
"text": "\n$$\n\\delta^ {\\star} \\in \\arg \\min _ {\\delta} \\| \\delta \\| _ {0}, \\tag {1}\n$$\n",
|
| 378 |
+
"text_format": "latex",
|
| 379 |
+
"bbox": [
|
| 380 |
+
383,
|
| 381 |
+
242,
|
| 382 |
+
825,
|
| 383 |
+
265
|
| 384 |
+
],
|
| 385 |
+
"page_idx": 2
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"type": "equation",
|
| 389 |
+
"text": "\n$$\n\\text {s . t .} \\quad f (\\mathbf {x} + \\boldsymbol {\\delta}, \\boldsymbol {\\theta}) \\neq y, \\tag {2}\n$$\n",
|
| 390 |
+
"text_format": "latex",
|
| 391 |
+
"bbox": [
|
| 392 |
+
455,
|
| 393 |
+
267,
|
| 394 |
+
825,
|
| 395 |
+
284
|
| 396 |
+
],
|
| 397 |
+
"page_idx": 2
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"type": "equation",
|
| 401 |
+
"text": "\n$$\n\\mathbf {x} + \\boldsymbol {\\delta} \\in [ 0, 1 ] ^ {d}, \\tag {3}\n$$\n",
|
| 402 |
+
"text_format": "latex",
|
| 403 |
+
"bbox": [
|
| 404 |
+
491,
|
| 405 |
+
286,
|
| 406 |
+
825,
|
| 407 |
+
305
|
| 408 |
+
],
|
| 409 |
+
"page_idx": 2
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"type": "text",
|
| 413 |
+
"text": "where $\\| \\cdot \\|_0$ denotes the $\\ell_0$ norm, which counts the number of non-zero components. The hard constraint in Eq. (2) ensures that the perturbation $\\delta$ is valid only if the target model $f$ misclassifies the perturbed sample $\\mathbf{x} + \\boldsymbol{\\delta}$ , while the box constraint in Eq. (3) ensures that the perturbed sample lies in $[0,1]^d$ . Since the problem in Eqs. (1)-(3) can not be solved directly, we reformulate it as:",
|
| 414 |
+
"bbox": [
|
| 415 |
+
169,
|
| 416 |
+
310,
|
| 417 |
+
823,
|
| 418 |
+
369
|
| 419 |
+
],
|
| 420 |
+
"page_idx": 2
|
| 421 |
+
},
|
| 422 |
+
{
|
| 423 |
+
"type": "equation",
|
| 424 |
+
"text": "\n$$\n\\boldsymbol {\\delta} ^ {\\star} \\in \\arg \\min _ {\\boldsymbol {\\delta}} \\mathcal {L} (\\mathbf {x} + \\boldsymbol {\\delta}, y, \\boldsymbol {\\theta}) + \\frac {1}{d} \\hat {\\ell} _ {0} (\\boldsymbol {\\delta}) \\tag {4}\n$$\n",
|
| 425 |
+
"text_format": "latex",
|
| 426 |
+
"bbox": [
|
| 427 |
+
359,
|
| 428 |
+
373,
|
| 429 |
+
825,
|
| 430 |
+
402
|
| 431 |
+
],
|
| 432 |
+
"page_idx": 2
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "equation",
|
| 436 |
+
"text": "\n$$\n\\text {s . t .} \\quad \\mathbf {x} + \\boldsymbol {\\delta} \\in [ 0, 1 ] ^ {d}, \\tag {5}\n$$\n",
|
| 437 |
+
"text_format": "latex",
|
| 438 |
+
"bbox": [
|
| 439 |
+
431,
|
| 440 |
+
404,
|
| 441 |
+
825,
|
| 442 |
+
422
|
| 443 |
+
],
|
| 444 |
+
"page_idx": 2
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"type": "text",
|
| 448 |
+
"text": "where we use a differentiable approximation $\\hat{\\ell}_0(\\delta)$ instead of $||\\delta ||_0$ , and normalize it with respect to the number of features $d$ to ensure that its value is within the interval [0, 1]. The loss $\\mathcal{L}$ is defined as:",
|
| 449 |
+
"bbox": [
|
| 450 |
+
169,
|
| 451 |
+
431,
|
| 452 |
+
826,
|
| 453 |
+
460
|
| 454 |
+
],
|
| 455 |
+
"page_idx": 2
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"type": "equation",
|
| 459 |
+
"text": "\n$$\n\\mathcal {L} (\\mathbf {x}, y, \\boldsymbol {\\theta}) = \\max \\left(f _ {y} (\\mathbf {x}, \\boldsymbol {\\theta}) - \\max _ {k \\neq y} f _ {k} (\\mathbf {x}, \\boldsymbol {\\theta}), 0\\right) + \\mathbb {I} (f (\\mathbf {x}, \\boldsymbol {\\theta}) = y). \\tag {6}\n$$\n",
|
| 460 |
+
"text_format": "latex",
|
| 461 |
+
"bbox": [
|
| 462 |
+
269,
|
| 463 |
+
467,
|
| 464 |
+
825,
|
| 465 |
+
501
|
| 466 |
+
],
|
| 467 |
+
"page_idx": 2
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "text",
|
| 471 |
+
"text": "The first term in $\\mathcal{L}$ represents the logit difference, which is positive when the sample is correctly assigned to the true class $y$ , and clipped to zero when it is misclassified (Carlini & Wagner, 2017b). The second term merely adds 1 to the loss if the sample is correctly classified. This ensures that $\\mathcal{L} = 0$ only when an adversarial example is found and $\\mathcal{L} \\geq 1$ otherwise. In practice, when minimizing the objective in Eq. (4), this loss term induces an alternate optimization process between minimizing the loss function itself (to find an adversarial example) and minimizing the $\\ell_0$ -norm of the adversarial perturbation (when an adversarial example is found). It is also worth remarking that, conversely to the objective function proposed by Carlini & Wagner (2017b), our objective does not require tuning any trade-off hyperparameters to balance between minimizing the loss and reducing the perturbation size, thereby avoiding a computationally expensive line search for each input sample.",
|
| 472 |
+
"bbox": [
|
| 473 |
+
169,
|
| 474 |
+
506,
|
| 475 |
+
826,
|
| 476 |
+
647
|
| 477 |
+
],
|
| 478 |
+
"page_idx": 2
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"text": "$\\ell_0$ -norm Approximation. Besides the formalization of the attack objective, one of the main technical advantages of $\\sigma$ -zero is the smooth, differentiable approximation of the $\\ell_0$ norm, thereby enabling the use of gradient-based optimization. To this end, we first note that the $\\ell_0$ -norm of a vector can be rewritten as $\\| \\mathbf{x}\\| _0 = \\sum_{i = 1}^d\\mathrm{sign}(x_i)^2$ , and then approximate the sign function as $\\mathrm{sign}(x_i)\\approx x_i / \\sqrt{x_i^2 + \\sigma}$ , where $\\sigma >0$ is a smoothing hyperparameter that makes the approximation sharper as $\\sigma \\rightarrow 0$ . This, in turn, yields the following smooth approximation of the $\\ell_0$ norm:",
|
| 483 |
+
"bbox": [
|
| 484 |
+
169,
|
| 485 |
+
652,
|
| 486 |
+
823,
|
| 487 |
+
743
|
| 488 |
+
],
|
| 489 |
+
"page_idx": 2
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "equation",
|
| 493 |
+
"text": "\n$$\n\\hat {\\ell} _ {0} (\\mathbf {x}, \\sigma) = \\sum_ {i = 1} ^ {d} \\frac {x _ {i} ^ {2}}{x _ {i} ^ {2} + \\sigma}, \\sigma > 0, \\quad \\hat {\\ell} _ {0} (\\mathbf {x}, \\sigma) \\in [ 0, d ]. \\tag {7}\n$$\n",
|
| 494 |
+
"text_format": "latex",
|
| 495 |
+
"bbox": [
|
| 496 |
+
326,
|
| 497 |
+
750,
|
| 498 |
+
825,
|
| 499 |
+
790
|
| 500 |
+
],
|
| 501 |
+
"page_idx": 2
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"type": "text",
|
| 505 |
+
"text": "Adaptive Projection $\\Pi_{\\tau}$ . The considered $\\ell_0$ -norm approximation allows optimizing Eq. (4) via gradient descent. However, using such a smooth approximation tends to promote solutions that are not fully sparse, i.e., with many components that are very close to zero but not exactly equal to zero, thereby yielding inflated $\\ell_0$ -norm values. To overcome this issue, we introduce an adaptive projection operator $\\Pi_{\\tau}$ that sets to zero the components with a perturbation intensity lower than a given sparsity",
|
| 506 |
+
"bbox": [
|
| 507 |
+
169,
|
| 508 |
+
803,
|
| 509 |
+
826,
|
| 510 |
+
876
|
| 511 |
+
],
|
| 512 |
+
"page_idx": 2
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"type": "header",
|
| 516 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 517 |
+
"bbox": [
|
| 518 |
+
171,
|
| 519 |
+
32,
|
| 520 |
+
478,
|
| 521 |
+
47
|
| 522 |
+
],
|
| 523 |
+
"page_idx": 2
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"type": "page_footnote",
|
| 527 |
+
"text": "<sup>1</sup> Note that, when the source point $\\mathbf{x}$ is already misclassified by $f$ , the solution is simply $\\delta^{\\star} = 0$ .",
|
| 528 |
+
"bbox": [
|
| 529 |
+
192,
|
| 530 |
+
883,
|
| 531 |
+
763,
|
| 532 |
+
897
|
| 533 |
+
],
|
| 534 |
+
"page_idx": 2
|
| 535 |
+
},
|
| 536 |
+
{
|
| 537 |
+
"type": "page_footnote",
|
| 538 |
+
"text": "<sup>2</sup>While a sigmoid approximation may be adopted to overcome the non-differentiability of the $\\mathbb{I}$ term at the decision boundary, we simply set its gradient to zero everywhere, without any impact on the experimental results.",
|
| 539 |
+
"bbox": [
|
| 540 |
+
169,
|
| 541 |
+
896,
|
| 542 |
+
823,
|
| 543 |
+
924
|
| 544 |
+
],
|
| 545 |
+
"page_idx": 2
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"type": "page_number",
|
| 549 |
+
"text": "3",
|
| 550 |
+
"bbox": [
|
| 551 |
+
493,
|
| 552 |
+
948,
|
| 553 |
+
504,
|
| 554 |
+
959
|
| 555 |
+
],
|
| 556 |
+
"page_idx": 2
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"type": "text",
|
| 560 |
+
"text": "Input: $\\mathbf{x} \\in [0,1]^d$ , the input sample; y, the true class label; $\\theta$ , the target model; N, the number of iterations; $\\eta_0 = 1.0$ , the initial step size; $\\sigma = 10^{-3}$ , the $\\ell_0$ -norm smoothing hyperparameter; $\\tau_0 = 0.3$ , the initial sparsity threshold; $t = 0.01$ , the sparsity threshold adjustment factor.",
|
| 561 |
+
"bbox": [
|
| 562 |
+
171,
|
| 563 |
+
121,
|
| 564 |
+
825,
|
| 565 |
+
165
|
| 566 |
+
],
|
| 567 |
+
"page_idx": 3
|
| 568 |
+
},
|
| 569 |
+
{
|
| 570 |
+
"type": "text",
|
| 571 |
+
"text": "Output: $\\mathbf{x}^{\\star}$ , the minimum $\\ell_0$ -norm adversarial example.",
|
| 572 |
+
"bbox": [
|
| 573 |
+
171,
|
| 574 |
+
164,
|
| 575 |
+
545,
|
| 576 |
+
178
|
| 577 |
+
],
|
| 578 |
+
"page_idx": 3
|
| 579 |
+
},
|
| 580 |
+
{
|
| 581 |
+
"type": "equation",
|
| 582 |
+
"text": "\n$$\n\\begin{array}{l} 1\\;\\; \\delta \\leftarrow \\mathbf{0}; \\quad \\delta^{\\star} \\leftarrow \\infty; \\quad \\tau \\leftarrow \\tau_{0}; \\quad \\eta \\leftarrow \\eta_{0} \\\\ 2\\;\\; \\textbf{for } i \\textbf{ in } 1, \\dots, N \\textbf{ do} \\\\ \\end{array}\n$$\n",
|
| 583 |
+
"text_format": "latex",
|
| 584 |
+
"bbox": [
|
| 585 |
+
158,
|
| 586 |
+
176,
|
| 587 |
+
452,
|
| 588 |
+
205
|
| 589 |
+
],
|
| 590 |
+
"page_idx": 3
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "code",
|
| 594 |
+
"sub_type": "algorithm",
|
| 595 |
+
"code_caption": [
|
| 596 |
+
"Algorithm 1 $\\sigma$ -zero Attack Algorithm."
|
| 597 |
+
],
|
| 598 |
+
"code_body": "$\\nabla \\mathbf{g}\\gets \\nabla_{\\pmb{\\delta}}[\\mathcal{L}(\\mathbf{x} + \\pmb {\\delta},y,\\pmb {\\theta}) + \\frac{1}{d}\\hat{\\ell}_0(\\pmb {\\delta},\\sigma)]$ Gradient Descent for Eq. (4). \n $\\nabla \\mathbf{g}\\gets \\nabla \\mathbf{g} / \\| \\nabla \\mathbf{g}\\|_{\\infty}$ Gradient Normalization. \n $\\delta \\leftarrow \\mathrm{clip}(\\mathbf{x} + [\\pmb {\\delta} - \\eta \\cdot \\nabla \\mathbf{g}]) - \\mathbf{x}$ Box Constraints. \n $\\delta \\leftarrow \\Pi_{\\tau}(\\delta)$ Adaptive Projection Operator. \n $\\eta = \\mathrm{cosine\\_annealing}(\\eta_0,i)$ Learning Rate Decay. \nif $\\mathcal{L}(\\mathbf{x} + \\pmb {\\delta},y,\\pmb {\\theta})\\leq 0$: $\\tau \\leftarrow \\tau + t\\cdot \\eta$, else $\\tau \\leftarrow \\tau - t\\cdot \\eta$ Adaptive Adjustment for $\\tau$ \nif $\\mathcal{L}(\\mathbf{x} + \\pmb {\\delta},y,\\pmb {\\theta})\\leq 0\\wedge \\| \\pmb {\\delta}\\| _0 < \\| \\pmb {\\delta}^{\\star}\\| _0$: $\\pmb{\\delta}^{\\star}\\gets \\pmb{\\delta}$",
|
| 599 |
+
"bbox": [
|
| 600 |
+
158,
|
| 601 |
+
205,
|
| 602 |
+
823,
|
| 603 |
+
308
|
| 604 |
+
],
|
| 605 |
+
"page_idx": 3
|
| 606 |
+
},
|
| 607 |
+
{
|
| 608 |
+
"type": "text",
|
| 609 |
+
"text": "10 end",
|
| 610 |
+
"bbox": [
|
| 611 |
+
155,
|
| 612 |
+
310,
|
| 613 |
+
202,
|
| 614 |
+
321
|
| 615 |
+
],
|
| 616 |
+
"page_idx": 3
|
| 617 |
+
},
|
| 618 |
+
{
|
| 619 |
+
"type": "equation",
|
| 620 |
+
"text": "\n$$\n\\begin{array}{l} 11\\;\\; \\textbf{if } \\mathcal{L}(\\mathbf{x} + \\delta^{\\star}, y, \\theta) > 0: \\delta^{\\star} \\leftarrow \\infty \\\\ 12\\;\\; \\textbf{return } \\mathbf{x}^{\\star} \\leftarrow \\mathbf{x} + \\delta^{\\star} \\\\ \\end{array}\n$$\n",
|
| 621 |
+
"text_format": "latex",
|
| 622 |
+
"bbox": [
|
| 623 |
+
155,
|
| 624 |
+
323,
|
| 625 |
+
395,
|
| 626 |
+
352
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 3
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "text",
|
| 632 |
+
"text": "threshold $\\tau$ in each iteration. The sparsity threshold $\\tau$ is initialized with a starting value $\\tau_0$ and then dynamically adjusted for each sample during each iteration; in particular, it is increased to find sparser perturbations when the current sample is already adversarial, while it is decreased otherwise. The updates to $\\tau$ are proportional to the step size and follow its annealing strategy, as detailed below.",
|
| 633 |
+
"bbox": [
|
| 634 |
+
169,
|
| 635 |
+
393,
|
| 636 |
+
823,
|
| 637 |
+
450
|
| 638 |
+
],
|
| 639 |
+
"page_idx": 3
|
| 640 |
+
},
|
| 641 |
+
{
|
| 642 |
+
"type": "text",
|
| 643 |
+
"text": "Solution Algorithm. Our attack, given as Algorithm 1, solves the problem in Eqs. (4)-(5) via a fast and memory-efficient gradient-based optimization. After initializing the adversarial perturbation $\\delta = 0$ (line 1), it computes the gradient of the objective in Eq. (4) with respect to $\\delta$ (line 3). The gradient is then normalized such that its largest components (in absolute value) equal $\\pm 1$ (line 4). This stabilizes the optimization by making the update independent from the gradient size, and also makes the selection of the step size independent from the input dimensionality (Rony et al., 2018; Pintor et al., 2021). We then update $\\delta$ to minimize the objective via gradient descent while also enforcing the box constraints in Eq. (5) through the usage of the clip operator (line 5). We increase sparsity in $\\delta$ by zeroing all components lower than the current sparsity threshold $\\tau$ (line 6), as discussed in the previous paragraph. We then decrease the step size $\\eta$ via cosine annealing (line 7), as suggested by Rony et al. (2018); Pintor et al. (2021), and adjust the sparsity threshold $\\tau$ accordingly (line 8). In particular, if the current sample is adversarial, we increase $\\tau$ by $t \\cdot \\eta$ to promote sparser perturbations; otherwise, we decrease $\\tau$ by the same amount to promote the minimization of $\\mathcal{L}$ . The above process is repeated for $N$ iterations while keeping track of the best solution found, i.e., the adversarial perturbation $\\delta^{\\star}$ with the lowest $\\ell_0$ norm (line 9). If no adversarial example is found, the algorithm sets $\\delta^{\\star} = \\infty$ (line 11). It terminates by returning $\\mathbf{x}^{\\star} = \\mathbf{x} + \\delta^{\\star}$ (line 12).",
|
| 644 |
+
"bbox": [
|
| 645 |
+
169,
|
| 646 |
+
455,
|
| 647 |
+
826,
|
| 648 |
+
681
|
| 649 |
+
],
|
| 650 |
+
"page_idx": 3
|
| 651 |
+
},
|
| 652 |
+
{
|
| 653 |
+
"type": "text",
|
| 654 |
+
"text": "Remarks. To summarize, the main contributions behind $\\sigma$ -zero are: (i) the use of a smooth $\\ell_0$ -norm approximation, along with the definition of an appropriate objective (Eq. 4), to enable optimizing $\\ell_0$ -norm adversarial examples via gradient descent; and (ii) the introduction of an adaptive projection operator to further improve sparsity during the optimization. Our algorithm leverages also common strategies like gradient normalization and step size annealing to speed up convergence. As reported by our experiments, $\\sigma$ -zero provides a more effective and efficient $\\ell_0$ -norm attack that (i) is robust to different hyperparameter choices; (ii) does not require any adversarial initialization; and (iii) enables more reliable robustness evaluations, being able to find adversarial examples also when the competing attacks may fail (Carlini et al., 2019; Pintor et al., 2022).",
|
| 655 |
+
"bbox": [
|
| 656 |
+
169,
|
| 657 |
+
686,
|
| 658 |
+
826,
|
| 659 |
+
813
|
| 660 |
+
],
|
| 661 |
+
"page_idx": 3
|
| 662 |
+
},
|
| 663 |
+
{
|
| 664 |
+
"type": "text",
|
| 665 |
+
"text": "3 EXPERIMENTS",
|
| 666 |
+
"text_level": 1,
|
| 667 |
+
"bbox": [
|
| 668 |
+
171,
|
| 669 |
+
843,
|
| 670 |
+
326,
|
| 671 |
+
858
|
| 672 |
+
],
|
| 673 |
+
"page_idx": 3
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"type": "text",
|
| 677 |
+
"text": "We report here an extensive experimental evaluation comparing $\\sigma$ -zero against 11 state-of-the-art sparse attacks, including both $\\ell_0$ - and $\\ell_1$ -norm attacks. We test all attacks using different settings on 18 distinct models and 3 different datasets, yielding almost 450 different comparisons in total.",
|
| 678 |
+
"bbox": [
|
| 679 |
+
169,
|
| 680 |
+
881,
|
| 681 |
+
823,
|
| 682 |
+
925
|
| 683 |
+
],
|
| 684 |
+
"page_idx": 3
|
| 685 |
+
},
|
| 686 |
+
{
|
| 687 |
+
"type": "header",
|
| 688 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 689 |
+
"bbox": [
|
| 690 |
+
171,
|
| 691 |
+
32,
|
| 692 |
+
478,
|
| 693 |
+
47
|
| 694 |
+
],
|
| 695 |
+
"page_idx": 3
|
| 696 |
+
},
|
| 697 |
+
{
|
| 698 |
+
"type": "page_number",
|
| 699 |
+
"text": "4",
|
| 700 |
+
"bbox": [
|
| 701 |
+
493,
|
| 702 |
+
948,
|
| 703 |
+
503,
|
| 704 |
+
959
|
| 705 |
+
],
|
| 706 |
+
"page_idx": 3
|
| 707 |
+
},
|
| 708 |
+
{
|
| 709 |
+
"type": "text",
|
| 710 |
+
"text": "3.1 EXPERIMENTAL SETUP",
|
| 711 |
+
"text_level": 1,
|
| 712 |
+
"bbox": [
|
| 713 |
+
171,
|
| 714 |
+
103,
|
| 715 |
+
377,
|
| 716 |
+
118
|
| 717 |
+
],
|
| 718 |
+
"page_idx": 4
|
| 719 |
+
},
|
| 720 |
+
{
|
| 721 |
+
"type": "text",
|
| 722 |
+
"text": "Datasets. We consider the three most popular datasets used for benchmarking adversarial robustness: MNIST (LeCun & Cortes, 2005), CIFAR-10 (Krizhevsky, 2009) and ImageNet (Krizhevsky et al., 2012). To evaluate the attack performance, we use the entire test set for MNIST and CIFAR-10 (with a batch size of 32), and a subset of 1000 test samples for ImageNet (with a batch size of 16).",
|
| 723 |
+
"bbox": [
|
| 724 |
+
169,
|
| 725 |
+
132,
|
| 726 |
+
826,
|
| 727 |
+
190
|
| 728 |
+
],
|
| 729 |
+
"page_idx": 4
|
| 730 |
+
},
|
| 731 |
+
{
|
| 732 |
+
"type": "text",
|
| 733 |
+
"text": "Models. We use a selection of both baseline and robust models to evaluate the attacks under different conditions. We evaluate $\\sigma$ -zero on a vast set of models to ensure its broad effectiveness and expose vulnerabilities that may not be revealed by other attacks (Croce & Hein, 2021). For the MNIST dataset, we consider two adversarially trained convolutional neural network (CNN) models by Rony et al. (2021), i.e., CNN-DDN and CNN-Trades. These models have been trained to be robust to both $\\ell_2$ and $\\ell_{\\infty}$ adversarial attacks. We denote them M1 and M2, respectively. For the CIFAR-10 and ImageNet datasets, we employ state-of-the-art robust models from RobustBench (Croce et al., 2021) and the paper's official repositories. For CIFAR-10, we adopt ten models, denoted as C1-C12. C1 (Carmon et al., 2019) and C2 (Augustin et al., 2020) combine training data augmentation with adversarial training to improve robustness to $\\ell_{\\infty}$ and $\\ell_2$ attacks. C3 (Croce & Hein, 2021) and C4 (Jiang et al., 2023) are $\\ell_1$ robust models. C5 (Croce et al., 2021) is a non-robust WideResNet-28-10 model. C6 (Gowal et al., 2021) uses generative models to artificially augment the original training set and improve adversarial robustness to generic $\\ell_p$ -norm attacks. C7 (Engstrom et al., 2019) is an adversarial trained model that is robust against $\\ell_2$ -norm attacks. C8 (Chen et al., 2020) is a robust ensemble model. C9 (Xu et al., 2023) is a recently proposed adversarial training defense robust to $\\ell_2$ attacks. C10 (Addepalli et al., 2022) enforces diversity during data augmentation and combines it with adversarial training. Lastly, C11 (Zhong et al., 2024) and C12 (Zhong et al., 2024) are two adversarial trained models robust against $\\ell_0$ -norm adversarial perturbations. 
For ImageNet, we consider a pretrained ResNet-18 denoted with I1 (He et al., 2015), and five robust models to $\\ell_{\\infty}$ -attacks, denoted with I2 (Engstrom et al., 2019), I3 (Hendrycks et al., 2021), I4 (Debenedetti et al., 2023), I5 (Wong et al., 2020), and I6 (Salman et al., 2020). Lastly, in the appendix, we present two $\\ell_0$ -robust models, C11 (Zhong et al., 2024) and C12 (Zhong et al., 2024), for CIFAR-10, along with two large $\\ell_{\\infty}$ -robust models, I7 (Peng et al., 2023) and I8 (Mo et al., 2022), for ImageNet.",
|
| 734 |
+
"bbox": [
|
| 735 |
+
169,
|
| 736 |
+
196,
|
| 737 |
+
826,
|
| 738 |
+
517
|
| 739 |
+
],
|
| 740 |
+
"page_idx": 4
|
| 741 |
+
},
|
| 742 |
+
{
|
| 743 |
+
"type": "text",
|
| 744 |
+
"text": "Attacks. We compare $\\sigma$ -zero against the following state-of-the-art minimum-norm attacks, in their $\\ell_0$ -norm variants: the Voting Folded Gaussian Attack (VFGA) attack (Césaire et al. 2021), the Primal-Dual Proximal Gradient Descent (PDPGD) attack (Matyasko & Chau 2021), the Brendel & Bethge (BB) attack (Brendel et al., 2019a), including also its variant with adversarial initialization (BBadv), and the Fast Minimum Norm (FMN) attack (Pintor et al. 2021). We also consider two state-of-the-art $\\ell_1$ -norm attacks as additional baselines, i.e., the Elastic-Net (EAD) attack (Chen et al. 2018) and SparseFool (SF) by Modas et al. (2019). All attacks are set to manipulate the input values independently; e.g., for CIFAR-10, the number of modifiable inputs is $3 \\times 32 \\times 32 = 3072$ .",
|
| 745 |
+
"bbox": [
|
| 746 |
+
169,
|
| 747 |
+
521,
|
| 748 |
+
826,
|
| 749 |
+
635
|
| 750 |
+
],
|
| 751 |
+
"page_idx": 4
|
| 752 |
+
},
|
| 753 |
+
{
|
| 754 |
+
"type": "text",
|
| 755 |
+
"text": "Hyperparameters. We run our experiments using the default hyperparameters from the original implementations provided in the authors' repositories, AdversarialLib (Rony & Ben Ayed) and Foolbox (Rauber et al., 2017). We set the maximum number of iterations to $N = 1000$ to ensure that all attacks reach convergence (Pintor et al., 2022). For $\\sigma$ -zero, we set $\\eta_0 = 1$ , $\\tau_0 = 0.3$ , $t = 0.01$ , and $\\sigma = 10^{-3}$ , and keep the same configuration for all models and datasets.",
|
| 756 |
+
"bbox": [
|
| 757 |
+
169,
|
| 758 |
+
640,
|
| 759 |
+
826,
|
| 760 |
+
712
|
| 761 |
+
],
|
| 762 |
+
"page_idx": 4
|
| 763 |
+
},
|
| 764 |
+
{
|
| 765 |
+
"type": "text",
|
| 766 |
+
"text": "Evaluation Metrics. For each attack, we report the Attack Success Rate (ASR) at different values of $k$ , denoted with $\\mathrm{ASR}_k$ , i.e., the fraction of successful attacks for which $\\| \\delta^{\\star} \\|_0 \\leq k$ , and the median value of $\\| \\delta^{\\star} \\|_0$ over the test samples, denoted with $\\tilde{\\ell}_0$ . We compare the computational effort of each attack considering the mean runtime (s) (per sample), the mean number of queries (q) (i.e., the total number of forwards and backwards required to perform the attack, divided by the number of samples), and the Video Random Access Memory (VRAM) consumed by the Graphics Processing Unit (GPU). We measure the runtime on a workstation with an NVIDIA A100 Tensor Core GPU (40 GB memory) and two Intel® Xeon® Gold 6238R processors. We evaluate memory consumption as the maximum VRAM used among all batches, representing the minimum requirement to run without failure.",
|
| 767 |
+
"bbox": [
|
| 768 |
+
169,
|
| 769 |
+
715,
|
| 770 |
+
826,
|
| 771 |
+
845
|
| 772 |
+
],
|
| 773 |
+
"page_idx": 4
|
| 774 |
+
},
|
| 775 |
+
{
|
| 776 |
+
"type": "list",
|
| 777 |
+
"sub_type": "ref_text",
|
| 778 |
+
"list_items": [
|
| 779 |
+
"3We utilize the Foolbox DatasetAttack (Foolbox, 2017) for adversarial initialization.",
|
| 780 |
+
"4Additional results using only $N = 100$ steps are reported in Appendix B.1",
|
| 781 |
+
"5To show that no specific hyperparameter tuning is required, additional results are reported in Appendix A.2",
|
| 782 |
+
"${}^{6}$ If no adversarial example is found for a given $\\mathbf{x}$ ,we set ${\\begin{Vmatrix}{\\mathbf{\\delta }}^{ * }\\end{Vmatrix}}_{0} = \\infty$ ,as done by Brendel et al. (2019a)."
|
| 783 |
+
],
|
| 784 |
+
"bbox": [
|
| 785 |
+
192,
|
| 786 |
+
867,
|
| 787 |
+
823,
|
| 788 |
+
925
|
| 789 |
+
],
|
| 790 |
+
"page_idx": 4
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"type": "header",
|
| 794 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 795 |
+
"bbox": [
|
| 796 |
+
171,
|
| 797 |
+
32,
|
| 798 |
+
478,
|
| 799 |
+
47
|
| 800 |
+
],
|
| 801 |
+
"page_idx": 4
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "page_number",
|
| 805 |
+
"text": "5",
|
| 806 |
+
"bbox": [
|
| 807 |
+
493,
|
| 808 |
+
948,
|
| 809 |
+
504,
|
| 810 |
+
959
|
| 811 |
+
],
|
| 812 |
+
"page_idx": 4
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "table",
|
| 816 |
+
"img_path": "images/e366bafd01fffcc39e1db8900bcd9b32a9710138d1358735fe51409c96312cf0.jpg",
|
| 817 |
+
"table_caption": [
|
| 818 |
+
"Table 1: Minimum-norm comparison results on MNIST, CIFAR10 and ImageNet with $N = 1000$ . For each attack and model (M), we report ASR at $k = 24, 50, \\infty$ , median perturbation size $\\tilde{\\ell}_0$ , mean runtime $s$ (in seconds), mean number of queries $q$ (in thousands), and maximum VRAM usage (in GB). When VFGA exceeds the VRAM limit, we re-run it using a smaller batch size, increasing its runtime $t$ . We denote those cases with the symbol $\\star$ . Remaining models in Appendix B, Table 6."
|
| 819 |
+
],
|
| 820 |
+
"table_footnote": [],
|
| 821 |
+
"table_body": "<table><tr><td>Attack</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR∞</td><td>\\( \\tilde{\\ell}_{0} \\)</td><td>s</td><td>q</td><td>VRAM</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR∞</td><td>\\( \\tilde{\\ell}_{0} \\)</td><td>s</td><td>q</td><td>VRAM</td></tr><tr><td colspan=\"17\">MNIST</td></tr><tr><td>SF</td><td></td><td>6.66</td><td>6.76</td><td>96.98</td><td>469</td><td>1.07</td><td>0.18</td><td>0.06</td><td></td><td>1.03</td><td>1.21</td><td>91.68</td><td>463</td><td>2.87</td><td>0.86</td><td>0.07</td></tr><tr><td>EAD</td><td></td><td>3.83</td><td>53.66</td><td>100.0</td><td>49</td><td>0.47</td><td>6.28</td><td>0.05</td><td></td><td>2.13</td><td>55.57</td><td>100.0</td><td>48</td><td>0.50</td><td>6.73</td><td>0.05</td></tr><tr><td>PDPGD</td><td></td><td>26.77</td><td>74.08</td><td>100.0</td><td>38</td><td>0.23</td><td>2.00</td><td>0.04</td><td></td><td>16.91</td><td>66.30</td><td>100.0</td><td>42</td><td>0.23</td><td>2.00</td><td>0.04</td></tr><tr><td>VFGA</td><td>MI</td><td>43.58</td><td>82.42</td><td>99.98</td><td>27</td><td>0.05</td><td>0.77</td><td>0.21</td><td>M2</td><td>5.00</td><td>39.33</td><td>99.95</td><td>57</td><td>0.05</td><td>1.33</td><td>0.21</td></tr><tr><td>FMN</td><td></td><td>35.90</td><td>93.74</td><td>100.0</td><td>29</td><td>0.21</td><td>2.00</td><td>0.04</td><td></td><td>50.74</td><td>91.84</td><td>99.41</td><td>24</td><td>0.22</td><td>2.00</td><td>0.04</td></tr><tr><td>BB</td><td></td><td>71.23</td><td>97.86</td><td>100.0</td><td>18</td><td>0.90</td><td>2.99</td><td>0.05</td><td></td><td>56.53</td><td>91.62</td><td>100.0</td><td>18</td><td>0.74</td><td>3.71</td><td>0.05</td></tr><tr><td>BBadv</td><td></td><td>67.06</td><td>91.23</td><td>100.0</td><td>19</td><td>0.77</td><td>2.01</td><td>0.07</td><td></td><td>29.17</td><td>40.88</td><td>100.0</td><td>89</td><td>0.71</td><td>2.01</td><td>0.07</td></tr><tr><td>σ-zero</td><td></td><td>83.79</td><td>99.98</td><td>100.0</td><td>16</td><td>0.31</td><td>2.00</td><td>0.04
</td><td></td><td>98.03</td><td>100.0</td><td>100.0</td><td>9</td><td>0.31</td><td>2.00</td><td>0.04</td></tr><tr><td colspan=\"17\">CIFAR-10</td></tr><tr><td>SF</td><td></td><td>18.71</td><td>18.77</td><td>56.39</td><td>3072</td><td>11.31</td><td>1.40</td><td>1.62</td><td rowspan=\"7\">C3</td><td>20.46</td><td>24.36</td><td>58.29</td><td>3072</td><td>1.63</td><td>0.48</td><td>0.66</td></tr><tr><td>EAD</td><td></td><td>16.32</td><td>30.38</td><td>100.0</td><td>90</td><td>1.92</td><td>5.70</td><td>1.47</td><td>13.01</td><td>13.23</td><td>100.0</td><td>800</td><td>0.94</td><td>4.89</td><td>0.65</td></tr><tr><td>PDPGD</td><td rowspan=\"5\">CI</td><td>26.84</td><td>42.50</td><td>100.0</td><td>63</td><td>0.64</td><td>2.00</td><td>1.32</td><td>22.30</td><td>35.13</td><td>100.0</td><td>75</td><td>0.41</td><td>2.00</td><td>0.59</td></tr><tr><td>VFGA</td><td>51.06</td><td>75.37</td><td>99.92</td><td>24</td><td>0.59</td><td>0.78</td><td>11.71</td><td>28.47</td><td>49.98</td><td>99.72</td><td>51</td><td>0.32</td><td>1.25</td><td>4.44</td></tr><tr><td>FMN</td><td>48.89</td><td>74.70</td><td>100.0</td><td>26</td><td>0.59</td><td>2.00</td><td>1.31</td><td>27.45</td><td>48.87</td><td>100.0</td><td>52</td><td>0.24</td><td>2.00</td><td>0.60</td></tr><tr><td>BB</td><td>13.27</td><td>14.24</td><td>14.70</td><td>∞</td><td>0.63</td><td>2.05</td><td>1.47</td><td>16.88</td><td>22.91</td><td>27.64</td><td>∞</td><td>1.04</td><td>2.25</td><td>0.65</td></tr><tr><td>BBadv</td><td>65.96</td><td>90.57</td><td>100.0</td><td>16</td><td>4.68</td><td>2.01</td><td>1.64</td><td>36.47</td><td>72.43</td><td>100.0</td><td>34</td><td>5.28</td><td>2.01</td><td>0.64</td></tr><tr><td>σ-zero</td><td></td><td>76.53</td><td>95.38</td><td>100.0</td><td>11</td><td>0.73</td><td>2.00</td><td>1.53</td><td></td><td>38.60</td><td>73.02</td><td>100.0</td><td>32</td><td>0.43</td><td>2.00</td><td>0.71</td></tr><tr><td>SF</td><td></td><td>19.66</td><td>21.22</td><td>98.74</td><td>3070</td><td>3.62</td><td>0.46</td><td>1.9
0</td><td rowspan=\"7\">C4</td><td>31.76</td><td>43.07</td><td>91.14</td><td>69</td><td>4.32</td><td>1.49</td><td>0.66</td></tr><tr><td>EAD</td><td></td><td>9.73</td><td>11.42</td><td>100.0</td><td>360</td><td>2.53</td><td>5.62</td><td>1.89</td><td>24.21</td><td>24.78</td><td>100.0</td><td>768</td><td>1.04</td><td>4.99</td><td>0.65</td></tr><tr><td>PDPGD</td><td rowspan=\"5\">CI</td><td>28.02</td><td>45.15</td><td>100.0</td><td>55</td><td>1.12</td><td>2.00</td><td>1.8</td><td>26.89</td><td>42.38</td><td>100.0</td><td>66</td><td>0.40</td><td>2.00</td><td>0.60</td></tr><tr><td>VFGA</td><td>39.58</td><td>66.50</td><td>99.62</td><td>34</td><td>0.48</td><td>0.94</td><td>16.53</td><td>46.71</td><td>69.47</td><td>99.83</td><td>28</td><td>0.25</td><td>0.82</td><td>4.22</td></tr><tr><td>FMN</td><td>39.30</td><td>71.70</td><td>100.0</td><td>33</td><td>1.08</td><td>2.00</td><td>1.8</td><td>43.06</td><td>62.96</td><td>100.0</td><td>34</td><td>0.35</td><td>2.00</td><td>0.59</td></tr><tr><td>BB</td><td>38.73</td><td>56.78</td><td>58.64</td><td>33</td><td>2.31</td><td>2.89</td><td>1.89</td><td>25.95</td><td>27.98</td><td>29.50</td><td>∞</td><td>0.54</td><td>2.09</td><td>0.65</td></tr><tr><td>BBadv</td><td>70.07</td><td>96.31</td><td>100.0</td><td>17</td><td>3.92</td><td>2.01</td><td>1.99</td><td>53.17</td><td>82.46</td><td>100.0</td><td>22</td><td>3.03</td><td>2.01</td><td>0.65</td></tr><tr><td>σ-zero</td><td></td><td>74.63</td><td>97.55</td><td>100.0</td><td>15</td><td>1.41</td><td>2.00</td><td>1.92</td><td></td><td>55.42</td><td>82.92</td><td>100.0</td><td>20</td><td>0.42</td><td>2.00</td><td>0.72</td></tr><tr><td colspan=\"17\">ImageNet</td></tr><tr><td>EAD</td><td></td><td>35.4</td><td>36.3</td><td>100.0</td><td>460</td><td>4.13</td><td>2.69</td><td>0.46</td><td></td><td>27.0</td><td>28.4</td><td>100.0</td><td>981</td><td>19.25</td><td>5.49</td><td>1.41</td></tr><tr><td>VFGA</td><td 
rowspan=\"3\">II</td><td>57.9</td><td>72.5</td><td>99.9</td><td>14</td><td>1.22*</td><td>1.08</td><td>>40</td><td rowspan=\"3\">I3</td><td>46.7</td><td>59.5</td><td>97.9</td><td>31</td><td>6.93*</td><td>1.98</td><td>>40</td></tr><tr><td>FMN</td><td>62.6</td><td>81.0</td><td>100.0</td><td>12</td><td>0.73</td><td>2.00</td><td>0.66</td><td>49.1</td><td>67.7</td><td>100.0</td><td>25</td><td>1.98</td><td>2.00</td><td>2.30</td></tr><tr><td>BBadv</td><td>77.5</td><td>93.2</td><td>100.0</td><td>7</td><td>231.67</td><td>2.01</td><td>0.72</td><td>64.7</td><td>85.5</td><td>100.0</td><td>14</td><td>205.11</td><td>2.01</td><td>2.41</td></tr><tr><td>σ-zero</td><td></td><td>82.6</td><td>95.9</td><td>100.0</td><td>5</td><td>1.18</td><td>2.00</td><td>0.84</td><td></td><td>66.7</td><td>86.9</td><td>100.0</td><td>13</td><td>2.76</td><td>2.00</td><td>2.52</td></tr><tr><td>EAD</td><td></td><td>46.8</td><td>51.0</td><td>100.0</td><td>42</td><td>18.10</td><td>5.45</td><td>1.42</td><td rowspan=\"4\"></td><td>32.8</td><td>33.5</td><td>100.0</td><td>572</td><td>11.43</td><td>5.34</td><td>1.68</td></tr><tr><td>VFGA</td><td rowspan=\"3\">II</td><td>54.7</td><td>63.4</td><td>96.7</td><td>12</td><td>8.21*</td><td>2.35</td><td>>40</td><td>40.0</td><td>46.5</td><td>95.5</td><td>66</td><td>33.88*</td><td>3.97</td><td>>40</td></tr><tr><td>FMN</td><td>57.8</td><td>67.0</td><td>100.0</td><td>9</td><td>1.97</td><td>2.00</td><td>2.30</td><td>40.3</td><td>47.2</td><td>100.0</td><td>58</td><td>4.28</td><td>2.00</td><td>2.97</td></tr><tr><td>BBadv</td><td>71.0</td><td>82.3</td><td>100</td><td>4</td><td>182.65</td><td>2.01</td><td>2.40</td><td>46.8</td><td>59.8</td><td>100.0</td><td>31</td><td>178.06</td><td>2.01</td><td>3.07</td></tr><tr><td>σ-zero</td><td></td><td>76.9</td><td>87.4</td><td>100.0</td><td>3</td><td>2.75</td><td>2.00</td><td>2.52</td><td></td><td>50.7</td><td>65.1</td><td>100.0</td><td>23</td><td>5.72</td><td>2.00</td><td>3.20</td></tr></table>",
|
| 822 |
+
"bbox": [
|
| 823 |
+
181,
|
| 824 |
+
186,
|
| 825 |
+
823,
|
| 826 |
+
640
|
| 827 |
+
],
|
| 828 |
+
"page_idx": 5
|
| 829 |
+
},
|
| 830 |
+
{
|
| 831 |
+
"type": "text",
|
| 832 |
+
"text": "3.2 EXPERIMENTAL RESULTS",
|
| 833 |
+
"text_level": 1,
|
| 834 |
+
"bbox": [
|
| 835 |
+
171,
|
| 836 |
+
666,
|
| 837 |
+
393,
|
| 838 |
+
679
|
| 839 |
+
],
|
| 840 |
+
"page_idx": 5
|
| 841 |
+
},
|
| 842 |
+
{
|
| 843 |
+
"type": "text",
|
| 844 |
+
"text": "We report the success rate and computational effort metrics of $\\sigma$ -zero against minimum-norm attacks in Table 1 and fixed-budget attacks in Table 3-4. In these tables, we consider the most robust models for each dataset, and we provide the remaining results in Appendix B. Finally, for ImageNet, we narrow our analysis to EAD, FMN, BBadv, and VFGA minimum-norm attacks, as they surpass competing attacks on MNIST and CIFAR-10 in terms of ASR, perturbation size, or execution time.",
|
| 845 |
+
"bbox": [
|
| 846 |
+
169,
|
| 847 |
+
691,
|
| 848 |
+
826,
|
| 849 |
+
763
|
| 850 |
+
],
|
| 851 |
+
"page_idx": 5
|
| 852 |
+
},
|
| 853 |
+
{
|
| 854 |
+
"type": "text",
|
| 855 |
+
"text": "Effectiveness. The median values of $||\\delta^{\\star}||_{0}$ , denoted as $\\tilde{\\ell}_0$ , and the ASRs are reported in Table 1 for all models and datasets. To facilitate comparison, the attacks are sorted from the least to the most effective, on average. In all dataset-model configurations, $\\sigma$ -zero significantly outperforms all the considered attacks. Taking the best-performing attack among the fastest competitors as a reference (i.e., FMN), $\\sigma$ -zero is able to find smaller perturbations and higher ASRs in all configurations. In particular, on CIFAR-10, $\\sigma$ -zero reduces the median number of manipulated features from 52 to 32 against the most robust model (C3), with an average reduction of $49\\%$ across all models. On ImageNet, this improvement is even more pronounced, with a reduction of up to $58\\%$ . In the best case (I4), the median $||\\delta^{\\star}||_{0}$ is reduced from 58 to 23, and in the worst case (I2), from 9 to 3. Alternatively, the most competitive attack in finding small perturbations is BBadv, which is significantly slower and requires starting from an already-adversarial input. The $\\mathrm{ASR}_{\\infty}$ of BB",
|
| 856 |
+
"bbox": [
|
| 857 |
+
169,
|
| 858 |
+
768,
|
| 859 |
+
826,
|
| 860 |
+
925
|
| 861 |
+
],
|
| 862 |
+
"page_idx": 5
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"type": "header",
|
| 866 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 867 |
+
"bbox": [
|
| 868 |
+
171,
|
| 869 |
+
32,
|
| 870 |
+
478,
|
| 871 |
+
47
|
| 872 |
+
],
|
| 873 |
+
"page_idx": 5
|
| 874 |
+
},
|
| 875 |
+
{
|
| 876 |
+
"type": "page_number",
|
| 877 |
+
"text": "6",
|
| 878 |
+
"bbox": [
|
| 879 |
+
493,
|
| 880 |
+
948,
|
| 881 |
+
504,
|
| 882 |
+
959
|
| 883 |
+
],
|
| 884 |
+
"page_idx": 5
|
| 885 |
+
},
|
| 886 |
+
{
|
| 887 |
+
"type": "image",
|
| 888 |
+
"img_path": "images/b3cafc2a2945ee83cd0451fd805edca9c2c6eb11bc03c6df7b43cc191c43fce6.jpg",
|
| 889 |
+
"image_caption": [
|
| 890 |
+
"Figure 2: Robustness evaluation curves (ASR vs. perturbation budget $k$ ) for M2 on MNIST (left), C1 on CIFAR-10 (middle), and I1 on ImageNet (right)."
|
| 891 |
+
],
|
| 892 |
+
"image_footnote": [],
|
| 893 |
+
"bbox": [
|
| 894 |
+
171,
|
| 895 |
+
99,
|
| 896 |
+
387,
|
| 897 |
+
215
|
| 898 |
+
],
|
| 899 |
+
"page_idx": 6
|
| 900 |
+
},
|
| 901 |
+
{
|
| 902 |
+
"type": "image",
|
| 903 |
+
"img_path": "images/d9d47b2223b7d7b51a869ec8a51bd9b8a319f2db09242bdeccf132485892709c.jpg",
|
| 904 |
+
"image_caption": [],
|
| 905 |
+
"image_footnote": [],
|
| 906 |
+
"bbox": [
|
| 907 |
+
388,
|
| 908 |
+
99,
|
| 909 |
+
607,
|
| 910 |
+
215
|
| 911 |
+
],
|
| 912 |
+
"page_idx": 6
|
| 913 |
+
},
|
| 914 |
+
{
|
| 915 |
+
"type": "image",
|
| 916 |
+
"img_path": "images/0f2970810b95697f83db64c95c1755d1eae9288c60746110cb3f2b0626788e5e.jpg",
|
| 917 |
+
"image_caption": [],
|
| 918 |
+
"image_footnote": [],
|
| 919 |
+
"bbox": [
|
| 920 |
+
609,
|
| 921 |
+
101,
|
| 922 |
+
823,
|
| 923 |
+
215
|
| 924 |
+
],
|
| 925 |
+
"page_idx": 6
|
| 926 |
+
},
|
| 927 |
+
{
|
| 928 |
+
"type": "text",
|
| 929 |
+
"text": "(i.e., without adversarial initialization) indeed decreases with increasing input dimensionality (e.g., CIFAR-10). This occurs because BB often stops unexpectedly before reaching the specified number of steps due to initialization failures; in particular, Table 1 shows that the median perturbation size found by BB is sometimes $\\infty$ , as its $\\mathrm{ASR}_{\\infty}$ is lower than $50\\%$ . Although BBadv does not suffer from the same issue, as it leverages adversarial initialization, it is still outperformed by $\\sigma$ -zero. Specifically, $\\sigma$ -zero reduces the $\\ell_0$ norm of the adversarial examples from 16 to 11 in the best case (C1), while achieving an average improvement of $24\\%$ across all dataset-model configurations.",
|
| 930 |
+
"bbox": [
|
| 931 |
+
169,
|
| 932 |
+
301,
|
| 933 |
+
826,
|
| 934 |
+
398
|
| 935 |
+
],
|
| 936 |
+
"page_idx": 6
|
| 937 |
+
},
|
| 938 |
+
{
|
| 939 |
+
"type": "text",
|
| 940 |
+
"text": "Efficiency. We evaluate the computational effort required to run each attack by reporting in Table 1 the mean runtime $s$ (in seconds), the mean number of queries $q$ issued to the model (in thousands), and the maximum VRAM used. Note that, while the runtime $s$ and the consumed VRAM may depend on the attack implementation, the number of queries $q$ counts the total number of forward and backward passes performed by the attack, thus providing a fairer evaluation of the attack complexity. In fact, some attacks perform more than 2000 queries even if $N = 1000$ , i.e., they perform more than one forward and one backward pass per iteration (see, e.g., EAD and BB). Other attacks, instead, might use less than 2000 queries as they implement early stopping strategies. The results indicate that $\\sigma$ -zero exhibits similar runtime performance when compared to the fastest algorithms FMN, PDPGD, and VFGA, while preserving higher effectiveness. In contrast, when compared against the BBadv attack, which competes in terms of $\\tilde{\\ell}_0$ , our attack is much faster across all the dataset-model configurations, especially for ImageNet. For example, $\\sigma$ -zero is 10 times faster than BBadv on C4 and 100 times faster on I3 on ImageNet. This confirms that $\\sigma$ -zero establishes a better effectiveness-efficiency trade-off than that provided by state-of-the-art $\\ell_0$ -norm attacks.",
|
| 941 |
+
"bbox": [
|
| 942 |
+
169,
|
| 943 |
+
405,
|
| 944 |
+
826,
|
| 945 |
+
603
|
| 946 |
+
],
|
| 947 |
+
"page_idx": 6
|
| 948 |
+
},
|
| 949 |
+
{
|
| 950 |
+
"type": "text",
|
| 951 |
+
"text": "Reliability. Complementary to Table 1, we present the robustness evaluation curves in Figure 2 for each attack on M2, C1, and I1. In Appendix B.3, we include similar curves for all other configurations. These curves go beyond the only median statistic and $\\mathrm{ASR}_k$ , providing further evidence that $\\sigma$ -zero achieves higher ASRs with smaller $\\ell_0$ -norm perturbations compared to the competing attacks. More importantly, the ASR of $\\sigma$ -zero reaches almost always $100\\%$ as the perturbation budget grows, meaning that its optimization only rarely fails to find an adversarial example. In Appendix B.1, we further demonstrate that even when the number of iterations is reduced to $N = 100$ , $\\sigma$ -zero consistently achieves an $\\mathrm{ASR}_{\\infty}$ of $100\\%$ across all models. This is not observed with other attacks, which often fail when using fewer iterations, thereby increasing the risk of overestimating adversarial robustness. These results reinforce our previous findings, confirming that $\\sigma$ -zero can help mitigate the issue of overestimating adversarial robustness – a crucial aspect to foster scientific progress in defense developments and evaluations (Carlini et al., 2019; Pintor et al., 2022).",
|
| 952 |
+
"bbox": [
|
| 953 |
+
169,
|
| 954 |
+
608,
|
| 955 |
+
826,
|
| 956 |
+
777
|
| 957 |
+
],
|
| 958 |
+
"page_idx": 6
|
| 959 |
+
},
|
| 960 |
+
{
|
| 961 |
+
"type": "text",
|
| 962 |
+
"text": "Ablation Study. In Table 2 we present an ablation study to evaluate the relevance of $\\sigma$ -zero's components. Our findings indicate that all the non-trivial components in $\\sigma$ -zero are essential for ensuring the effectiveness of the attack. Specifically, we observe that the $\\ell_0$ -norm approximation $\\hat{\\ell}_0$ (Eq. 7, line 3) leads the optimization algorithm to perturb all input features, albeit with small contributions. The projection operator (line 6) plays a crucial role by significantly decreasing the number of perturbed features, effectively removing the least significant contributions. Furthermore, gradient normalization (line 4) accelerates convergence, enhancing efficiency. Lastly, the adaptive projection operator (line 8) fine-tunes the results, reduces the number of perturbed features, and mitigates the dependency on hyperparameter choices. These results underline the importance of each component in $\\sigma$ -zero, highlighting their contributions to the overall performance of the attack.",
|
| 963 |
+
"bbox": [
|
| 964 |
+
169,
|
| 965 |
+
782,
|
| 966 |
+
826,
|
| 967 |
+
924
|
| 968 |
+
],
|
| 969 |
+
"page_idx": 6
|
| 970 |
+
},
|
| 971 |
+
{
|
| 972 |
+
"type": "header",
|
| 973 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 974 |
+
"bbox": [
|
| 975 |
+
171,
|
| 976 |
+
32,
|
| 977 |
+
478,
|
| 978 |
+
47
|
| 979 |
+
],
|
| 980 |
+
"page_idx": 6
|
| 981 |
+
},
|
| 982 |
+
{
|
| 983 |
+
"type": "page_number",
|
| 984 |
+
"text": "7",
|
| 985 |
+
"bbox": [
|
| 986 |
+
493,
|
| 987 |
+
948,
|
| 988 |
+
503,
|
| 989 |
+
959
|
| 990 |
+
],
|
| 991 |
+
"page_idx": 6
|
| 992 |
+
},
|
| 993 |
+
{
|
| 994 |
+
"type": "table",
|
| 995 |
+
"img_path": "images/55b56e0aad13c01e2977aadadab8bf05f8a5db681ca87397ffb6b9048b291f13.jpg",
|
| 996 |
+
"table_caption": [
|
| 997 |
+
"Table 2: Ablation study on the $\\sigma$ -zero components integrated in Algorithm 1. Columns describe respectively: Gradient normalization factor (line 4); dynamic projection adjustment (line 8); projection operator $\\Pi_{\\tau}$ (line 6); and the $\\ell_0$ norm approximation $\\hat{\\ell}_0$ (line 3)."
|
| 998 |
+
],
|
| 999 |
+
"table_footnote": [],
|
| 1000 |
+
"table_body": "<table><tr><td>Model</td><td>Normalization</td><td>Adaptive τ</td><td>Projection</td><td>\\( \\hat{\\ell}_{0} \\)</td><td>\\( \\mathrm{ASR}_{10} \\)</td><td>\\( \\mathrm{ASR}_{50} \\)</td><td>ASR</td><td>\\( \\tilde{\\ell}_{0} \\)</td></tr><tr><td rowspan=\"6\">C10</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>21.68</td><td>73.02</td><td>100.0</td><td>32</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>21.89</td><td>71.66</td><td>100.0</td><td>32</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>16.81</td><td>39.76</td><td>100.0</td><td>65</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>12.95</td><td>13.23</td><td>100.0</td><td>505</td></tr><tr><td></td><td></td><td></td><td>✓</td><td>12.95</td><td>12.95</td><td>100.0</td><td>3004</td></tr><tr><td>✓</td><td></td><td></td><td>✓</td><td>12.95</td><td>12.95</td><td>100.0</td><td>3070</td></tr><tr><td rowspan=\"6\">C5</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>37.27</td><td>82.92</td><td>100.0</td><td>20</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>37.01</td><td>79.83</td><td>100.0</td><td>21</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>29.56</td><td>52.83</td><td>100.0</td><td>46</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>25.46</td><td>32.84</td><td>100.0</td><td>144</td></tr><tr><td></td><td></td><td></td><td>✓</td><td>23.78</td><td>23.78</td><td>100.0</td><td>3064</td></tr><tr><td>✓</td><td></td><td></td><td>✓</td><td>23.78</td><td>23.78</td><td>100.0</td><td>3068</td></tr></table>",
|
| 1001 |
+
"bbox": [
|
| 1002 |
+
191,
|
| 1003 |
+
157,
|
| 1004 |
+
812,
|
| 1005 |
+
363
|
| 1006 |
+
],
|
| 1007 |
+
"page_idx": 7
|
| 1008 |
+
},
|
| 1009 |
+
{
|
| 1010 |
+
"type": "table",
|
| 1011 |
+
"img_path": "images/201bb647a1e46ccff9dd4bafb044fdf321f7e85e9085cde2b8478085f2b18c31.jpg",
|
| 1012 |
+
"table_caption": [
|
| 1013 |
+
"Table 3: Fixed-budget comparison results with $N = 1000$ ( $N = 2000$ for Sparse-RS) on MNIST and CIFAR-10 at budgets $k = 24, 50, 100$ . Columns $q_{24}$ and $s_{24}$ show the average number of queries (in thousands) and the average execution time per sample (in seconds) at $k = 24$ ."
|
| 1014 |
+
],
|
| 1015 |
+
"table_footnote": [],
|
| 1016 |
+
"table_body": "<table><tr><td>Attack</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR100</td><td>q24</td><td>s24</td><td>VRAM</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR100</td><td>q24</td><td>s24</td><td>VRAM</td></tr><tr><td colspan=\"15\">MNIST</td></tr><tr><td>PGD-ℓ0</td><td></td><td>73.99</td><td>99.90</td><td>100.0</td><td>2.00</td><td>0.09</td><td>0.04</td><td></td><td>61.87</td><td>94.15</td><td>98.50</td><td>2.00</td><td>0.09</td><td>0.04</td></tr><tr><td>Sparse-RS</td><td></td><td>79.54</td><td>96.35</td><td>99.79</td><td>0.83</td><td>0.21</td><td>0.04</td><td></td><td>98.92</td><td>99.96</td><td>100.0</td><td>0.24</td><td>0.07</td><td>0.04</td></tr><tr><td>sPGDp</td><td>M1</td><td>65.55</td><td>97.97</td><td>99.99</td><td>0.46</td><td>0.09</td><td>0.05</td><td>M2</td><td>67.92</td><td>98.57</td><td>99.97</td><td>0.92</td><td>0.08</td><td>0.05</td></tr><tr><td>sPGDu</td><td></td><td>82.79</td><td>99.65</td><td>100.0</td><td>0.09</td><td>0.08</td><td>0.05</td><td></td><td>62.25</td><td>98.11</td><td>99.99</td><td>1.00</td><td>0.09</td><td>0.05</td></tr><tr><td>σ-zero</td><td></td><td>83.71</td><td>99.98</td><td>100.0</td><td>0.43</td><td>0.02</td><td>0.06</td><td></td><td>98.11</td><td>100.0</td><td>100.0</td><td>0.14</td><td>0.01</td><td>0.06</td></tr><tr><td colspan=\"15\">CIFAR-10</td></tr><tr><td>PGD-ℓ0</td><td></td><td>38.18</td><td>59.67</td><td>87.19</td><td>2.00</td><td>0.78</td><td>1.90</td><td></td><td>22.99</td><td>36.20</td><td>67.54</td><td>2.00</td><td>0.35</td><td>0.69</td></tr><tr><td>Sparse-RS</td><td 
rowspan=\"4\">C1</td><td>72.51</td><td>86.59</td><td>94.28</td><td>0.77</td><td>0.36</td><td>1.95</td><td></td><td>30.87</td><td>45.65</td><td>63.26</td><td>1.47</td><td>0.28</td><td>0.68</td></tr><tr><td>sPGDp</td><td>66.37</td><td>89.21</td><td>99.36</td><td>0.74</td><td>0.41</td><td>2.06</td><td>C3</td><td>31.82</td><td>58.62</td><td>93.19</td><td>1.39</td><td>0.17</td><td>0.73</td></tr><tr><td>sPGDu</td><td>66.33</td><td>91.07</td><td>99.75</td><td>0.72</td><td>0.41</td><td>2.06</td><td></td><td>36.16</td><td>70.06</td><td>98.07</td><td>1.30</td><td>0.16</td><td>0.73</td></tr><tr><td>σ-zero</td><td>77.08</td><td>95.33</td><td>99.95</td><td>0.65</td><td>0.29</td><td>2.07</td><td></td><td>38.67</td><td>73.00</td><td>98.53</td><td>1.33</td><td>0.15</td><td>0.75</td></tr><tr><td>PGD-ℓ0</td><td></td><td>32.41</td><td>59.19</td><td>89.22</td><td>2.00</td><td>0.57</td><td>2.46</td><td></td><td>34.35</td><td>44.99</td><td>68.61</td><td>2.00</td><td>0.35</td><td>0.70</td></tr><tr><td>Sparse-RS</td><td rowspan=\"4\">C2</td><td>59.24</td><td>79.81</td><td>92.43</td><td>1.04</td><td>0.35</td><td>2.46</td><td></td><td>49.35</td><td>63.01</td><td>76.51</td><td>1.11</td><td>0.37</td><td>0.68</td></tr><tr><td>sPGDp</td><td>58.91</td><td>88.15</td><td>99.42</td><td>0.89</td><td>0.39</td><td>2.57</td><td>C4</td><td>50.41</td><td>75.86</td><td>97.52</td><td>1.02</td><td>0.18</td><td>0.73</td></tr><tr><td>sPGDu</td><td>64.8</td><td>93.15</td><td>99.92</td><td>0.76</td><td>0.48</td><td>2.56</td><td></td><td>55.89</td><td>84.64</td><td>99.56</td><td>0.91</td><td>0.19</td><td>0.73</td></tr><tr><td>σ-zero</td><td>75.09</td><td>97.67</td><td>100.0</td><td>0.65</td><td>0.17</td><td>2.68</td><td></td><td>55.69</td><td>82.72</td><td>99.07</td><td>0.94</td><td>0.11</td><td>0.75</td></tr></table>",
|
| 1017 |
+
"bbox": [
|
| 1018 |
+
183,
|
| 1019 |
+
436,
|
| 1020 |
+
823,
|
| 1021 |
+
681
|
| 1022 |
+
],
|
| 1023 |
+
"page_idx": 7
|
| 1024 |
+
},
|
| 1025 |
+
{
|
| 1026 |
+
"type": "text",
|
| 1027 |
+
"text": "Comparison with Fixed-budget Attacks. We complement our analysis by comparing $\\sigma$ -zero with three fixed-budget $\\ell_0$ -norm attacks, i.e., the $\\ell_0$ -norm Projected Gradient Descent (PGD- $\\ell_0$ ) attack (Croce & Hein 2019), the Sparse Random Search (Sparse-RS) attack (Croce et al. 2022) and the Sparse-PGD attack (Zhong et al. 2024). For Sparse-PGD, we consider the implementation with sparse ( $\\mathrm{sPGD}_p$ ) and with unprojected ( $\\mathrm{sPGD}_u$ ) gradient. In contrast to minimum-norm attacks, fixed-budget attacks optimize adversarial examples within a given maximum perturbation budget $k$ . For a fairer comparison, as done in fixed-budget approaches, we early stop the $\\sigma$ -zero optimization process as soon as an adversarial example with an $\\ell_0$ -norm perturbation smaller than $k$ is found. In these evaluations, we set $N = 1000$ for $\\sigma$ -zero, PGD- $\\ell_0$ , sPGD $p$ , and sPGD $u$ , while using $N = 2000$ for Sparse-RS. Therefore, when using $N = 1000$ steps for $\\sigma$ -zero (which amounts to performing 1000 forward and 1000 backward calls), we set $N = 2000$ steps for Sparse-RS (which",
|
| 1028 |
+
"bbox": [
|
| 1029 |
+
169,
|
| 1030 |
+
712,
|
| 1031 |
+
826,
|
| 1032 |
+
867
|
| 1033 |
+
],
|
| 1034 |
+
"page_idx": 7
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "header",
|
| 1038 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1039 |
+
"bbox": [
|
| 1040 |
+
171,
|
| 1041 |
+
32,
|
| 1042 |
+
478,
|
| 1043 |
+
47
|
| 1044 |
+
],
|
| 1045 |
+
"page_idx": 7
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "page_footnote",
|
| 1049 |
+
"text": "Sparse-RS is a gradient-free (black-box) attack, which only requires query access to the target model. We consider it as an additional baseline in our experiments, but it should not be considered a direct competitor of gradient-based attacks, as it works under much stricter assumptions (i.e., no access to input gradients).",
|
| 1050 |
+
"bbox": [
|
| 1051 |
+
169,
|
| 1052 |
+
883,
|
| 1053 |
+
823,
|
| 1054 |
+
925
|
| 1055 |
+
],
|
| 1056 |
+
"page_idx": 7
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "page_number",
|
| 1060 |
+
"text": "8",
|
| 1061 |
+
"bbox": [
|
| 1062 |
+
493,
|
| 1063 |
+
948,
|
| 1064 |
+
503,
|
| 1065 |
+
959
|
| 1066 |
+
],
|
| 1067 |
+
"page_idx": 7
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "table",
|
| 1071 |
+
"img_path": "images/b2e241d40a75748ba0542aa48a13da97d498d883afd4b3459080c619b936c7c5.jpg",
|
| 1072 |
+
"table_caption": [
|
| 1073 |
+
"Table 4: Fixed-budget comparison results with $N = {1000}\\left( {N = {2000}\\text{for Sparse-RS}}\\right)$ on ImageNet at budgets $k = {100},{150}$ . See the caption of [Table 3] for further details."
|
| 1074 |
+
],
|
| 1075 |
+
"table_footnote": [],
|
| 1076 |
+
"table_body": "<table><tr><td>Attack</td><td>M</td><td>ASR100</td><td>ASR150</td><td>q100</td><td>s100</td><td>VRAM</td><td>M</td><td>ASR100</td><td>ASR150</td><td>q100</td><td>s100</td><td>VRAM</td></tr><tr><td colspan=\"13\">ImageNet</td></tr><tr><td>Sparse-RS</td><td rowspan=\"3\">I1</td><td>89.3</td><td>91.5</td><td>0.39</td><td>0.32</td><td>1.29</td><td rowspan=\"3\">I2</td><td>81.1</td><td>84.1</td><td>0.53</td><td>0.5</td><td>4.39</td></tr><tr><td>sPGDp</td><td>95.4</td><td>98.5</td><td>0.31</td><td>0.16</td><td>1.40</td><td>85.6</td><td>91.2</td><td>0.33</td><td>0.64</td><td>4.48</td></tr><tr><td>sPGDu</td><td>93.6</td><td>97.8</td><td>0.33</td><td>0.12</td><td>1.40</td><td>82.6</td><td>88.7</td><td>0.37</td><td>0.39</td><td>4.49</td></tr><tr><td>σ-zero</td><td></td><td>99.7</td><td>100.0</td><td>0.19</td><td>0.06</td><td>1.79</td><td></td><td>94.7</td><td>97.1</td><td>0.15</td><td>0.17</td><td>4.90</td></tr><tr><td>Sparse-RS</td><td rowspan=\"3\">I3</td><td>69.1</td><td>72.2</td><td>0.81</td><td>0.62</td><td>4.39</td><td rowspan=\"3\">I4</td><td>45.9</td><td>47.4</td><td>1.17</td><td>1.12</td><td>5.72</td></tr><tr><td>sPGDp</td><td>85.4</td><td>93.4</td><td>0.32</td><td>0.55</td><td>4.49</td><td>66.3</td><td>74.9</td><td>0.73</td><td>1.39</td><td>5.84</td></tr><tr><td>sPGDu</td><td>83.9</td><td>92.1</td><td>0.35</td><td>0.39</td><td>4.49</td><td>66.0</td><td>76.0</td><td>0.72</td><td>1.01</td><td>5.84</td></tr><tr><td>σ-zero</td><td></td><td>97.7</td><td>99.6</td><td>0.34</td><td>0.37</td><td>4.90</td><td></td><td>78.8</td><td>85.8</td><td>0.49</td><td>0.70</td><td>6.29</td></tr></table>",
|
| 1077 |
+
"bbox": [
|
| 1078 |
+
173,
|
| 1079 |
+
142,
|
| 1080 |
+
826,
|
| 1081 |
+
296
|
| 1082 |
+
],
|
| 1083 |
+
"page_idx": 8
|
| 1084 |
+
},
|
| 1085 |
+
{
|
| 1086 |
+
"type": "text",
|
| 1087 |
+
"text": "amounts to performing 2000 forward calls). Furthermore, to compute the ASR at different $k$ ( $\\mathrm{ASR}_k$ ), we separately execute fixed-budget attacks for $k = 24, 50, 100$ features on MNIST and CIFAR-10, and with $k = 100, 150$ features on ImageNet (excluding PGD- $\\ell_0$ due to computational demands), reporting only the maximum number of queries and execution time across all distinct runs. We report the average query usage at $k$ ( $\\mathbf{q}_k$ ) and the average execution time per sample at $k$ ( $\\mathbf{s}_k$ ). We report the execution time of $\\mathbf{s}_k$ for the smaller $k$ , as it requires, on average, more iterations due to the more challenging problem. The results, shown in Tables 3-4, confirm that $\\sigma$ -zero outperforms competing approaches in 17 out of 18 configurations (see Appendix B.2 for additional results). Only against C4 does the fixed-budget attack $sPGD_u$ slightly increase the ASR. The advantages of $\\sigma$ -zero become even more evident when looking at the results on ImageNet, where, on average, it improves the $\\mathrm{ASR}_{100}$ by $9.6\\%$ across all models in Table 4. The results also indicate that early stopping enables $\\sigma$ -zero to save a significant number of queries and runtime while preserving a high ASR. In Appendix B.2 we also report additional comparisons with $N = 2500$ and $N = 5000$ , i.e., a more favorable scenario for the competing attacks, confirming that $\\sigma$ -zero remains competitive even at higher budgets.",
|
| 1088 |
+
"bbox": [
|
| 1089 |
+
169,
|
| 1090 |
+
323,
|
| 1091 |
+
826,
|
| 1092 |
+
532
|
| 1093 |
+
],
|
| 1094 |
+
"page_idx": 8
|
| 1095 |
+
},
|
| 1096 |
+
{
|
| 1097 |
+
"type": "text",
|
| 1098 |
+
"text": "Summary. Our experiments show that $\\sigma$ -zero: (i) outperforms minimum-norm attacks by improving the success rate and decreasing the $\\ell_0$ norm of the generated adversarial examples (see Table 1 and Appendix B.1); (ii) is significantly faster and scales easily to large datasets (see Table 1 and Appendix B.1); (iii) is robust to hyperparameter selection, not requiring sophisticated and time-consuming tuning (see Appendix A.2); (iv) does not require any adversarial initialization (see Table 1); (v) provides more reliable adversarial robustness evaluations, consistently achieving $100\\%$ ASRs (see Table 1, Figure 2, Appendix B.3); and (vi) remains competitive against fixed-budget attacks even when given the same query budget (Tables 3-4).",
|
| 1099 |
+
"bbox": [
|
| 1100 |
+
169,
|
| 1101 |
+
539,
|
| 1102 |
+
828,
|
| 1103 |
+
652
|
| 1104 |
+
],
|
| 1105 |
+
"page_idx": 8
|
| 1106 |
+
},
|
| 1107 |
+
{
|
| 1108 |
+
"type": "text",
|
| 1109 |
+
"text": "4 RELATED WORK",
|
| 1110 |
+
"text_level": 1,
|
| 1111 |
+
"bbox": [
|
| 1112 |
+
171,
|
| 1113 |
+
674,
|
| 1114 |
+
346,
|
| 1115 |
+
690
|
| 1116 |
+
],
|
| 1117 |
+
"page_idx": 8
|
| 1118 |
+
},
|
| 1119 |
+
{
|
| 1120 |
+
"type": "text",
|
| 1121 |
+
"text": "Optimizing $\\ell_0$ -norm adversarial examples with gradient-based algorithms is challenging due to nonconvex and non-differentiable constraints. We categorize them into two main groups: (i) multiple-norm attacks extended to $\\ell_0$ , and (ii) attacks specifically designed to optimize the $\\ell_0$ norm.",
|
| 1122 |
+
"bbox": [
|
| 1123 |
+
169,
|
| 1124 |
+
707,
|
| 1125 |
+
826,
|
| 1126 |
+
750
|
| 1127 |
+
],
|
| 1128 |
+
"page_idx": 8
|
| 1129 |
+
},
|
| 1130 |
+
{
|
| 1131 |
+
"type": "text",
|
| 1132 |
+
"text": "Multiple-norm Attacks Extended to $\\ell_0$ . These attacks have been developed to work with multiple $\\ell_p$ norms, including extensions for the $\\ell_0$ norm. While they can find sparse perturbations, they often rely heavily on heuristics in this setting. [Brendel et al. (2019a) initialize the attack from an adversarial example far away from the clean sample and optimizes the perturbation by following the decision boundary to get closer to the source sample. In general, the algorithm can be used for any $\\ell_p$ norm, including $\\ell_0$ , but the individual optimization steps are very costly. [Pintor et al. (2021) propose the FMN attack that does not require an initialization step and converges efficiently with lightweight gradient-descent steps. However, their approach was developed to generalize over $\\ell_p$ norms, but does not make special adaptations to minimize the $\\ell_0$ norm specifically. Matyasko & Chau (2021) use",
|
| 1133 |
+
"bbox": [
|
| 1134 |
+
169,
|
| 1135 |
+
756,
|
| 1136 |
+
825,
|
| 1137 |
+
883
|
| 1138 |
+
],
|
| 1139 |
+
"page_idx": 8
|
| 1140 |
+
},
|
| 1141 |
+
{
|
| 1142 |
+
"type": "header",
|
| 1143 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1144 |
+
"bbox": [
|
| 1145 |
+
171,
|
| 1146 |
+
32,
|
| 1147 |
+
478,
|
| 1148 |
+
47
|
| 1149 |
+
],
|
| 1150 |
+
"page_idx": 8
|
| 1151 |
+
},
|
| 1152 |
+
{
|
| 1153 |
+
"type": "page_footnote",
|
| 1154 |
+
"text": "${}^{8}N = {2000}$ is suggested as a lower bound number of iterations to ensure the convergence of Sparse-RS by Croce et al. (2022). Additional results with $N = {5000}/{10000}$ for Sparse-RS can be found in Appendix B.2.",
|
| 1155 |
+
"bbox": [
|
| 1156 |
+
169,
|
| 1157 |
+
896,
|
| 1158 |
+
823,
|
| 1159 |
+
928
|
| 1160 |
+
],
|
| 1161 |
+
"page_idx": 8
|
| 1162 |
+
},
|
| 1163 |
+
{
|
| 1164 |
+
"type": "page_number",
|
| 1165 |
+
"text": "9",
|
| 1166 |
+
"bbox": [
|
| 1167 |
+
493,
|
| 1168 |
+
948,
|
| 1169 |
+
504,
|
| 1170 |
+
959
|
| 1171 |
+
],
|
| 1172 |
+
"page_idx": 8
|
| 1173 |
+
},
|
| 1174 |
+
{
|
| 1175 |
+
"type": "text",
|
| 1176 |
+
"text": "relaxations of the $\\ell_0$ norm (e.g., $\\ell_{1/2}$ ) to promote sparsity. However, this scheme does not strictly minimize the $\\ell_0$ norm, as the relaxation does not set the lowest components exactly to zero.",
|
| 1177 |
+
"bbox": [
|
| 1178 |
+
169,
|
| 1179 |
+
103,
|
| 1180 |
+
823,
|
| 1181 |
+
133
|
| 1182 |
+
],
|
| 1183 |
+
"page_idx": 9
|
| 1184 |
+
},
|
| 1185 |
+
{
|
| 1186 |
+
"type": "text",
|
| 1187 |
+
"text": "$\\ell_0$ -specific Attacks. Croce et al. (2022) introduced Sparse-RS, a random search-based attack that, unlike minimum-norm attacks, aims to find adversarial examples that are misclassified with high confidence within a fixed perturbation budget. On the same track we find Sparse-PGD (Zhong et al., 2024) and PGD- $\\ell_0$ (Croce & Hein, 2019), white-box fixed-budget alternatives to Sparse-RS. Lastly, Césaire et al. (2021) induces folded Gaussian noise to selected input components, iteratively finding the set that achieves misclassification with minimal perturbation. However, it requires considerable memory to explore possible combinations and find an optimal solution, limiting its scalability.",
|
| 1188 |
+
"bbox": [
|
| 1189 |
+
169,
|
| 1190 |
+
138,
|
| 1191 |
+
826,
|
| 1192 |
+
238
|
| 1193 |
+
],
|
| 1194 |
+
"page_idx": 9
|
| 1195 |
+
},
|
| 1196 |
+
{
|
| 1197 |
+
"type": "text",
|
| 1198 |
+
"text": "Overall, current implementations of $\\ell_0$ -norm attacks present a crucial suboptimal trade-off between their success rate and efficiency, i.e., they are either accurate but slow (e.g., BB) or fast but inaccurate (e.g., FMN). This is also confirmed by a recent work that has benchmarked more than 100 gradient-based attacks (Cinà et al., 2025) on 9 additional robust models. In that open-source benchmark, $\\sigma$ -zero consistently and significantly outperformed all the existing implementations of competing $\\ell_0$ -norm attacks, establishing a performance very close to that of the empirical oracle (obtained by assembling all the attacks tested). In summary, our attack combines the benefits of the two families of attack detailed above, i.e., effectiveness and efficiency, providing the state-of-the-art solution for adversarial robustness evaluations of DNNs when considering $\\ell_0$ -norm attacks.",
|
| 1199 |
+
"bbox": [
|
| 1200 |
+
169,
|
| 1201 |
+
243,
|
| 1202 |
+
826,
|
| 1203 |
+
369
|
| 1204 |
+
],
|
| 1205 |
+
"page_idx": 9
|
| 1206 |
+
},
|
| 1207 |
+
{
|
| 1208 |
+
"type": "text",
|
| 1209 |
+
"text": "5 CONCLUSIONS AND FUTURE WORK",
|
| 1210 |
+
"text_level": 1,
|
| 1211 |
+
"bbox": [
|
| 1212 |
+
171,
|
| 1213 |
+
388,
|
| 1214 |
+
504,
|
| 1215 |
+
405
|
| 1216 |
+
],
|
| 1217 |
+
"page_idx": 9
|
| 1218 |
+
},
|
| 1219 |
+
{
|
| 1220 |
+
"type": "text",
|
| 1221 |
+
"text": "In this work, we propose $\\sigma$ -zero, a novel attack aimed to find minimum $\\ell_0$ -norm adversarial examples, based on the following main technical contributions: (i) a differentiable approximation of the $\\ell_0$ norm to define a novel, smooth objective that can be minimized via gradient descent; and (ii) an adaptive projection operator to enforce sparsity in the adversarial perturbation, by zeroing out the least relevant features in each iteration. $\\sigma$ -zero also leverages specific optimization tricks to stabilize and speed up the optimization. Our extensive experiments demonstrate that $\\sigma$ -zero consistently discovers more effective and reliable $\\ell_0$ -norm adversarial perturbations across all models and datasets while maintaining computational efficiency and robustness to hyperparameters choice. In conclusion, $\\sigma$ -zero emerges as a highly promising candidate to evaluate robustness against $\\ell_0$ -norm perturbations and promote the development of novel robust models against sparse attacks.",
|
| 1222 |
+
"bbox": [
|
| 1223 |
+
169,
|
| 1224 |
+
420,
|
| 1225 |
+
823,
|
| 1226 |
+
559
|
| 1227 |
+
],
|
| 1228 |
+
"page_idx": 9
|
| 1229 |
+
},
|
| 1230 |
+
{
|
| 1231 |
+
"type": "text",
|
| 1232 |
+
"text": "Ethics Statement. Based on our comprehensive analysis, we assert that there are no identifiable ethical considerations or foreseeable negative societal consequences that warrant specific attention within the limits of this study. This study will rather help improve the understanding of adversarial robustness of DNNs and identify potential ways to improve it.",
|
| 1233 |
+
"bbox": [
|
| 1234 |
+
169,
|
| 1235 |
+
566,
|
| 1236 |
+
823,
|
| 1237 |
+
625
|
| 1238 |
+
],
|
| 1239 |
+
"page_idx": 9
|
| 1240 |
+
},
|
| 1241 |
+
{
|
| 1242 |
+
"type": "text",
|
| 1243 |
+
"text": "Reproducibility. To ensure the reproducibility of our work, we have detailed the experimental setup in Section 3.1 where we describe the datasets, models, and attacks used, along with their respective sources. Additionally, we have provided our source code as part of the supplementary material, which will be made publicly available as open source upon acceptance.",
|
| 1244 |
+
"bbox": [
|
| 1245 |
+
169,
|
| 1246 |
+
628,
|
| 1247 |
+
823,
|
| 1248 |
+
686
|
| 1249 |
+
],
|
| 1250 |
+
"page_idx": 9
|
| 1251 |
+
},
|
| 1252 |
+
{
|
| 1253 |
+
"type": "text",
|
| 1254 |
+
"text": "ACKNOWLEDGMENTS",
|
| 1255 |
+
"text_level": 1,
|
| 1256 |
+
"bbox": [
|
| 1257 |
+
171,
|
| 1258 |
+
700,
|
| 1259 |
+
328,
|
| 1260 |
+
714
|
| 1261 |
+
],
|
| 1262 |
+
"page_idx": 9
|
| 1263 |
+
},
|
| 1264 |
+
{
|
| 1265 |
+
"type": "text",
|
| 1266 |
+
"text": "This work has been partially supported by the project Sec4AI4Sec, under the EU's Horizon Europe Research and Innovation Programme (grant agreement no. 101120393); the project ELSA, under the EU's Horizon Europe Research and Innovation Programme (grant agreement no. 101070617); the EU—NGEU National Sustainable Mobility Center (CN00000023), Italian Ministry of University and Research (MUR) Decree n. 1033—17/06/2022 (Spoke 10); projects SERICS (PE00000014) and FAIR (PE0000013) under the MUR NRRP funded by the EU—NGEU; and by the German Federal Ministry of Education and Research under the grant AIgenCY (16KIS2012).",
|
| 1267 |
+
"bbox": [
|
| 1268 |
+
169,
|
| 1269 |
+
724,
|
| 1270 |
+
823,
|
| 1271 |
+
824
|
| 1272 |
+
],
|
| 1273 |
+
"page_idx": 9
|
| 1274 |
+
},
|
| 1275 |
+
{
|
| 1276 |
+
"type": "header",
|
| 1277 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1278 |
+
"bbox": [
|
| 1279 |
+
171,
|
| 1280 |
+
32,
|
| 1281 |
+
478,
|
| 1282 |
+
47
|
| 1283 |
+
],
|
| 1284 |
+
"page_idx": 9
|
| 1285 |
+
},
|
| 1286 |
+
{
|
| 1287 |
+
"type": "page_number",
|
| 1288 |
+
"text": "10",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
490,
|
| 1291 |
+
946,
|
| 1292 |
+
508,
|
| 1293 |
+
960
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 9
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "text",
|
| 1299 |
+
"text": "REFERENCES",
|
| 1300 |
+
"text_level": 1,
|
| 1301 |
+
"bbox": [
|
| 1302 |
+
174,
|
| 1303 |
+
102,
|
| 1304 |
+
287,
|
| 1305 |
+
117
|
| 1306 |
+
],
|
| 1307 |
+
"page_idx": 10
|
| 1308 |
+
},
|
| 1309 |
+
{
|
| 1310 |
+
"type": "list",
|
| 1311 |
+
"sub_type": "ref_text",
|
| 1312 |
+
"list_items": [
|
| 1313 |
+
"Sravanti Addepalli, Samyak Jain, and Venkatesh Babu R. Efficient and effective augmentation strategy for adversarial training. In NeurIPS, 2022.",
|
| 1314 |
+
"Maximilian Augustin, Alexander Meinke, and Matthias Hein. Adversarial robustness on in- and out-distribution improves explainability. In Computer Vision - ECCV 2020 - 16th European Conference, volume 12371 of Lecture Notes in Computer Science, pp. 228-245. Springer, 2020.",
|
| 1315 |
+
"Battista Biggio and Fabio Roli. Wild patterns: Ten years after the rise of adversarial machine learning. Pattern Recognition, 84:317-331, 2018.",
|
| 1316 |
+
"Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Srndic, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In *Machine Learning and Knowledge Discovery in Databases - European Conference*, ECML PKDD, volume 8190 of Lecture Notes in Computer Science, pp. 387-402. Springer, 2013.",
|
| 1317 |
+
"Wieland Brendel, Jonas Rauber, Matthias Kümmerer, Ivan Ustyuzhaninov, and Matthias Bethge. Accurate, reliable and fast robustness evaluation. In Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems, NeurIPS, 2019a.",
|
| 1318 |
+
"Wieland Brendel, Jonas Rauber, Matthias Kümmerer, Ivan Ustyuzhaninov, and Matthias Bethge. Accurate, reliable and fast robustness evaluation. In Conference on Neural Information Processing Systems (NeurIPS), 2019b.",
|
| 1319 |
+
"Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In IEEE Symposium on Security and Privacy (S&P), 2017a.",
|
| 1320 |
+
"Nicholas Carlini and David A. Wagner. Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy SP, pp. 39-57. IEEE Computer Society, 2017b.",
|
| 1321 |
+
"Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian J. Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. CoRR, abs/1902.06705, 2019.",
|
| 1322 |
+
"Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, John C Duchi, and Percy S Liang. Unlabeled data improves adversarial robustness. In Conference on Neural Information Processing Systems (NeurIPS), 2019.",
|
| 1323 |
+
"Manon Césaire, Lucas Schott, Hatem Hajri, Sylvain Lamprier, and Patrick Gallinari. Stochastic sparse adversarial attacks. In 33rd IEEE International Conference on Tools with Artificial Intelligence, ICTAI, pp. 1247-1254. IEEE, 2021.",
|
| 1324 |
+
"Pin-Yu Chen, Yash Sharma, Huan Zhang, Jinfeng Yi, and Cho-Jui Hsieh. EAD: elastic-net attacks to deep neural networks via adversarial examples. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), pp. 10-17. AAAI Press, 2018.",
|
| 1325 |
+
"Tianlong Chen, Sijia Liu, Shiyu Chang, Yu Cheng, Lisa Amini, and Zhangyang Wang. Adversarial robustness: From self-supervised pre-training to fine-tuning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pp. 696-705. Computer Vision Foundation / IEEE, 2020.",
|
| 1326 |
+
"A. E. Cina, J. Rony, M. Pintor, L. Demetrio, A. Demontis, B. Biggio, I. B. Ayed, and F. Roli. Attackbench: Evaluating gradient-based attacks for adversarial examples. In AAAI Conference on Artificial Intelligence, 2025.",
|
| 1327 |
+
"Francesco Croce and Matthias Hein. Sparse and imperceivable adversarial attacks. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 4723-4731, 2019.",
|
| 1328 |
+
"Francesco Croce and Matthias Hein. Mind the box: $l_{1}$ -apgd for sparse adversarial attacks on image classifiers. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML, volume 139 of Proceedings of Machine Learning Research, pp. 2201-2211. PMLR, 2021."
|
| 1329 |
+
],
|
| 1330 |
+
"bbox": [
|
| 1331 |
+
171,
|
| 1332 |
+
125,
|
| 1333 |
+
826,
|
| 1334 |
+
924
|
| 1335 |
+
],
|
| 1336 |
+
"page_idx": 10
|
| 1337 |
+
},
|
| 1338 |
+
{
|
| 1339 |
+
"type": "header",
|
| 1340 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1341 |
+
"bbox": [
|
| 1342 |
+
171,
|
| 1343 |
+
32,
|
| 1344 |
+
478,
|
| 1345 |
+
47
|
| 1346 |
+
],
|
| 1347 |
+
"page_idx": 10
|
| 1348 |
+
},
|
| 1349 |
+
{
|
| 1350 |
+
"type": "page_number",
|
| 1351 |
+
"text": "11",
|
| 1352 |
+
"bbox": [
|
| 1353 |
+
490,
|
| 1354 |
+
948,
|
| 1355 |
+
504,
|
| 1356 |
+
959
|
| 1357 |
+
],
|
| 1358 |
+
"page_idx": 10
|
| 1359 |
+
},
|
| 1360 |
+
{
|
| 1361 |
+
"type": "list",
|
| 1362 |
+
"sub_type": "ref_text",
|
| 1363 |
+
"list_items": [
|
| 1364 |
+
"Francesco Croce, Maksym Andriushchenko, Vikash Sehwag, Edoardo Debenedetti, Nicolas Flammarion, Mung Chiang, Prateek Mittal, and Matthias Hein. Robustbench: a standardized adversarial robustness benchmark. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks, 2021.",
|
| 1365 |
+
"Francesco Croce, Maksym Andriushchenko, Naman D. Singh, Nicolas Flammarion, and Matthias Hein. Sparse-rs: A versatile framework for query-efficient sparse black-box adversarial attacks. In Thirty-Sixth AAAI Conference on Artificial Intelligence, AAAI, pp. 6437-6445. AAAI Press, 2022.",
|
| 1366 |
+
"Geoff Davis, Stephane Mallat, and Marco Avellaneda. Adaptive greedy approximations. Constructive approximation, 13:57-98, 1997.",
|
| 1367 |
+
"Edoardo Debenedetti, Vikash Sehwag, and Prateek Mittal. A light recipe to train robust vision transformers. In First IEEE Conference on Secure and Trustworthy Machine Learning, 2023. URL https://openreview.net/forum?id=IztT98ky0cKs.",
|
| 1368 |
+
"Logan Engstrom, Andrew Ilyas, Hadi Salman, Shibani Santurkar, and Dimitris Tsipras. Robustness (python library), 2019. URL https://github.com/MadryLab/robustness.",
|
| 1369 |
+
"Foolbox. Datasetattack, 2017. URL https://foolbox.readthedocs.io/en/stable/modules/attacks.html#foolboxattacks.DatasetAttack",
|
| 1370 |
+
"Justin Gilmer, Ryan P. Adams, Ian J. Goodfellow, David Andersen, and George E. Dahl. Motivating the rules of the game for adversarial example research. CoRR, abs/1807.06732, 2018.",
|
| 1371 |
+
"Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, and Timothy A. Mann. Improving robustness using generated data. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS, pp. 4218-4233, 2021.",
|
| 1372 |
+
"Kaiming He, X. Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2015.",
|
| 1373 |
+
"Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV, pp. 8320-8329. IEEE, 2021.",
|
| 1374 |
+
"Yulun Jiang, Chen Liu, Zhichao Huang, Mathieu Salzmann, and Sabine Susstrunk. Towards stable and efficient adversarial training against 11 bounded adversarial attacks. In International Conference on Machine Learning, 2023.",
|
| 1375 |
+
"Alex Krizhevsky. Learning multiple layers of features from tiny images. 2009.",
|
| 1376 |
+
"Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60:84 - 90, 2012.",
|
| 1377 |
+
"Yann LeCun and Corinna Cortes. The mnist database of handwritten digits. 2005.",
|
| 1378 |
+
"Alexander Matyasko and Lap-Pui Chau. PDPGD: primal-dual proximal gradient descent adversarial attack. CoRR, abs/2106.01538, 2021. URL https://arxiv.org/abs/2106.01538",
|
| 1379 |
+
"Yichuan Mo, Dongxian Wu, Yifei Wang, Yiwen Guo, and Yisen Wang. When adversarial training meets vision transformers: Recipes from training to architecture. Advances in Neural Information Processing Systems, 35:1859-18611, 2022.",
|
| 1380 |
+
"Apostolos Modas, Seyed-Mohsen Moosavi-Dezfooli, and Pascal Frossard. Sparsefool: a few pixels make a big difference. In Conference on computer vision and pattern recognition (CVPR), 2019.",
|
| 1381 |
+
"Shengyun Peng, Weilin Xu, Cory Cornelius, Matthew Hull, Kevin Li, Rahul Duggal, Mansi Phute, Jason Martin, and Duen Horng Chau. Robust principles: Architectural design principles for adversarially robust cnns. In 34th British Machine Vision Conference 2023, BMVC 2023, Aberdeen, UK, November 20-24, 2023, pp. 739-740. BMVA Press, 2023. URL http://proceedings.bmvc2023.org/739/."
|
| 1382 |
+
],
|
| 1383 |
+
"bbox": [
|
| 1384 |
+
171,
|
| 1385 |
+
102,
|
| 1386 |
+
826,
|
| 1387 |
+
925
|
| 1388 |
+
],
|
| 1389 |
+
"page_idx": 11
|
| 1390 |
+
},
|
| 1391 |
+
{
|
| 1392 |
+
"type": "header",
|
| 1393 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1394 |
+
"bbox": [
|
| 1395 |
+
171,
|
| 1396 |
+
32,
|
| 1397 |
+
478,
|
| 1398 |
+
47
|
| 1399 |
+
],
|
| 1400 |
+
"page_idx": 11
|
| 1401 |
+
},
|
| 1402 |
+
{
|
| 1403 |
+
"type": "page_number",
|
| 1404 |
+
"text": "12",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
488,
|
| 1407 |
+
946,
|
| 1408 |
+
506,
|
| 1409 |
+
959
|
| 1410 |
+
],
|
| 1411 |
+
"page_idx": 11
|
| 1412 |
+
},
|
| 1413 |
+
{
|
| 1414 |
+
"type": "list",
|
| 1415 |
+
"sub_type": "ref_text",
|
| 1416 |
+
"list_items": [
|
| 1417 |
+
"Maura Pintor, Fabio Roli, Wieland Brendel, and Battista Biggio. Fast minimum-norm adversarial attacks through adaptive norm constraints. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS, pp. 20052-20062, 2021.",
|
| 1418 |
+
"Maura Pintor, Luca Demetrio, Angelo Sotgiu, Ambra Demontis, Nicholas Carlini, Battista Biggio, and Fabio Roli. Indicators of attack failure: Debugging and improving optimization of adversarial examples. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 23063-23076. Curran Associates, Inc., 2022.",
|
| 1419 |
+
"Jonas Rauber, Wieland Brendel, and Matthias Bethge. Foolbox: A python toolbox to benchmark the robustness of machine learning models, 2017. URL https://github.com/bethgelab/ foolbox.",
|
| 1420 |
+
"Jérôme Rony, Luiz G. Hafemann, Luiz Oliveira, Ismail Ben Ayed, Robert Sabourin, and Eric Granger. Decoupling direction and norm for efficient gradient-based l2 adversarial attacks and defenses. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4317-4325, 2018.",
|
| 1421 |
+
"Jérôme Rony, Eric Granger, Marco Pedersoli, and Ismail Ben Ayed. Augmented lagrangian adversarial attacks. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV, pp. 7718-7727. IEEE, 2021.",
|
| 1422 |
+
"Jérôme Rony and Ismail Ben Ayed. Adversarial Library. URL https://github.com/jeromerony/adversarial-library",
|
| 1423 |
+
"Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, and Aleksander Madry. Do adversarially robust imagenet models transfer better? In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS, 2020.",
|
| 1424 |
+
"Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014.",
|
| 1425 |
+
"Eric Wong, Leslie Rice, and J. Zico Kolter. Fast is better than free: Revisiting adversarial training. In 8th International Conference on Learning Representations, ICLR. OpenReview.net, 2020.",
|
| 1426 |
+
"Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, and Furong Huang. Exploring and exploiting decision boundary dynamics for adversarial robustness. In International Conference on Learning Representations (ICLR), 2023.",
|
| 1427 |
+
"Xuyang Zhong, Yixiao Huang, and Chen Liu. Towards efficient training and evaluation of robust models against $l_{0}$ bounded adversarial perturbations. In International Conference on Machine Learning ICML. PMLR, 2024."
|
| 1428 |
+
],
|
| 1429 |
+
"bbox": [
|
| 1430 |
+
171,
|
| 1431 |
+
102,
|
| 1432 |
+
826,
|
| 1433 |
+
690
|
| 1434 |
+
],
|
| 1435 |
+
"page_idx": 12
|
| 1436 |
+
},
|
| 1437 |
+
{
|
| 1438 |
+
"type": "header",
|
| 1439 |
+
"text": "Published as a conference paper at ICLR 2025",
|
| 1440 |
+
"bbox": [
|
| 1441 |
+
171,
|
| 1442 |
+
32,
|
| 1443 |
+
478,
|
| 1444 |
+
47
|
| 1445 |
+
],
|
| 1446 |
+
"page_idx": 12
|
| 1447 |
+
},
|
| 1448 |
+
{
|
| 1449 |
+
"type": "page_number",
|
| 1450 |
+
"text": "13",
|
| 1451 |
+
"bbox": [
|
| 1452 |
+
488,
|
| 1453 |
+
946,
|
| 1454 |
+
506,
|
| 1455 |
+
959
|
| 1456 |
+
],
|
| 1457 |
+
"page_idx": 12
|
| 1458 |
+
}
|
| 1459 |
+
]
|
2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/3820601e-9eb2-4b22-a325-26893c36ef95_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/3820601e-9eb2-4b22-a325-26893c36ef95_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1a35d3d962ef7368cccbb1e5e111fc111666e9483b3a2386abdee3d74b5c6d72
|
| 3 |
+
size 1364035
|
2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/full.md
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# $\sigma$ -ZERO: GRADIENT-BASED OPTIMIZATION OF $\ell_0$ -NORM ADVERSARIAL EXAMPLES
|
| 2 |
+
|
| 3 |
+
Antonio Emanuele Cina $^{1}$
|
| 4 |
+
Battista Biggio $^{2}$
|
| 5 |
+
|
| 6 |
+
Francesco Villani
|
| 7 |
+
|
| 8 |
+
Maura Pintor²
|
| 9 |
+
|
| 10 |
+
Lea Schonherr<sup>3</sup>
|
| 11 |
+
|
| 12 |
+
$^{1}$ Department of Computer Science, Bioengineering, Robotics and Systems, University of Genoa, Italy
|
| 13 |
+
$^{2}$ Department of Electrical and Electronic Engineering, University of Cagliari, Italy
|
| 14 |
+
$^{3}$ CISPA Helmholtz Center for Information Security, Germany
|
| 15 |
+
<sup>4</sup>Department of Environmental Sciences, Informatics and Statistics, Ca' Foscari University of Venice, Italy
|
| 16 |
+
antonio.cina@unige.it francesco.villani@edu.unige.it maura.pintor@unica.it
|
| 17 |
+
schoenherr@cispa.de battista.biggio@unica.it pelillo@unive.it
|
| 18 |
+
|
| 19 |
+
# ABSTRACT
|
| 20 |
+
|
| 21 |
+
Evaluating the adversarial robustness of deep networks to gradient-based attacks is challenging. While most attacks consider $\ell_{2}$ - and $\ell_{\infty}$ -norm constraints to craft input perturbations, only a few investigate sparse $\ell_{1}$ - and $\ell_{0}$ -norm attacks. In particular, $\ell_{0}$ -norm attacks remain the least studied due to the inherent complexity of optimizing over a non-convex and non-differentiable constraint. However, evaluating adversarial robustness under these attacks could reveal weaknesses otherwise left untested with more conventional $\ell_{2}$ - and $\ell_{\infty}$ -norm attacks. In this work, we propose a novel $\ell_{0}$ -norm attack, called $\sigma$ -zero, which leverages a differentiable approximation of the $\ell_{0}$ norm to facilitate gradient-based optimization, and an adaptive projection operator to dynamically adjust the trade-off between loss minimization and perturbation sparsity. Extensive evaluations using MNIST, CIFAR10, and ImageNet datasets, involving robust and non-robust models, show that $\sigma$ -zero finds minimum $\ell_{0}$ -norm adversarial examples without requiring any time-consuming hyperparameter tuning, and that it outperforms all competing sparse attacks in terms of success rate, perturbation size, and efficiency.
|
| 22 |
+
|
| 23 |
+
# 1 INTRODUCTION
|
| 24 |
+
|
| 25 |
+
Early research has revealed that machine learning models are fooled by adversarial examples, i.e., slightly-perturbed inputs optimized to cause misclassifications (Biggio et al., 2013; Szegedy et al., 2014). The discovery of this phenomenon has, in turn, demanded a more careful evaluation of the robustness of such models, especially when deployed in security-sensitive and safety-critical applications. Most of the gradient-based attacks proposed to evaluate the adversarial robustness of Deep Neural Networks (DNNs) optimize adversarial examples under different $\ell_p$ -norm constraints. In particular, while convex $\ell_1$ , $\ell_2$ , and $\ell_{\infty}$ norms have been widely studied (Chen et al., 2018; Croce & Hein, 2021), only a few $\ell_0$ -norm attacks have been considered to date. The main reason is that finding minimum $\ell_0$ -norm solutions is known to be an NP-hard problem (Davis et al., 1997), and thus ad-hoc approximations must be adopted to overcome issues related to the non-convexity and non-differentiability of such (pseudo) norm. Although this is a challenging task, attacks based on the $\ell_0$ norm have the potential to uncover issues in DNNs that may not be evident when considering other attacks (Carlini & Wagner, 2017b; Croce & Hein, 2021). In particular, $\ell_0$ -norm attacks, known to perturb a minimal fraction of input values, can be used to determine the most sensitive characteristics that influence the model's decision-making process, offering a different and relevant threat model to benchmark existing defenses and a different understanding of the model's inner workings.
|
| 26 |
+
|
| 27 |
+
Unfortunately, current $\ell_0$ -norm attacks exhibit a largely suboptimal trade-off between their success rate and efficiency, i.e., they are either accurate but slow or fast but inaccurate. In particular, the accurate ones use complex projections and advanced initialization strategies (e.g., adversarial
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
Figure 1: The leftmost plot shows the execution of $\sigma$ -zero on a two-dimensional problem. The initial point $\mathbf{x}$ (red dot) is updated via gradient descent to find the adversarial example $\mathbf{x}^{\star}$ (green star) while minimizing the number of perturbed features (i.e., the $\ell_0$ norm of the perturbation). The gray lines surrounding $\mathbf{x}$ demarcate regions where the $\ell_0$ norm is minimized. The rightmost plot shows the adversarial images (top row) and the corresponding perturbations (bottom row) found by $\sigma$ -zero during the three steps highlighted in the leftmost plot, along with their prediction and $\ell_0$ norm.
|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
Frog $\rightarrow$ Frog $\ell_0:2813$
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
Frog $\rightarrow$ Chameleon $\ell_0:1381$
|
| 39 |
+
|
| 40 |
+

|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
Frog $\rightarrow$ Chameleon $\ell_0:52$
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
|
| 47 |
+
initialization) to find smaller input perturbations but suffer from time or memory limitations, hindering their scalability to larger networks or high-dimensional data (Brendel et al., 2019a; Césaire et al., 2021). Other attacks execute faster, but their returned solution is typically less accurate and largely suboptimal (Matyasko & Chau, 2021; Pintor et al., 2021). This results in overestimating adversarial robustness and, in turn, contributes to spreading a false sense of security, hindering the development of effective defense mechanisms (Carlini et al., 2019; Pintor et al., 2022). Developing a reliable, scalable, and compelling method to assess the robustness of DNN models against sparse perturbations with minimum $\ell_0$ norm remains thus a relevant and challenging open problem.
|
| 48 |
+
|
| 49 |
+
In this work, we propose a novel $\ell_0$ -norm attack, named $\sigma$ -zero, which iteratively promotes the sparsity of the adversarial perturbation by minimizing its $\ell_0$ norm (see Figure 1 and Sect. 2). To overcome the limitations of previous approaches, our attack leverages two main technical contributions: (i) a smooth, differentiable approximation of the $\ell_0$ norm to enable the minimization of the attack loss via gradient descent; and (ii) an adaptive projection operator that dynamically increases sparsity to further reduce the perturbation size while keeping the perturbed sample in the adversarial region.
|
| 50 |
+
|
| 51 |
+
Our experiments (Sect. 3) provide compelling evidence of the remarkable performance of $\sigma$ -zero. We evaluate it on 3 well-known benchmark datasets (i.e., MNIST, CIFAR10, and ImageNet), using 22 different models from Robustbench (Croce et al. 2021) and the corresponding official repositories. We compare the performance of $\sigma$ -zero against more than 10 competing attacks, totaling almost 450 different comparisons. Our analysis shows that $\sigma$ -zero outperforms state-of-the-art attacks in terms of both attack success rate and perturbation size (lower $\ell_0$ norm), while being also significantly faster (i.e., requiring fewer queries and lower runtime). Our attack also provides some additional advantages: (i) it does not require any sophisticated, time-consuming hyperparameter tuning; (ii) it does not require being initialized from an adversarial input; (iii) it is less likely to fail, i.e., it consistently achieves an attack success rate of $100\%$ for sufficiently-large perturbation budgets, thereby enabling more reliable robustness evaluations (Carlini et al., 2019). We thus believe that $\sigma$ -zero will foster significant advancements in the development of better robustness evaluation tools and more robust models against sparse attacks. We conclude the paper by discussing related work (Sect. 4), along with the main contributions and future research directions (Sect. 5).
|
| 52 |
+
|
| 53 |
+
# 2 $\sigma$ -ZERO: MINIMUM $\ell_0$ -NORM ATTACKS
|
| 54 |
+
|
| 55 |
+
We present here $\sigma$ -zero, a gradient-based attack that finds minimum $\ell_0$ -norm adversarial examples.
|
| 56 |
+
|
| 57 |
+
Threat Model. We assume that the attacker has complete access to the target model, including its architecture and trained parameters, and exploits its gradient for staging white-box untargeted attacks (Carlini & Wagner, 2017b; Biggio & Roli, 2018). This setting is useful for worst-case evaluation of the adversarial robustness of DNNs, providing an empirical assessment of the performance degradation that may be incurred under attack. Note that this is the standard setting adopted
|
| 58 |
+
|
| 59 |
+
in previous work for gradient-based adversarial robustness evaluations (Carlini & Wagner, 2017b; Brendel et al., 2019b; Croce et al., 2021; Pintor et al., 2021).
|
| 60 |
+
|
| 61 |
+
Problem Formulation. In this work, we seek untargeted minimum $\ell_0$ -norm adversarial perturbations that steer the model's decision towards misclassification (Carlini & Wagner 2017b). To this end, let $\mathbf{x} \in \mathcal{X} = [0,1]^d$ be a $d$ -dimensional input sample, $y \in \mathcal{Y} = \{1,\dots,l\}$ its associated true label, and $f: \mathcal{X} \times \Theta \mapsto \mathcal{Y}$ the target model, parameterized by $\theta \in \Theta$ . While $f$ outputs the predicted label, we will also use $f_k$ to denote the continuous-valued output (logit) for class $k \in \mathcal{Y}$ . The goal of our attack is to find the minimum $\ell_0$ -norm adversarial perturbation $\delta^\star$ such that the corresponding adversarial example $\mathbf{x}^\star = \mathbf{x} + \delta^\star$ is misclassified by $f$ . This can be formalized as:
|
| 62 |
+
|
| 63 |
+
$$
|
| 64 |
+
\delta^ {\star} \in \arg \min _ {\delta} \| \delta \| _ {0}, \tag {1}
|
| 65 |
+
$$
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
\text {s . t .} \quad f (\mathbf {x} + \boldsymbol {\delta}, \boldsymbol {\theta}) \neq y, \tag {2}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
\mathbf {x} + \boldsymbol {\delta} \in [ 0, 1 ] ^ {d}, \tag {3}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
where $\| \cdot \|_0$ denotes the $\ell_0$ norm, which counts the number of non-zero components. The hard constraint in Eq. (2) ensures that the perturbation $\delta$ is valid only if the target model $f$ misclassifies the perturbed sample $\mathbf{x} + \boldsymbol{\delta}$ , while the box constraint in Eq. (3) ensures that the perturbed sample lies in $[0,1]^d$ . Since the problem in Eqs. (1)-(3) can not be solved directly, we reformulate it as:
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\boldsymbol {\delta} ^ {\star} \in \arg \min _ {\boldsymbol {\delta}} \mathcal {L} (\mathbf {x} + \boldsymbol {\delta}, y, \boldsymbol {\theta}) + \frac {1}{d} \hat {\ell} _ {0} (\boldsymbol {\delta}) \tag {4}
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
$$
|
| 82 |
+
\text {s . t .} \quad \mathbf {x} + \boldsymbol {\delta} \in [ 0, 1 ] ^ {d}, \tag {5}
|
| 83 |
+
$$
|
| 84 |
+
|
| 85 |
+
where we use a differentiable approximation $\hat{\ell}_0(\delta)$ instead of $||\delta ||_0$ , and normalize it with respect to the number of features $d$ to ensure that its value is within the interval [0, 1]. The loss $\mathcal{L}$ is defined as:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\mathcal {L} (\mathbf {x}, y, \boldsymbol {\theta}) = \max \left(f _ {y} (\mathbf {x}, \boldsymbol {\theta}) - \max _ {k \neq y} f _ {k} (\mathbf {x}, \boldsymbol {\theta}), 0\right) + \mathbb {I} (f (\mathbf {x}, \boldsymbol {\theta}) = y). \tag {6}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
The first term in $\mathcal{L}$ represents the logit difference, which is positive when the sample is correctly assigned to the true class $y$ , and clipped to zero when it is misclassified (Carlini & Wagner, 2017b). The second term merely adds 1 to the loss if the sample is correctly classified. This ensures that $\mathcal{L} = 0$ only when an adversarial example is found and $\mathcal{L} \geq 1$ otherwise. In practice, when minimizing the objective in Eq. (4), this loss term induces an alternate optimization process between minimizing the loss function itself (to find an adversarial example) and minimizing the $\ell_0$ -norm of the adversarial perturbation (when an adversarial example is found). It is also worth remarking that, conversely to the objective function proposed by Carlini & Wagner (2017b), our objective does not require tuning any trade-off hyperparameters to balance between minimizing the loss and reducing the perturbation size, thereby avoiding a computationally expensive line search for each input sample.
|
| 92 |
+
|
| 93 |
+
$\ell_0$ -norm Approximation. Besides the formalization of the attack objective, one of the main technical advantages of $\sigma$ -zero is the smooth, differentiable approximation of the $\ell_0$ norm, thereby enabling the use of gradient-based optimization. To this end, we first note that the $\ell_0$ -norm of a vector can be rewritten as $\| \mathbf{x}\| _0 = \sum_{i = 1}^d\mathrm{sign}(x_i)^2$ , and then approximate the sign function as $\mathrm{sign}(x_i)\approx x_i / \sqrt{x_i^2 + \sigma}$ , where $\sigma >0$ is a smoothing hyperparameter that makes the approximation sharper as $\sigma \rightarrow 0$ . This, in turn, yields the following smooth approximation of the $\ell_0$ norm:
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\hat {\ell} _ {0} (\mathbf {x}, \sigma) = \sum_ {i = 1} ^ {d} \frac {x _ {i} ^ {2}}{x _ {i} ^ {2} + \sigma}, \sigma > 0, \quad \hat {\ell} _ {0} (\mathbf {x}, \sigma) \in [ 0, d ]. \tag {7}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
Adaptive Projection $\Pi_{\tau}$ . The considered $\ell_0$ -norm approximation allows optimizing Eq. (4) via gradient descent. However, using such a smooth approximation tends to promote solutions that are not fully sparse, i.e., with many components that are very close to zero but not exactly equal to zero, thereby yielding inflated $\ell_0$ -norm values. To overcome this issue, we introduce an adaptive projection operator $\Pi_{\tau}$ that sets to zero the components with a perturbation intensity lower than a given sparsity
|
| 100 |
+
|
| 101 |
+
Input: $\mathbf{x} \in [0,1]^d$ , the input sample; y, the true class label; $\theta$ , the target model; N, the number of iterations; $\eta_0 = 1.0$ , the initial step size; $\sigma = 10^{-3}$ , the $\ell_0$ -norm smoothing hyperparameter; $\tau_0 = 0.3$ , the initial sparsity threshold; $t = 0.01$ , the sparsity threshold adjustment factor.
|
| 102 |
+
|
| 103 |
+
Output: $\mathbf{x}^{\star}$ , the minimum $\ell_0$ -norm adversarial example.
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\begin{array}{l} 1:\ \boldsymbol{\delta} \leftarrow \mathbf{0}; \quad \boldsymbol{\delta}^{\star} \leftarrow \infty; \quad \tau \leftarrow \tau_0; \quad \eta \leftarrow \eta_0 \\ 2:\ \textbf{for } i \textbf{ in } 1, \dots, N \textbf{ do} \end{array}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
Algorithm 1 $\sigma$ -zero Attack Algorithm.
|
| 110 |
+
3: $\nabla \mathbf{g}\gets \nabla_{\pmb{\delta}}[\mathcal{L}(\mathbf{x} + \pmb {\delta},y,\pmb {\theta}) + \frac{1}{d}\hat{\ell}_0(\pmb {\delta},\sigma)]$ Gradient Descent for Eq. (4).
|
| 111 |
+
4: $\nabla \mathbf{g}\gets \nabla \mathbf{g} / \| \nabla \mathbf{g}\|_{\infty}$ Gradient Normalization.
|
| 112 |
+
5: $\pmb{\delta} \leftarrow \mathrm{clip}(\mathbf{x} + [\pmb {\delta} - \eta \cdot \nabla \mathbf{g}]) - \mathbf{x}$ Box Constraints.
|
| 113 |
+
6: $\pmb{\delta} \leftarrow \Pi_{\tau}(\pmb{\delta})$ Adaptive Projection Operator.
|
| 114 |
+
7: $\eta \gets \mathrm{cosine\_annealing}(\eta_0,i)$ Learning Rate Decay.
|
| 115 |
+
8: if $\mathcal{L}(\mathbf{x} + \pmb {\delta},y,\pmb {\theta})\leq 0$: $\tau \gets \tau + t\cdot \eta$; else: $\tau \gets \tau - t\cdot \eta$ Adaptive Adjustment for $\tau$.
|
| 116 |
+
9: if $\mathcal{L}(\mathbf{x} + \pmb {\delta},y,\pmb {\theta})\leq 0\wedge \| \pmb {\delta}\| _0 < \| \pmb {\delta}^{\star}\| _0$: $\pmb{\delta}^{\star}\gets \pmb{\delta}$ Update Best Solution.
|
| 117 |
+
|
| 118 |
+
10 end
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\begin{array}{l} 11:\ \textbf{if } \mathcal{L}(\mathbf{x} + \boldsymbol{\delta}^{\star}, y, \boldsymbol{\theta}) > 0: \ \boldsymbol{\delta}^{\star} \leftarrow \infty \\ 12:\ \textbf{return } \mathbf{x}^{\star} \leftarrow \mathbf{x} + \boldsymbol{\delta}^{\star} \end{array}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
threshold $\tau$ in each iteration. The sparsity threshold $\tau$ is initialized with a starting value $\tau_0$ and then dynamically adjusted for each sample during each iteration; in particular, it is increased to find sparser perturbations when the current sample is already adversarial, while it is decreased otherwise. The updates to $\tau$ are proportional to the step size and follow its annealing strategy, as detailed below.
|
| 125 |
+
|
| 126 |
+
Solution Algorithm. Our attack, given as Algorithm 1, solves the problem in Eqs. (4)-(5) via a fast and memory-efficient gradient-based optimization. After initializing the adversarial perturbation $\delta = 0$ (line 1), it computes the gradient of the objective in Eq. (4) with respect to $\delta$ (line 3). The gradient is then normalized such that its largest components (in absolute value) equal $\pm 1$ (line 4). This stabilizes the optimization by making the update independent from the gradient size, and also makes the selection of the step size independent from the input dimensionality (Rony et al., 2018; Pintor et al., 2021). We then update $\delta$ to minimize the objective via gradient descent while also enforcing the box constraints in Eq. (5) through the usage of the clip operator (line 5). We increase sparsity in $\delta$ by zeroing all components lower than the current sparsity threshold $\tau$ (line 6), as discussed in the previous paragraph. We then decrease the step size $\eta$ via cosine annealing (line 7), as suggested by Rony et al. (2018); Pintor et al. (2021), and adjust the sparsity threshold $\tau$ accordingly (line 8). In particular, if the current sample is adversarial, we increase $\tau$ by $t \cdot \eta$ to promote sparser perturbations; otherwise, we decrease $\tau$ by the same amount to promote the minimization of $\mathcal{L}$ . The above process is repeated for $N$ iterations while keeping track of the best solution found, i.e., the adversarial perturbation $\delta^{\star}$ with the lowest $\ell_0$ norm (line 9). If no adversarial example is found, the algorithm sets $\delta^{\star} = \infty$ (line 11). It terminates by returning $\mathbf{x}^{\star} = \mathbf{x} + \delta^{\star}$ (line 12).
|
| 127 |
+
|
| 128 |
+
Remarks. To summarize, the main contributions behind $\sigma$ -zero are: (i) the use of a smooth $\ell_0$ -norm approximation, along with the definition of an appropriate objective (Eq. 4), to enable optimizing $\ell_0$ -norm adversarial examples via gradient descent; and (ii) the introduction of an adaptive projection operator to further improve sparsity during the optimization. Our algorithm leverages also common strategies like gradient normalization and step size annealing to speed up convergence. As reported by our experiments, $\sigma$ -zero provides a more effective and efficient $\ell_0$ -norm attack that (i) is robust to different hyperparameter choices; (ii) does not require any adversarial initialization; and (iii) enables more reliable robustness evaluations, being able to find adversarial examples also when the competing attacks may fail (Carlini et al., 2019; Pintor et al., 2022).
|
| 129 |
+
|
| 130 |
+
# 3 EXPERIMENTS
|
| 131 |
+
|
| 132 |
+
We report here an extensive experimental evaluation comparing $\sigma$ -zero against 11 state-of-the-art sparse attacks, including both $\ell_0$ - and $\ell_1$ -norm attacks. We test all attacks using different settings on 18 distinct models and 3 different datasets, yielding almost 450 different comparisons in total.
|
| 133 |
+
|
| 134 |
+
# 3.1 EXPERIMENTAL SETUP
|
| 135 |
+
|
| 136 |
+
Datasets. We consider the three most popular datasets used for benchmarking adversarial robustness: MNIST (LeCun & Cortes, 2005), CIFAR-10 (Krizhevsky, 2009) and ImageNet (Krizhevsky et al., 2012). To evaluate the attack performance, we use the entire test set for MNIST and CIFAR-10 (with a batch size of 32), and a subset of 1000 test samples for ImageNet (with a batch size of 16).
|
| 137 |
+
|
| 138 |
+
Models. We use a selection of both baseline and robust models to evaluate the attacks under different conditions. We evaluate $\sigma$ -zero on a vast set of models to ensure its broad effectiveness and expose vulnerabilities that may not be revealed by other attacks (Croce & Hein, 2021). For the MNIST dataset, we consider two adversarially trained convolutional neural network (CNN) models by Rony et al. (2021), i.e., CNN-DDN and CNN-Trades. These models have been trained to be robust to both $\ell_2$ and $\ell_{\infty}$ adversarial attacks. We denote them M1 and M2, respectively. For the CIFAR-10 and ImageNet datasets, we employ state-of-the-art robust models from RobustBench (Croce et al., 2021) and the paper's official repositories. For CIFAR-10, we adopt twelve models, denoted as C1-C12. C1 (Carmon et al., 2019) and C2 (Augustin et al., 2020) combine training data augmentation with adversarial training to improve robustness to $\ell_{\infty}$ and $\ell_2$ attacks. C3 (Croce & Hein, 2021) and C4 (Jiang et al., 2023) are $\ell_1$ robust models. C5 (Croce et al., 2021) is a non-robust WideResNet-28-10 model. C6 (Gowal et al., 2021) uses generative models to artificially augment the original training set and improve adversarial robustness to generic $\ell_p$ -norm attacks. C7 (Engstrom et al., 2019) is an adversarial trained model that is robust against $\ell_2$ -norm attacks. C8 (Chen et al., 2020) is a robust ensemble model. C9 (Xu et al., 2023) is a recently proposed adversarial training defense robust to $\ell_2$ attacks. C10 (Addepalli et al., 2022) enforces diversity during data augmentation and combines it with adversarial training. Lastly, C11 (Zhong et al., 2024) and C12 (Zhong et al., 2024) are two adversarial trained models robust against $\ell_0$ -norm adversarial perturbations. 
For ImageNet, we consider a pretrained ResNet-18 denoted with I1 (He et al., 2015), and five robust models to $\ell_{\infty}$ -attacks, denoted with I2 (Engstrom et al., 2019), I3 (Hendrycks et al., 2021), I4 (Debenedetti et al., 2023), I5 (Wong et al., 2020), and I6 (Salman et al., 2020). Lastly, in the appendix, we present two $\ell_0$ -robust models, C11 (Zhong et al., 2024) and C12 (Zhong et al., 2024), for CIFAR-10, along with two large $\ell_{\infty}$ -robust models, I7 (Peng et al., 2023) and I8 (Mo et al., 2022), for ImageNet.
|
| 139 |
+
|
| 140 |
+
Attacks. We compare $\sigma$ -zero against the following state-of-the-art minimum-norm attacks, in their $\ell_0$ -norm variants: the Voting Folded Gaussian Attack (VFGA) attack (Césaire et al., 2021), the Primal-Dual Proximal Gradient Descent (PDPGD) attack (Matyasko & Chau, 2021), the Brendel & Bethge (BB) attack (Brendel et al., 2019a), including also its variant with adversarial initialization (BBadv), and the Fast Minimum Norm (FMN) attack (Pintor et al., 2021). We also consider two state-of-the-art $\ell_1$ -norm attacks as additional baselines, i.e., the Elastic-Net (EAD) attack (Chen et al., 2018) and SparseFool (SF) by Modas et al. (2019). All attacks are set to manipulate the input values independently; e.g., for CIFAR-10, the number of modifiable inputs is $3 \times 32 \times 32 = 3072$ .
|
| 141 |
+
|
| 142 |
+
Hyperparameters. We run our experiments using the default hyperparameters from the original implementations provided in the authors' repositories, AdversarialLib (Rony & Ben Ayed) and Foolbox (Rauber et al., 2017). We set the maximum number of iterations to $N = 1000$ to ensure that all attacks reach convergence (Pintor et al., 2022). For $\sigma$ -zero, we set $\eta_0 = 1$ , $\tau_0 = 0.3$ , $t = 0.01$ , and $\sigma = 10^{-3}$ , and keep the same configuration for all models and datasets.
|
| 143 |
+
|
| 144 |
+
Evaluation Metrics. For each attack, we report the Attack Success Rate (ASR) at different values of $k$ , denoted with $\mathrm{ASR}_k$ , i.e., the fraction of successful attacks for which $\| \delta^{\star} \|_0 \leq k$ , and the median value of $\| \delta^{\star} \|_0$ over the test samples, denoted with $\tilde{\ell}_0$ . We compare the computational effort of each attack considering the mean runtime (s) (per sample), the mean number of queries (q) (i.e., the total number of forwards and backwards required to perform the attack, divided by the number of samples), and the Video Random Access Memory (VRAM) consumed by the Graphics Processing Unit (GPU). We measure the runtime on a workstation with an NVIDIA A100 Tensor Core GPU (40 GB memory) and two Intel® Xeon® Gold 6238R processors. We evaluate memory consumption as the maximum VRAM used among all batches, representing the minimum requirement to run without failure.
|
| 145 |
+
|
| 146 |
+
3We utilize the Foolbox DatasetAttack (Foolbox, 2017) for adversarial initialization.
|
| 147 |
+
4Additional results using only $N = 100$ steps are reported in Appendix B.1
|
| 148 |
+
5To show that no specific hyperparameter tuning is required, additional results are reported in Appendix A.2
|
| 149 |
+
${}^{6}$ If no adversarial example is found for a given $\mathbf{x}$ , we set $\| \boldsymbol{\delta}^{\star} \|_0 = \infty$ , as done by Brendel et al. (2019a).
|
| 150 |
+
|
| 151 |
+
Table 1: Minimum-norm comparison results on MNIST, CIFAR10 and ImageNet with $N = 1000$ . For each attack and model (M), we report ASR at $k = 24, 50, \infty$ , median perturbation size $\tilde{\ell}_0$ , mean runtime $s$ (in seconds), mean number of queries $q$ (in thousands), and maximum VRAM usage (in GB). When VFGA exceeds the VRAM limit, we re-run it using a smaller batch size, increasing its runtime $s$ . We denote those cases with the symbol $\star$ . Remaining models in Appendix B, Table 6.
|
| 152 |
+
|
| 153 |
+
<table><tr><td>Attack</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR∞</td><td>\( \tilde{\ell}_{0} \)</td><td>s</td><td>q</td><td>VRAM</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR∞</td><td>\( \tilde{\ell}_{0} \)</td><td>s</td><td>q</td><td>VRAM</td></tr><tr><td colspan="17">MNIST</td></tr><tr><td>SF</td><td></td><td>6.66</td><td>6.76</td><td>96.98</td><td>469</td><td>1.07</td><td>0.18</td><td>0.06</td><td></td><td>1.03</td><td>1.21</td><td>91.68</td><td>463</td><td>2.87</td><td>0.86</td><td>0.07</td></tr><tr><td>EAD</td><td></td><td>3.83</td><td>53.66</td><td>100.0</td><td>49</td><td>0.47</td><td>6.28</td><td>0.05</td><td></td><td>2.13</td><td>55.57</td><td>100.0</td><td>48</td><td>0.50</td><td>6.73</td><td>0.05</td></tr><tr><td>PDPGD</td><td></td><td>26.77</td><td>74.08</td><td>100.0</td><td>38</td><td>0.23</td><td>2.00</td><td>0.04</td><td></td><td>16.91</td><td>66.30</td><td>100.0</td><td>42</td><td>0.23</td><td>2.00</td><td>0.04</td></tr><tr><td>VFGA</td><td>MI</td><td>43.58</td><td>82.42</td><td>99.98</td><td>27</td><td>0.05</td><td>0.77</td><td>0.21</td><td>M2</td><td>5.00</td><td>39.33</td><td>99.95</td><td>57</td><td>0.05</td><td>1.33</td><td>0.21</td></tr><tr><td>FMN</td><td></td><td>35.90</td><td>93.74</td><td>100.0</td><td>29</td><td>0.21</td><td>2.00</td><td>0.04</td><td></td><td>50.74</td><td>91.84</td><td>99.41</td><td>24</td><td>0.22</td><td>2.00</td><td>0.04</td></tr><tr><td>BB</td><td></td><td>71.23</td><td>97.86</td><td>100.0</td><td>18</td><td>0.90</td><td>2.99</td><td>0.05</td><td></td><td>56.53</td><td>91.62</td><td>100.0</td><td>18</td><td>0.74</td><td>3.71</td><td>0.05</td></tr><tr><td>BBadv</td><td></td><td>67.06</td><td>91.23</td><td>100.0</td><td>19</td><td>0.77</td><td>2.01</td><td>0.07</td><td></td><td>29.17</td><td>40.88</td><td>100.0</td><td>89</td><td>0.71</td><td>2.01</td><td>0.07</td></tr><tr><td>σ-zero</td><td></td><td>83.79</td><td>99.98</td><td>100.0</td><td>16</td><td>0.31</td><td>2.00</td><td>0.04</td><td></td><td>98.03</
td><td>100.0</td><td>100.0</td><td>9</td><td>0.31</td><td>2.00</td><td>0.04</td></tr><tr><td colspan="17">CIFAR-10</td></tr><tr><td>SF</td><td></td><td>18.71</td><td>18.77</td><td>56.39</td><td>3072</td><td>11.31</td><td>1.40</td><td>1.62</td><td rowspan="7">C3</td><td>20.46</td><td>24.36</td><td>58.29</td><td>3072</td><td>1.63</td><td>0.48</td><td>0.66</td></tr><tr><td>EAD</td><td></td><td>16.32</td><td>30.38</td><td>100.0</td><td>90</td><td>1.92</td><td>5.70</td><td>1.47</td><td>13.01</td><td>13.23</td><td>100.0</td><td>800</td><td>0.94</td><td>4.89</td><td>0.65</td></tr><tr><td>PDPGD</td><td rowspan="5">CI</td><td>26.84</td><td>42.50</td><td>100.0</td><td>63</td><td>0.64</td><td>2.00</td><td>1.32</td><td>22.30</td><td>35.13</td><td>100.0</td><td>75</td><td>0.41</td><td>2.00</td><td>0.59</td></tr><tr><td>VFGA</td><td>51.06</td><td>75.37</td><td>99.92</td><td>24</td><td>0.59</td><td>0.78</td><td>11.71</td><td>28.47</td><td>49.98</td><td>99.72</td><td>51</td><td>0.32</td><td>1.25</td><td>4.44</td></tr><tr><td>FMN</td><td>48.89</td><td>74.70</td><td>100.0</td><td>26</td><td>0.59</td><td>2.00</td><td>1.31</td><td>27.45</td><td>48.87</td><td>100.0</td><td>52</td><td>0.24</td><td>2.00</td><td>0.60</td></tr><tr><td>BB</td><td>13.27</td><td>14.24</td><td>14.70</td><td>∞</td><td>0.63</td><td>2.05</td><td>1.47</td><td>16.88</td><td>22.91</td><td>27.64</td><td>∞</td><td>1.04</td><td>2.25</td><td>0.65</td></tr><tr><td>BBadv</td><td>65.96</td><td>90.57</td><td>100.0</td><td>16</td><td>4.68</td><td>2.01</td><td>1.64</td><td>36.47</td><td>72.43</td><td>100.0</td><td>34</td><td>5.28</td><td>2.01</td><td>0.64</td></tr><tr><td>σ-zero</td><td></td><td>76.53</td><td>95.38</td><td>100.0</td><td>11</td><td>0.73</td><td>2.00</td><td>1.53</td><td></td><td>38.60</td><td>73.02</td><td>100.0</td><td>32</td><td>0.43</td><td>2.00</td><td>0.71</td></tr><tr><td>SF</td><td></td><td>19.66</td><td>21.22</td><td>98.74</td><td>3070</td><td>3.62</td><td>0.46</td><td>1.90</td><td 
rowspan="7">C4</td><td>31.76</td><td>43.07</td><td>91.14</td><td>69</td><td>4.32</td><td>1.49</td><td>0.66</td></tr><tr><td>EAD</td><td></td><td>9.73</td><td>11.42</td><td>100.0</td><td>360</td><td>2.53</td><td>5.62</td><td>1.89</td><td>24.21</td><td>24.78</td><td>100.0</td><td>768</td><td>1.04</td><td>4.99</td><td>0.65</td></tr><tr><td>PDPGD</td><td rowspan="5">CI</td><td>28.02</td><td>45.15</td><td>100.0</td><td>55</td><td>1.12</td><td>2.00</td><td>1.8</td><td>26.89</td><td>42.38</td><td>100.0</td><td>66</td><td>0.40</td><td>2.00</td><td>0.60</td></tr><tr><td>VFGA</td><td>39.58</td><td>66.50</td><td>99.62</td><td>34</td><td>0.48</td><td>0.94</td><td>16.53</td><td>46.71</td><td>69.47</td><td>99.83</td><td>28</td><td>0.25</td><td>0.82</td><td>4.22</td></tr><tr><td>FMN</td><td>39.30</td><td>71.70</td><td>100.0</td><td>33</td><td>1.08</td><td>2.00</td><td>1.8</td><td>43.06</td><td>62.96</td><td>100.0</td><td>34</td><td>0.35</td><td>2.00</td><td>0.59</td></tr><tr><td>BB</td><td>38.73</td><td>56.78</td><td>58.64</td><td>33</td><td>2.31</td><td>2.89</td><td>1.89</td><td>25.95</td><td>27.98</td><td>29.50</td><td>∞</td><td>0.54</td><td>2.09</td><td>0.65</td></tr><tr><td>BBadv</td><td>70.07</td><td>96.31</td><td>100.0</td><td>17</td><td>3.92</td><td>2.01</td><td>1.99</td><td>53.17</td><td>82.46</td><td>100.0</td><td>22</td><td>3.03</td><td>2.01</td><td>0.65</td></tr><tr><td>σ-zero</td><td></td><td>74.63</td><td>97.55</td><td>100.0</td><td>15</td><td>1.41</td><td>2.00</td><td>1.92</td><td></td><td>55.42</td><td>82.92</td><td>100.0</td><td>20</td><td>0.42</td><td>2.00</td><td>0.72</td></tr><tr><td colspan="17">ImageNet</td></tr><tr><td>EAD</td><td></td><td>35.4</td><td>36.3</td><td>100.0</td><td>460</td><td>4.13</td><td>2.69</td><td>0.46</td><td></td><td>27.0</td><td>28.4</td><td>100.0</td><td>981</td><td>19.25</td><td>5.49</td><td>1.41</td></tr><tr><td>VFGA</td><td 
rowspan="3">II</td><td>57.9</td><td>72.5</td><td>99.9</td><td>14</td><td>1.22*</td><td>1.08</td><td>>40</td><td rowspan="3">I3</td><td>46.7</td><td>59.5</td><td>97.9</td><td>31</td><td>6.93*</td><td>1.98</td><td>>40</td></tr><tr><td>FMN</td><td>62.6</td><td>81.0</td><td>100.0</td><td>12</td><td>0.73</td><td>2.00</td><td>0.66</td><td>49.1</td><td>67.7</td><td>100.0</td><td>25</td><td>1.98</td><td>2.00</td><td>2.30</td></tr><tr><td>BBadv</td><td>77.5</td><td>93.2</td><td>100.0</td><td>7</td><td>231.67</td><td>2.01</td><td>0.72</td><td>64.7</td><td>85.5</td><td>100.0</td><td>14</td><td>205.11</td><td>2.01</td><td>2.41</td></tr><tr><td>σ-zero</td><td></td><td>82.6</td><td>95.9</td><td>100.0</td><td>5</td><td>1.18</td><td>2.00</td><td>0.84</td><td></td><td>66.7</td><td>86.9</td><td>100.0</td><td>13</td><td>2.76</td><td>2.00</td><td>2.52</td></tr><tr><td>EAD</td><td></td><td>46.8</td><td>51.0</td><td>100.0</td><td>42</td><td>18.10</td><td>5.45</td><td>1.42</td><td rowspan="4"></td><td>32.8</td><td>33.5</td><td>100.0</td><td>572</td><td>11.43</td><td>5.34</td><td>1.68</td></tr><tr><td>VFGA</td><td rowspan="3">II</td><td>54.7</td><td>63.4</td><td>96.7</td><td>12</td><td>8.21*</td><td>2.35</td><td>>40</td><td>40.0</td><td>46.5</td><td>95.5</td><td>66</td><td>33.88*</td><td>3.97</td><td>>40</td></tr><tr><td>FMN</td><td>57.8</td><td>67.0</td><td>100.0</td><td>9</td><td>1.97</td><td>2.00</td><td>2.30</td><td>40.3</td><td>47.2</td><td>100.0</td><td>58</td><td>4.28</td><td>2.00</td><td>2.97</td></tr><tr><td>BBadv</td><td>71.0</td><td>82.3</td><td>100</td><td>4</td><td>182.65</td><td>2.01</td><td>2.40</td><td>46.8</td><td>59.8</td><td>100.0</td><td>31</td><td>178.06</td><td>2.01</td><td>3.07</td></tr><tr><td>σ-zero</td><td></td><td>76.9</td><td>87.4</td><td>100.0</td><td>3</td><td>2.75</td><td>2.00</td><td>2.52</td><td></td><td>50.7</td><td>65.1</td><td>100.0</td><td>23</td><td>5.72</td><td>2.00</td><td>3.20</td></tr></table>
|
| 154 |
+
|
| 155 |
+
# 3.2 EXPERIMENTAL RESULTS
|
| 156 |
+
|
| 157 |
+
We report the success rate and computational effort metrics of $\sigma$ -zero against minimum-norm attacks in Table 1 and fixed-budget attacks in Table 3-4. In these tables, we consider the most robust models for each dataset, and we provide the remaining results in Appendix B. Finally, for ImageNet, we narrow our analysis to EAD, FMN, BBadv, and VFGA minimum-norm attacks, as they surpass competing attacks on MNIST and CIFAR-10 in terms of ASR, perturbation size, or execution time.
|
| 158 |
+
|
| 159 |
+
Effectiveness. The median values of $||\delta^{\star}||_{0}$ , denoted as $\tilde{\ell}_0$ , and the ASRs are reported in Table 1 for all models and datasets. To facilitate comparison, the attacks are sorted from the least to the most effective, on average. In all dataset-model configurations, $\sigma$ -zero significantly outperforms all the considered attacks. Taking the best-performing attack among the fastest competitors as a reference (i.e., FMN), $\sigma$ -zero is able to find smaller perturbations and higher ASRs in all configurations. In particular, on CIFAR-10, $\sigma$ -zero reduces the median number of manipulated features from 52 to 32 against the most robust model (C3), with an average reduction of $49\%$ across all models. On ImageNet, this improvement is even more pronounced, with a reduction of up to $58\%$ . In the best case (I4), the median $||\delta^{\star}||_{0}$ is reduced from 58 to 23, and in the worst case (I2), from 9 to 3. Alternatively, the most competitive attack in finding small perturbations is BBadv, which is significantly slower and requires starting from an already-adversarial input. The $\mathrm{ASR}_{\infty}$ of BB
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Figure 2: Robustness evaluation curves (ASR vs. perturbation budget $k$ ) for M2 on MNIST (left), C1 on CIFAR-10 (middle), and I1 on ImageNet (right).
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
|
| 168 |
+
(i.e., without adversarial initialization) indeed decreases with increasing input dimensionality (e.g., CIFAR-10). This occurs because BB often stops unexpectedly before reaching the specified number of steps due to initialization failures; in particular, Table 1 shows that the median perturbation size found by BB is sometimes $\infty$ , as its $\mathrm{ASR}_{\infty}$ is lower than $50\%$ . Although BBadv does not suffer from the same issue, as it leverages adversarial initialization, it is still outperformed by $\sigma$ -zero. Specifically, $\sigma$ -zero reduces the $\ell_0$ norm of the adversarial examples from 16 to 11 in the best case (C1), while achieving an average improvement of $24\%$ across all dataset-model configurations.
|
| 169 |
+
|
| 170 |
+
Efficiency. We evaluate the computational effort required to run each attack by reporting in Table 1 the mean runtime $s$ (in seconds), the mean number of queries $q$ issued to the model (in thousands), and the maximum VRAM used. Note that, while the runtime $s$ and the consumed VRAM may depend on the attack implementation, the number of queries $q$ counts the total number of forward and backward passes performed by the attack, thus providing a fairer evaluation of the attack complexity. In fact, some attacks perform more than 2000 queries even if $N = 1000$ , i.e., they perform more than one forward and one backward pass per iteration (see, e.g., EAD and BB). Other attacks, instead, might use less than 2000 queries as they implement early stopping strategies. The results indicate that $\sigma$ -zero exhibits similar runtime performance when compared to the fastest algorithms FMN, PDPGD, and VFGA, while preserving higher effectiveness. In contrast, when compared against the BBadv attack, which competes in terms of $\tilde{\ell}_0$ , our attack is much faster across all the dataset-model configurations, especially for ImageNet. For example, $\sigma$ -zero is 10 times faster than BBadv on C4 and 100 times faster on I3 on ImageNet. This confirms that $\sigma$ -zero establishes a better effectiveness-efficiency trade-off than that provided by state-of-the-art $\ell_0$ -norm attacks.
|
| 171 |
+
|
| 172 |
+
Reliability. Complementary to Table 1, we present the robustness evaluation curves in Figure 2 for each attack on M2, C1, and I1. In Appendix B.3, we include similar curves for all other configurations. These curves go beyond the median statistic and $\mathrm{ASR}_k$ alone, providing further evidence that $\sigma$ -zero achieves higher ASRs with smaller $\ell_0$ -norm perturbations compared to the competing attacks. More importantly, the ASR of $\sigma$ -zero almost always reaches $100\%$ as the perturbation budget grows, meaning that its optimization only rarely fails to find an adversarial example. In Appendix B.1, we further demonstrate that even when the number of iterations is reduced to $N = 100$ , $\sigma$ -zero consistently achieves an $\mathrm{ASR}_{\infty}$ of $100\%$ across all models. This is not observed with other attacks, which often fail when using fewer iterations, thereby increasing the risk of overestimating adversarial robustness. These results reinforce our previous findings, confirming that $\sigma$ -zero can help mitigate the issue of overestimating adversarial robustness – a crucial aspect to foster scientific progress in defense developments and evaluations (Carlini et al., 2019; Pintor et al., 2022).
|
| 173 |
+
|
| 174 |
+
Ablation Study. In Table 2 we present an ablation study to evaluate the relevance of $\sigma$ -zero's components. Our findings indicate that all the non-trivial components in $\sigma$ -zero are essential for ensuring the effectiveness of the attack. Specifically, we observe that the $\ell_0$ -norm approximation $\hat{\ell}_0$ (Eq. 7, line 3) leads the optimization algorithm to perturb all input features, albeit with small contributions. The projection operator (line 6) plays a crucial role by significantly decreasing the number of perturbed features, effectively removing the least significant contributions. Furthermore, gradient normalization (line 4) accelerates convergence, enhancing efficiency. Lastly, the adaptive projection operator (line 8) fine-tunes the results, reduces the number of perturbed features, and mitigates the dependency on hyperparameter choices. These results underline the importance of each component in $\sigma$ -zero, highlighting their contributions to the overall performance of the attack.
|
| 175 |
+
|
| 176 |
+
Table 2: Ablation study on the $\sigma$ -zero components integrated in Algorithm 1. Columns describe respectively: Gradient normalization factor (line 4); dynamic projection adjustment (line 8); projection operator $\Pi_{\tau}$ (line 6); and the $\ell_0$ norm approximation $\hat{\ell}_0$ (line 3).
|
| 177 |
+
|
| 178 |
+
<table><tr><td>Model</td><td>Normalization</td><td>Adaptive τ</td><td>Projection</td><td>\( \hat{\ell}_{0} \)</td><td>\( \mathrm{ASR}_{10} \)</td><td>\( \mathrm{ASR}_{50} \)</td><td>ASR</td><td>\( \tilde{\ell}_{0} \)</td></tr><tr><td rowspan="6">C10</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>21.68</td><td>73.02</td><td>100.0</td><td>32</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>21.89</td><td>71.66</td><td>100.0</td><td>32</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>16.81</td><td>39.76</td><td>100.0</td><td>65</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>12.95</td><td>13.23</td><td>100.0</td><td>505</td></tr><tr><td></td><td></td><td></td><td>✓</td><td>12.95</td><td>12.95</td><td>100.0</td><td>3004</td></tr><tr><td>✓</td><td></td><td></td><td>✓</td><td>12.95</td><td>12.95</td><td>100.0</td><td>3070</td></tr><tr><td rowspan="6">C5</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>37.27</td><td>82.92</td><td>100.0</td><td>20</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>37.01</td><td>79.83</td><td>100.0</td><td>21</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>29.56</td><td>52.83</td><td>100.0</td><td>46</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>25.46</td><td>32.84</td><td>100.0</td><td>144</td></tr><tr><td></td><td></td><td></td><td>✓</td><td>23.78</td><td>23.78</td><td>100.0</td><td>3064</td></tr><tr><td>✓</td><td></td><td></td><td>✓</td><td>23.78</td><td>23.78</td><td>100.0</td><td>3068</td></tr></table>
|
| 179 |
+
|
| 180 |
+
Table 3: Fixed-budget comparison results with $N = 1000$ ( $N = 2000$ for Sparse-RS) on MNIST and CIFAR-10 at budgets $k = 24, 50, 100$ . Columns $q_{24}$ and $s_{24}$ show the average number of queries (in thousands) and the average execution time per sample (in seconds) at $k = 24$ .
|
| 181 |
+
|
| 182 |
+
<table><tr><td>Attack</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR100</td><td>q24</td><td>s24</td><td>VRAM</td><td>M</td><td>ASR24</td><td>ASR50</td><td>ASR100</td><td>q24</td><td>s24</td><td>VRAM</td></tr><tr><td colspan="15">MNIST</td></tr><tr><td>PGD-ℓ0</td><td></td><td>73.99</td><td>99.90</td><td>100.0</td><td>2.00</td><td>0.09</td><td>0.04</td><td></td><td>61.87</td><td>94.15</td><td>98.50</td><td>2.00</td><td>0.09</td><td>0.04</td></tr><tr><td>Sparse-RS</td><td></td><td>79.54</td><td>96.35</td><td>99.79</td><td>0.83</td><td>0.21</td><td>0.04</td><td></td><td>98.92</td><td>99.96</td><td>100.0</td><td>0.24</td><td>0.07</td><td>0.04</td></tr><tr><td>sPGDp</td><td>M1</td><td>65.55</td><td>97.97</td><td>99.99</td><td>0.46</td><td>0.09</td><td>0.05</td><td>M2</td><td>67.92</td><td>98.57</td><td>99.97</td><td>0.92</td><td>0.08</td><td>0.05</td></tr><tr><td>sPGDu</td><td></td><td>82.79</td><td>99.65</td><td>100.0</td><td>0.09</td><td>0.08</td><td>0.05</td><td></td><td>62.25</td><td>98.11</td><td>99.99</td><td>1.00</td><td>0.09</td><td>0.05</td></tr><tr><td>σ-zero</td><td></td><td>83.71</td><td>99.98</td><td>100.0</td><td>0.43</td><td>0.02</td><td>0.06</td><td></td><td>98.11</td><td>100.0</td><td>100.0</td><td>0.14</td><td>0.01</td><td>0.06</td></tr><tr><td colspan="15">CIFAR-10</td></tr><tr><td>PGD-ℓ0</td><td></td><td>38.18</td><td>59.67</td><td>87.19</td><td>2.00</td><td>0.78</td><td>1.90</td><td></td><td>22.99</td><td>36.20</td><td>67.54</td><td>2.00</td><td>0.35</td><td>0.69</td></tr><tr><td>Sparse-RS</td><td 
rowspan="4">C1</td><td>72.51</td><td>86.59</td><td>94.28</td><td>0.77</td><td>0.36</td><td>1.95</td><td></td><td>30.87</td><td>45.65</td><td>63.26</td><td>1.47</td><td>0.28</td><td>0.68</td></tr><tr><td>sPGDp</td><td>66.37</td><td>89.21</td><td>99.36</td><td>0.74</td><td>0.41</td><td>2.06</td><td>C3</td><td>31.82</td><td>58.62</td><td>93.19</td><td>1.39</td><td>0.17</td><td>0.73</td></tr><tr><td>sPGDu</td><td>66.33</td><td>91.07</td><td>99.75</td><td>0.72</td><td>0.41</td><td>2.06</td><td></td><td>36.16</td><td>70.06</td><td>98.07</td><td>1.30</td><td>0.16</td><td>0.73</td></tr><tr><td>σ-zero</td><td>77.08</td><td>95.33</td><td>99.95</td><td>0.65</td><td>0.29</td><td>2.07</td><td></td><td>38.67</td><td>73.00</td><td>98.53</td><td>1.33</td><td>0.15</td><td>0.75</td></tr><tr><td>PGD-ℓ0</td><td></td><td>32.41</td><td>59.19</td><td>89.22</td><td>2.00</td><td>0.57</td><td>2.46</td><td></td><td>34.35</td><td>44.99</td><td>68.61</td><td>2.00</td><td>0.35</td><td>0.70</td></tr><tr><td>Sparse-RS</td><td rowspan="4">C2</td><td>59.24</td><td>79.81</td><td>92.43</td><td>1.04</td><td>0.35</td><td>2.46</td><td></td><td>49.35</td><td>63.01</td><td>76.51</td><td>1.11</td><td>0.37</td><td>0.68</td></tr><tr><td>sPGDp</td><td>58.91</td><td>88.15</td><td>99.42</td><td>0.89</td><td>0.39</td><td>2.57</td><td>C4</td><td>50.41</td><td>75.86</td><td>97.52</td><td>1.02</td><td>0.18</td><td>0.73</td></tr><tr><td>sPGDu</td><td>64.8</td><td>93.15</td><td>99.92</td><td>0.76</td><td>0.48</td><td>2.56</td><td></td><td>55.89</td><td>84.64</td><td>99.56</td><td>0.91</td><td>0.19</td><td>0.73</td></tr><tr><td>σ-zero</td><td>75.09</td><td>97.67</td><td>100.0</td><td>0.65</td><td>0.17</td><td>2.68</td><td></td><td>55.69</td><td>82.72</td><td>99.07</td><td>0.94</td><td>0.11</td><td>0.75</td></tr></table>
|
| 183 |
+
|
| 184 |
+
Comparison with Fixed-budget Attacks. We complement our analysis by comparing $\sigma$ -zero with three fixed-budget $\ell_0$ -norm attacks, i.e., the $\ell_0$ -norm Projected Gradient Descent (PGD- $\ell_0$ ) attack (Croce & Hein, 2019), the Sparse Random Search (Sparse-RS) attack (Croce et al., 2022) and the Sparse-PGD attack (Zhong et al., 2024). For Sparse-PGD, we consider the implementation with sparse ( $\mathrm{sPGD}_p$ ) and with unprojected ( $\mathrm{sPGD}_u$ ) gradient. In contrast to minimum-norm attacks, fixed-budget attacks optimize adversarial examples within a given maximum perturbation budget $k$ . For a fairer comparison, as done in fixed-budget approaches, we early stop the $\sigma$ -zero optimization process as soon as an adversarial example with an $\ell_0$ -norm perturbation smaller than $k$ is found. In these evaluations, we set $N = 1000$ for $\sigma$ -zero, PGD- $\ell_0$ , $\mathrm{sPGD}_p$ , and $\mathrm{sPGD}_u$ , while using $N = 2000$ for Sparse-RS. Therefore, when using $N = 1000$ steps for $\sigma$ -zero (which amounts to performing 1000 forward and 1000 backward calls), we set $N = 2000$ steps for Sparse-RS (which
|
| 185 |
+
|
| 186 |
+
Table 4: Fixed-budget comparison results with $N = 1000$ ($N = 2000$ for Sparse-RS) on ImageNet at budgets $k = 100, 150$ . See the caption of Table 3 for further details.
|
| 187 |
+
|
| 188 |
+
<table><tr><td>Attack</td><td>M</td><td>ASR100</td><td>ASR150</td><td>q100</td><td>s100</td><td>VRAM</td><td>M</td><td>ASR100</td><td>ASR150</td><td>q100</td><td>s100</td><td>VRAM</td></tr><tr><td colspan="13">ImageNet</td></tr><tr><td>Sparse-RS</td><td rowspan="3">I1</td><td>89.3</td><td>91.5</td><td>0.39</td><td>0.32</td><td>1.29</td><td rowspan="3">I2</td><td>81.1</td><td>84.1</td><td>0.53</td><td>0.5</td><td>4.39</td></tr><tr><td>sPGDp</td><td>95.4</td><td>98.5</td><td>0.31</td><td>0.16</td><td>1.40</td><td>85.6</td><td>91.2</td><td>0.33</td><td>0.64</td><td>4.48</td></tr><tr><td>sPGDu</td><td>93.6</td><td>97.8</td><td>0.33</td><td>0.12</td><td>1.40</td><td>82.6</td><td>88.7</td><td>0.37</td><td>0.39</td><td>4.49</td></tr><tr><td>σ-zero</td><td></td><td>99.7</td><td>100.0</td><td>0.19</td><td>0.06</td><td>1.79</td><td></td><td>94.7</td><td>97.1</td><td>0.15</td><td>0.17</td><td>4.90</td></tr><tr><td>Sparse-RS</td><td rowspan="3">I3</td><td>69.1</td><td>72.2</td><td>0.81</td><td>0.62</td><td>4.39</td><td rowspan="3">I4</td><td>45.9</td><td>47.4</td><td>1.17</td><td>1.12</td><td>5.72</td></tr><tr><td>sPGDp</td><td>85.4</td><td>93.4</td><td>0.32</td><td>0.55</td><td>4.49</td><td>66.3</td><td>74.9</td><td>0.73</td><td>1.39</td><td>5.84</td></tr><tr><td>sPGDu</td><td>83.9</td><td>92.1</td><td>0.35</td><td>0.39</td><td>4.49</td><td>66.0</td><td>76.0</td><td>0.72</td><td>1.01</td><td>5.84</td></tr><tr><td>σ-zero</td><td></td><td>97.7</td><td>99.6</td><td>0.34</td><td>0.37</td><td>4.90</td><td></td><td>78.8</td><td>85.8</td><td>0.49</td><td>0.70</td><td>6.29</td></tr></table>
|
| 189 |
+
|
| 190 |
+
amounts to performing 2000 forward calls). Furthermore, to compute the ASR at different $k$ ( $\mathrm{ASR}_k$ ), we separately execute fixed-budget attacks for $k = 24, 50, 100$ features on MNIST and CIFAR-10, and with $k = 100, 150$ features on ImageNet (excluding PGD- $\ell_0$ due to computational demands), reporting only the maximum number of queries and execution time across all distinct runs. We report the average query usage at $k$ ( $\mathbf{q}_k$ ) and the average execution time per sample at $k$ ( $\mathbf{s}_k$ ). We report the execution time $\mathbf{s}_k$ for the smaller $k$ , as it requires, on average, more iterations due to the more challenging problem. The results, shown in Tables 3-4, confirm that $\sigma$ -zero outperforms competing approaches in 17 out of 18 configurations (see Appendix B.2 for additional results). Only against C4 does the fixed-budget attack $\mathrm{sPGD}_u$ slightly increase the ASR. The advantages of $\sigma$ -zero become even more evident when looking at the results on ImageNet, where, on average, it improves the $\mathrm{ASR}_{100}$ by $9.6\%$ across all models in Table 4. The results also indicate that early stopping enables $\sigma$ -zero to save a significant number of queries and runtime while preserving a high ASR. In Appendix B.2 we also report additional comparisons with $N = 2500$ and $N = 5000$ , i.e., a more favorable scenario for the competing attacks, confirming that $\sigma$ -zero remains competitive even at higher budgets.
|
| 191 |
+
|
| 192 |
+
Summary. Our experiments show that $\sigma$ -zero: (i) outperforms minimum-norm attacks by improving the success rate and decreasing the $\ell_0$ norm of the generated adversarial examples (see Table 1 and Appendix B.1); (ii) is significantly faster and scales easily to large datasets (see Table 1 and Appendix B.1); (iii) is robust to hyperparameter selection, not requiring sophisticated and time-consuming tuning (see Appendix A.2); (iv) does not require any adversarial initialization (see Table 1); (v) provides more reliable adversarial robustness evaluations, consistently achieving $100\%$ ASRs (see Table 1, Figure 2, Appendix B.3); and (vi) remains competitive against fixed-budget attacks even when given the same query budget (Tables 3-4).
|
| 193 |
+
|
| 194 |
+
# 4 RELATED WORK
|
| 195 |
+
|
| 196 |
+
Optimizing $\ell_0$ -norm adversarial examples with gradient-based algorithms is challenging due to nonconvex and non-differentiable constraints. We categorize them into two main groups: (i) multiple-norm attacks extended to $\ell_0$ , and (ii) attacks specifically designed to optimize the $\ell_0$ norm.
|
| 197 |
+
|
| 198 |
+
Multiple-norm Attacks Extended to $\ell_0$ . These attacks have been developed to work with multiple $\ell_p$ norms, including extensions for the $\ell_0$ norm. While they can find sparse perturbations, they often rely heavily on heuristics in this setting. Brendel et al. (2019a) initialize the attack from an adversarial example far away from the clean sample and optimize the perturbation by following the decision boundary to get closer to the source sample. In general, the algorithm can be used for any $\ell_p$ norm, including $\ell_0$ , but the individual optimization steps are very costly. Pintor et al. (2021) propose the FMN attack that does not require an initialization step and converges efficiently with lightweight gradient-descent steps. However, their approach was developed to generalize over $\ell_p$ norms, but does not make special adaptations to minimize the $\ell_0$ norm specifically. Matyasko & Chau (2021) use
|
| 199 |
+
|
| 200 |
+
relaxations of the $\ell_0$ norm (e.g., $\ell_{1/2}$ ) to promote sparsity. However, this scheme does not strictly minimize the $\ell_0$ norm, as the relaxation does not set the lowest components exactly to zero.
|
| 201 |
+
|
| 202 |
+
$\ell_0$ -specific Attacks. Croce et al. (2022) introduced Sparse-RS, a random search-based attack that, unlike minimum-norm attacks, aims to find adversarial examples that are misclassified with high confidence within a fixed perturbation budget. On the same track we find Sparse-PGD (Zhong et al., 2024) and PGD- $\ell_0$ (Croce & Hein, 2019), white-box fixed-budget alternatives to Sparse-RS. Lastly, Césaire et al. (2021) inject folded Gaussian noise into selected input components, iteratively finding the set that achieves misclassification with minimal perturbation. However, their method requires considerable memory to explore possible combinations and find an optimal solution, limiting its scalability.
|
| 203 |
+
|
| 204 |
+
Overall, current implementations of $\ell_0$ -norm attacks present a crucial suboptimal trade-off between their success rate and efficiency, i.e., they are either accurate but slow (e.g., BB) or fast but inaccurate (e.g., FMN). This is also confirmed by a recent work that has benchmarked more than 100 gradient-based attacks (Cinà et al., 2025) on 9 additional robust models. In that open-source benchmark, $\sigma$ -zero consistently and significantly outperformed all the existing implementations of competing $\ell_0$ -norm attacks, establishing a performance very close to that of the empirical oracle (obtained by assembling all the attacks tested). In summary, our attack combines the benefits of the two families of attack detailed above, i.e., effectiveness and efficiency, providing the state-of-the-art solution for adversarial robustness evaluations of DNNs when considering $\ell_0$ -norm attacks.
|
| 205 |
+
|
| 206 |
+
# 5 CONCLUSIONS AND FUTURE WORK
|
| 207 |
+
|
| 208 |
+
In this work, we propose $\sigma$ -zero, a novel attack aimed to find minimum $\ell_0$ -norm adversarial examples, based on the following main technical contributions: (i) a differentiable approximation of the $\ell_0$ norm to define a novel, smooth objective that can be minimized via gradient descent; and (ii) an adaptive projection operator to enforce sparsity in the adversarial perturbation, by zeroing out the least relevant features in each iteration. $\sigma$ -zero also leverages specific optimization tricks to stabilize and speed up the optimization. Our extensive experiments demonstrate that $\sigma$ -zero consistently discovers more effective and reliable $\ell_0$ -norm adversarial perturbations across all models and datasets while maintaining computational efficiency and robustness to hyperparameter choices. In conclusion, $\sigma$ -zero emerges as a highly promising candidate to evaluate robustness against $\ell_0$ -norm perturbations and promote the development of novel robust models against sparse attacks.
|
| 209 |
+
|
| 210 |
+
Ethics Statement. Based on our comprehensive analysis, we assert that there are no identifiable ethical considerations or foreseeable negative societal consequences that warrant specific attention within the limits of this study. This study will rather help improve the understanding of adversarial robustness of DNNs and identify potential ways to improve it.
|
| 211 |
+
|
| 212 |
+
Reproducibility. To ensure the reproducibility of our work, we have detailed the experimental setup in Section 3.1 where we describe the datasets, models, and attacks used, along with their respective sources. Additionally, we have provided our source code as part of the supplementary material, which will be made publicly available as open source upon acceptance.
|
| 213 |
+
|
| 214 |
+
# ACKNOWLEDGMENTS
|
| 215 |
+
|
| 216 |
+
This work has been partially supported by the project Sec4AI4Sec, under the EU's Horizon Europe Research and Innovation Programme (grant agreement no. 101120393); the project ELSA, under the EU's Horizon Europe Research and Innovation Programme (grant agreement no. 101070617); the EU—NGEU National Sustainable Mobility Center (CN00000023), Italian Ministry of University and Research (MUR) Decree n. 1033—17/06/2022 (Spoke 10); projects SERICS (PE00000014) and FAIR (PE0000013) under the MUR NRRP funded by the EU—NGEU; and by the German Federal Ministry of Education and Research under the grant AIgenCY (16KIS2012).
|
| 217 |
+
|
| 218 |
+
# REFERENCES
|
| 219 |
+
|
| 220 |
+
Sravanti Addepalli, Samyak Jain, and Venkatesh Babu R. Efficient and effective augmentation strategy for adversarial training. In NeurIPS, 2022.
|
| 221 |
+
Maximilian Augustin, Alexander Meinke, and Matthias Hein. Adversarial robustness on in- and out-distribution improves explainability. In Computer Vision - ECCV 2020 - 16th European Conference, volume 12371 of Lecture Notes in Computer Science, pp. 228-245. Springer, 2020.
|
| 222 |
+
Battista Biggio and Fabio Roli. Wild patterns: Ten years after the rise of adversarial machine learning. Pattern Recognition, 84:317-331, 2018.
|
| 223 |
+
Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Srndic, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In *Machine Learning and Knowledge Discovery in Databases - European Conference*, ECML PKDD, volume 8190 of Lecture Notes in Computer Science, pp. 387-402. Springer, 2013.
|
| 224 |
+
Wieland Brendel, Jonas Rauber, Matthias Kümmerer, Ivan Ustyuzhaninov, and Matthias Bethge. Accurate, reliable and fast robustness evaluation. In Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems, NeurIPS, 2019a.
|
| 225 |
+
Wieland Brendel, Jonas Rauber, Matthias Kümmerer, Ivan Ustyuzhaninov, and Matthias Bethge. Accurate, reliable and fast robustness evaluation. In Conference on Neural Information Processing Systems (NeurIPS), 2019b.
|
| 226 |
+
Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In IEEE Symposium on Security and Privacy (S&P), 2017a.
|
| 227 |
+
Nicholas Carlini and David A. Wagner. Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy SP, pp. 39-57. IEEE Computer Society, 2017b.
|
| 228 |
+
Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian J. Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. CoRR, abs/1902.06705, 2019.
|
| 229 |
+
Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, John C Duchi, and Percy S Liang. Unlabeled data improves adversarial robustness. In Conference on Neural Information Processing Systems (NeurIPS), 2019.
|
| 230 |
+
Manon Césaire, Lucas Schott, Hatem Hajri, Sylvain Lamprier, and Patrick Gallinari. Stochastic sparse adversarial attacks. In 33rd IEEE International Conference on Tools with Artificial Intelligence, ICTAI, pp. 1247-1254. IEEE, 2021.
|
| 231 |
+
Pin-Yu Chen, Yash Sharma, Huan Zhang, Jinfeng Yi, and Cho-Jui Hsieh. EAD: elastic-net attacks to deep neural networks via adversarial examples. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), pp. 10-17. AAAI Press, 2018.
|
| 232 |
+
Tianlong Chen, Sijia Liu, Shiyu Chang, Yu Cheng, Lisa Amini, and Zhangyang Wang. Adversarial robustness: From self-supervised pre-training to fine-tuning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pp. 696-705. Computer Vision Foundation / IEEE, 2020.
|
| 233 |
+
A. E. Cina, J. Rony, M. Pintor, L. Demetrio, A. Demontis, B. Biggio, I. B. Ayed, and F. Roli. Attackbench: Evaluating gradient-based attacks for adversarial examples. In AAAI Conference on Artificial Intelligence, 2025.
|
| 234 |
+
Francesco Croce and Matthias Hein. Sparse and imperceivable adversarial attacks. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 4723-4731, 2019.
|
| 235 |
+
Francesco Croce and Matthias Hein. Mind the box: $l_{1}$ -apgd for sparse adversarial attacks on image classifiers. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML, volume 139 of Proceedings of Machine Learning Research, pp. 2201-2211. PMLR, 2021.
|
| 236 |
+
|
| 237 |
+
Francesco Croce, Maksym Andriushchenko, Vikash Sehwag, Edoardo Debenedetti, Nicolas Flammarion, Mung Chiang, Prateek Mittal, and Matthias Hein. Robustbench: a standardized adversarial robustness benchmark. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks, 2021.
|
| 238 |
+
Francesco Croce, Maksym Andriushchenko, Naman D. Singh, Nicolas Flammarion, and Matthias Hein. Sparse-rs: A versatile framework for query-efficient sparse black-box adversarial attacks. In Thirty-Sixth AAAI Conference on Artificial Intelligence, AAAI, pp. 6437-6445. AAAI Press, 2022.
|
| 239 |
+
Geoff Davis, Stephane Mallat, and Marco Avellaneda. Adaptive greedy approximations. Constructive approximation, 13:57-98, 1997.
|
| 240 |
+
Edoardo Debenedetti, Vikash Sehwag, and Prateek Mittal. A light recipe to train robust vision transformers. In First IEEE Conference on Secure and Trustworthy Machine Learning, 2023. URL https://openreview.net/forum?id=IztT98ky0cKs.
|
| 241 |
+
Logan Engstrom, Andrew Ilyas, Hadi Salman, Shibani Santurkar, and Dimitris Tsipras. Robustness (python library), 2019. URL https://github.com/MadryLab/robustness.
|
| 242 |
+
Foolbox. Datasetattack, 2017. URL https://foolbox.readthedocs.io/en/stable/modules/attacks.html#foolboxattacks.DatasetAttack
|
| 243 |
+
Justin Gilmer, Ryan P. Adams, Ian J. Goodfellow, David Andersen, and George E. Dahl. Motivating the rules of the game for adversarial example research. CoRR, abs/1807.06732, 2018.
|
| 244 |
+
Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, and Timothy A. Mann. Improving robustness using generated data. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS, pp. 4218-4233, 2021.
|
| 245 |
+
Kaiming He, X. Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2015.
|
| 246 |
+
Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The many faces of robustness: A critical analysis of out-of-distribution generalization. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV, pp. 8320-8329. IEEE, 2021.
|
| 247 |
+
Yulun Jiang, Chen Liu, Zhichao Huang, Mathieu Salzmann, and Sabine Susstrunk. Towards stable and efficient adversarial training against 11 bounded adversarial attacks. In International Conference on Machine Learning, 2023.
|
| 248 |
+
Alex Krizhevsky. Learning multiple layers of features from tiny images. 2009.
|
| 249 |
+
Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60:84 - 90, 2012.
|
| 250 |
+
Yann LeCun and Corinna Cortes. The mnist database of handwritten digits. 2005.
|
| 251 |
+
Alexander Matyasko and Lap-Pui Chau. PDPGD: primal-dual proximal gradient descent adversarial attack. CoRR, abs/2106.01538, 2021. URL https://arxiv.org/abs/2106.01538
|
| 252 |
+
Yichuan Mo, Dongxian Wu, Yifei Wang, Yiwen Guo, and Yisen Wang. When adversarial training meets vision transformers: Recipes from training to architecture. Advances in Neural Information Processing Systems, 35:1859-18611, 2022.
|
| 253 |
+
Apostolos Modas, Seyed-Mohsen Moosavi-Dezfooli, and Pascal Frossard. Sparsefool: a few pixels make a big difference. In Conference on computer vision and pattern recognition (CVPR), 2019.
|
| 254 |
+
Shengyun Peng, Weilin Xu, Cory Cornelius, Matthew Hull, Kevin Li, Rahul Duggal, Mansi Phute, Jason Martin, and Duen Horng Chau. Robust principles: Architectural design principles for adversarially robust cnns. In 34th British Machine Vision Conference 2023, BMVC 2023, Aberdeen, UK, November 20-24, 2023, pp. 739-740. BMVA Press, 2023. URL http://proceedings.bmvc2023.org/739/.
|
| 255 |
+
|
| 256 |
+
Maura Pintor, Fabio Roli, Wieland Brendel, and Battista Biggio. Fast minimum-norm adversarial attacks through adaptive norm constraints. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems, NeurIPS, pp. 20052-20062, 2021.
|
| 257 |
+
Maura Pintor, Luca Demetrio, Angelo Sotgiu, Ambra Demontis, Nicholas Carlini, Battista Biggio, and Fabio Roli. Indicators of attack failure: Debugging and improving optimization of adversarial examples. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 23063-23076. Curran Associates, Inc., 2022.
|
| 258 |
+
Jonas Rauber, Wieland Brendel, and Matthias Bethge. Foolbox: A python toolbox to benchmark the robustness of machine learning models, 2017. URL https://github.com/bethgelab/ foolbox.
|
| 259 |
+
Jérôme Rony, Luiz G. Hafemann, Luiz Oliveira, Ismail Ben Ayed, Robert Sabourin, and Eric Granger. Decoupling direction and norm for efficient gradient-based l2 adversarial attacks and defenses. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4317-4325, 2018.
|
| 260 |
+
Jérôme Rony, Eric Granger, Marco Pedersoli, and Ismail Ben Ayed. Augmented lagrangian adversarial attacks. In 2021 IEEE/CVF International Conference on Computer Vision, ICCV, pp. 7718-7727. IEEE, 2021.
|
| 261 |
+
Jérôme Rony and Ismail Ben Ayed. Adversarial Library. URL https://github.com/jeromerony/adversarial-library
|
| 262 |
+
Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, and Aleksander Madry. Do adversarially robust imagenet models transfer better? In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS, 2020.
|
| 263 |
+
Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014.
|
| 264 |
+
Eric Wong, Leslie Rice, and J. Zico Kolter. Fast is better than free: Revisiting adversarial training. In 8th International Conference on Learning Representations, ICLR. OpenReview.net, 2020.
|
| 265 |
+
Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, and Furong Huang. Exploring and exploiting decision boundary dynamics for adversarial robustness. In International Conference on Learning Representations (ICLR), 2023.
|
| 266 |
+
Xuyang Zhong, Yixiao Huang, and Chen Liu. Towards efficient training and evaluation of robust models against $l_{0}$ bounded adversarial perturbations. In International Conference on Machine Learning ICML. PMLR, 2024.
|
2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5953d9dddba89c281f1bc096da8d1e1a232b92fbc9c0f43bd01ce72eb1748ed2
|
| 3 |
+
size 654953
|
2025/$_sigma$-zero_ Gradient-based Optimization of $_ell_0$-norm Adversarial Examples/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/821ea593-4f97-43c8-a9eb-9a23f906d645_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/821ea593-4f97-43c8-a9eb-9a23f906d645_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/821ea593-4f97-43c8-a9eb-9a23f906d645_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b60aeb1813907a6e602d94a461525370a800798faf64a8b7faa3dd31944c2743
|
| 3 |
+
size 7438391
|
2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:15b0c043f8b3b80596b93be2fa07683059fd8b9a538673dda7f17e03537e7159
|
| 3 |
+
size 1955643
|
2025/$_text{D}_{2}_text{O}$_ Dynamic Discriminative Operations for Efficient Long-Context Inference of Large Language Models/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$q$-exponential family for policy optimization/e136c89b-38c0-4066-b262-245af928b74a_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$q$-exponential family for policy optimization/e136c89b-38c0-4066-b262-245af928b74a_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/$q$-exponential family for policy optimization/e136c89b-38c0-4066-b262-245af928b74a_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:43f847fc4c3a68ad653ef3966da7c16bfb4d9e2ae709069dcbbc466bfbccbcb5
|
| 3 |
+
size 10093219
|
2025/$q$-exponential family for policy optimization/full.md
ADDED
|
@@ -0,0 +1,658 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# q-EXPONENTIAL FAMILY FOR POLICY OPTIMIZATION
|
| 2 |
+
|
| 3 |
+
Lingwei Zhu*
|
| 4 |
+
|
| 5 |
+
University of Tokyo
|
| 6 |
+
|
| 7 |
+
lingwei4@ualberta.ca
|
| 8 |
+
|
| 9 |
+
Haseeb Shah*
|
| 10 |
+
|
| 11 |
+
University of Alberta
|
| 12 |
+
|
| 13 |
+
hshah1@ualberta.ca
|
| 14 |
+
|
| 15 |
+
Han Wang*
|
| 16 |
+
|
| 17 |
+
University of Alberta
|
| 18 |
+
|
| 19 |
+
han8@ualberta.ca
|
| 20 |
+
|
| 21 |
+
Yukie Nagai
|
| 22 |
+
|
| 23 |
+
University of Tokyo
|
| 24 |
+
|
| 25 |
+
Martha White
|
| 26 |
+
|
| 27 |
+
University of Alberta
|
| 28 |
+
|
| 29 |
+
# ABSTRACT
|
| 30 |
+
|
| 31 |
+
Policy optimization methods benefit from a simple and tractable policy parametrization, usually the Gaussian for continuous action spaces. In this paper, we consider a broader policy family that remains tractable: the $q$ -exponential family. This family of policies is flexible, allowing the specification of both heavy-tailed policies ( $q > 1$ ) and light-tailed policies ( $q < 1$ ). This paper examines the interplay between $q$ -exponential policies and several actor-critic algorithms, conducted on both online and offline problems. We find that heavy-tailed policies are more effective in general and can consistently improve on the Gaussian. In particular, we find the Student's t-distribution to be more stable than the Gaussian across settings and that a heavy-tailed $q$ -Gaussian for Tsallis Advantage Weighted Actor-Critic consistently performs well in offline benchmark problems. In summary, we find that the Student's t policy is a strong candidate as a drop-in replacement for the Gaussian. Our code is available at https://github.com/lingweizhu/qexp.
|
| 32 |
+
|
| 33 |
+
# 1 INTRODUCTION
|
| 34 |
+
|
| 35 |
+
Policy optimization methods optimize the parameters of a stochastic policy towards maximizing some performance measure (Sutton et al., 1999). These methods benefit from a simple and tractable policy functional. For discrete action spaces, the Boltzmann-Gibbs (BG) policy is often preferred (Mei et al., 2020; Cen et al., 2022); while the Gaussian policy is standard for the continuous case. For continuous action spaces, sampling the BG policy is computationally expensive due to the normalizing log-partition function. A Gaussian policy is often used as a tractable approximation. While there are other candidates such as the Beta policy (Chou et al., 2017), the Gaussian remains the most common choice for both online and offline policy optimization methods (Haarnoja et al.,
|
| 36 |
+
|
| 37 |
+
2018; Neumann et al., 2023; Xiao et al., 2023).
|
| 38 |
+
|
| 39 |
+
In this paper, we consider a broader policy family that remains tractable called the $q$ -exponential family. The $q$ -exponential family was proposed to study non-extensive system behaviors in statistical physics (Naudts, 2010; Matsuzoe & Ohara, 2011), and has recently been exploited in transformers (Peters et al., 2019; Martins et al., 2022). By setting $q = 1$ , it recovers the standard exponential family. With $q > 1$ , we can obtain policies with heavier tails than the Gaussian, such as the Student's t-distribution (Kobayashi, 2019) or the Lévy Process distribution (Simsekli et al., 2019; Bedi et al., 2024). Heavy-tailed distributions can be preferable as they are more robust (Lange et al., 1989), can facilitate exploration, and can help escape local optima in the sparse reward context (Chakraborty et al., 2023). When $q < 1$ , light-tailed (sparse) policies
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
Figure 1: The policy parametrizations in this paper. Student's t is a $q$ -exponential with $q = 1 + 2 / (\nu + 1) \approx 1.67$ .
|
| 43 |
+
|
| 44 |
+
such as the $q$ -Gaussian distribution can be recovered. The sparse $q$ -Gaussian has finite support and
|
| 45 |
+
|
| 46 |
+
<table><tr><td>Reference</td><td>Scope of expq</td><td>Explicit?</td><td>Heavy & Sparse?</td><td>Continuous?</td><td>RL?</td></tr><tr><td>Naudts (2010); Matsuzoe & Ohara (2011)</td><td>q ∈ R</td><td>-</td><td>✓</td><td>✓</td><td>✗</td></tr><tr><td>Martins et al. (2022)</td><td>q < 1</td><td>-</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>Lee et al. (2018); Chow et al. (2018a)</td><td>q = 0</td><td>✓</td><td>✗</td><td>✗</td><td>✓</td></tr><tr><td>Lee et al. (2020); Zhu et al. (2023; 2024)</td><td>q < 1</td><td>✗</td><td>✗</td><td>✓</td><td>✓</td></tr><tr><td>Li et al. (2023)</td><td>q = 0</td><td>✓</td><td>✗</td><td>✓</td><td>✓</td></tr><tr><td>This paper</td><td>q ∈ R</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 47 |
+
|
| 48 |
+
Table 1: Existing works and their scopes. We are the first to consider the general $q$ -exponential family for the parameterized policy in reinforcement learning. The family includes continuous heavy-tailed and sparse policies. Prior works in RL considered only the discrete case or continuous policy with a specific entropic index $q$ . Further, in many cases they still used a Gaussian policy parameterization to approximate an implicit target distribution that is a $q$ -exponential, rather than explicitly using the $q$ -Gaussian as the policy parameterization.
|
| 49 |
+
|
| 50 |
+
can serve as a continuous generalization of the discrete sparsemax. As a result, $q$ -Gaussian helps alleviate safety concerns incurred by the infinite support Gaussian (Xu et al., 2023; Li et al., 2023).
|
| 51 |
+
|
| 52 |
+
Such $q$ -exponential families have been considered in reinforcement learning, with the existing work summarized in Table 1. Lee et al. (2018); Chow et al. (2018b) studied the discrete setting with $q = 0$ , called the sparsemax. Li et al. (2023) similarly considered a $q = 0$ policy parameterization for the continuous action setting. All other works, however, used a Gaussian policy parameterization to (implicitly) approximate an idealized target distribution that is $q$ -Gaussian, and specifically for $q < 1$ (Lee et al., 2020; Zhu et al., 2024). Such a choice is suboptimal, as Gaussians are used to approximate light-tailed (sparse) target policies. In fact, that choice was not strictly necessary, as the policy parameterization need not have been chosen to be Gaussian: it could also have been a $q$ -Gaussian. Though obvious in hindsight, this gap was likely due to simply not considering the use of the general continuous $q$ -exponential family for the policy parameterization, which is what we introduce in this work.
|
| 53 |
+
|
| 54 |
+
In this paper, we empirically investigate the $q$ -exponential family as a replacement for the Gaussian inside several existing policy optimization algorithms. Our contributions include the following. (1) We show how to use $q$ -exponential family policy parameterizations inside a variety of existing actor-critic algorithms. (2) We provide comprehensive experiments on both online and offline problems showing that $q$ -exponential family policies can improve on the Gaussian by a large margin. In particular, we find that the Student's t policy is more stable, performing well across algorithms and problems, shown in Figure 2. (3) We provide empirical evidence supporting the assumption that algorithms may prefer specific policies depending on the actor loss objective. In particular, we find that by replacing the Gaussian with a heavy-tailed $q$ -Gaussian, Tsallis Advantage Weighted Actor-Critic (Zhu et al., 2024) consistently performs better across offline benchmark problems. This outcome makes sense; as men
|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
Figure 2: Performance relative to the Squashed Gaussian on the offline D4RL MuJoCo task, averaged across the selected algorithms and environments.
|
| 58 |
+
|
| 59 |
+
tioned above, this algorithm implicitly has a target policy that is a $q$ -Gaussian, so using a matching $q$ -Gaussian parameterization should perform better.
|
| 60 |
+
|
| 61 |
+
# 2 BACKGROUND
|
| 62 |
+
|
| 63 |
+
We focus on discounted Markov Decision Processes (MDPs) expressed by the tuple $(\mathcal{S},\mathcal{A},P,\mu ,r,\gamma)$ , where $\mathcal{S}$ and $\mathcal{A}$ denote state space and action space, respectively. Let $\Delta (\mathcal{X})$ denote the set of probability distributions over $\mathcal{X}$ . $P$ and $\mu$ denote the transition probability and initial state distribution, respectively. $r(s,a)$ defines the reward associated with that transition. $\gamma \in (0,1)$ is the discount factor.
|
| 64 |
+
|
| 65 |
+
A policy $\pi : \mathcal{S} \to \Delta(\mathcal{A})$ is a mapping from the state space to distributions over actions. To assess the quality of a policy, we define the expected return as $J(\pi) = \int_{\mathcal{S}} \rho^{\pi}(s) \int_{\mathcal{A}} \pi(a|s)r(s,a)\mathrm{d}a\mathrm{d}s$ , where $\rho^{\pi}(s) = \sum_{t=0}^{\infty} \gamma^{t}P(s_{t} = s)$ is the unnormalized state visitation frequency. The goal is to learn a policy that maximizes $J(\pi)$ . We also define the action value and state value as $Q^{\pi}(s,a) = \mathbb{E}_{\pi}\left[\sum_{t=0}^{\infty} \gamma^{t}r(s_{t},a_{t})\big|s_{0} = s, a_{0} = a\right]$ , $V^{\pi}(s) = \mathbb{E}_{\pi}\left[Q^{\pi}(s,a)\right]$ . For ease of notation, we write the dependence on state as a subscript, e.g. $Q(s,a)$ will be written as $Q_{s}(a)$ .
|
| 66 |
+
|
| 67 |
+
In practice, the policy is often parametrized by a vector of parameters $\theta \in \mathbb{R}^n$ . The policy can then be optimized by adjusting its parameters to the high reward region utilizing its gradient information.
|
| 68 |
+
|
| 69 |
+
The Policy Gradient Theorem (Sutton et al., 1999) featured by many policy gradient methods states that the gradient can be computed by:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
\nabla_ {\theta} J (\pi ; \theta) = \mathbb {E} _ {s \sim \rho^ {\pi}, a \sim \pi_ {\theta}} \left[ Q _ {s} ^ {\pi} (a) \nabla_ {\theta} \ln \pi_ {s} (a; \theta) \right].
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
In practice, the expectation is approximated by sampling. When the state space is large, the action value function is also parametrized, leading to the Actor-Critic methods (Degris et al., 2012).
|
| 76 |
+
|
| 77 |
+
In contrast to the study of policy gradient algorithms, the impact of specific policy parametrizations on performance remains a less studied topic. Researchers typically consider policy parametrizations that can be written as the following:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\pi_ {s} (a; \theta) = \frac {1}{Z _ {s}} \exp (\theta^ {\top} \phi_ {s} (a)) = \exp (\theta^ {\top} \phi_ {s} (a) - Z _ {s} ^ {\prime}). \tag {1}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
Here, $\phi_s(a)$ is a vector of statistics and $\theta \in \mathbb{R}^n$ is a vector of parameters, $Z_{s}$ is the normalizing constant ensuring the policy is a valid distribution and $Z_{s}\coloneqq \exp (Z_{s}^{\prime})$ . One immediate instance is the Boltzmann-Gibbs (BG) policy $\pi_{\mathrm{BG},s}(a;\theta) = \exp \left(Q_s(a) - Z_s^{\prime}\right)$ , where $Z_{s}^{\prime} = \ln \int \exp \left(Q_{s}(a)\right)\mathrm{d}a$ is the log-partition function. In the discrete case, it is also called the softmax transformation (Cover & Thomas, 2006). The BG policy has been studied extensively in RL for encouraging exploration and smoothing the optimization landscape, to name a few applications (Haarnoja et al., 2018; Ahmed et al., 2019; Cen et al., 2022). However, evaluating the log-partition function is in general intractable.
|
| 84 |
+
|
| 85 |
+
# 3 EXPONENTIAL AND $q$ -EXPONENTIAL FAMILIES
|
| 86 |
+
|
| 87 |
+
We first review the commonly used policy parametrizations. They permit an expression using the exponential function. We arrive at the more general $q$ -exponential family by deforming the exponential. In Table 2, we summarize all policies presented in the paper.
|
| 88 |
+
|
| 89 |
+
# 3.1 THE EXPONENTIAL FAMILY POLICIES
|
| 90 |
+
|
| 91 |
+
The Gaussian policy is one of the simplest distributions one can consider due to its omnipresence in statistics and parametric estimation as well as its widely available sampling procedure implementations. Since evaluating the log-partition function of BG is intractable, due to the aforementioned advantages many researchers consider the Gaussian policy instead: $\pi_s(a) = \frac{1}{\sqrt{2\pi}\sigma_s}\exp \left(\frac{-(a - \mu_s)^2}{2\sigma_s^2}\right)$ . For simplicity we drop the dependence on state $s$ . To see it is a member of the exponential family, in Eq. (1) let $\theta = [\frac{\mu}{\sigma^2}, - \frac{1}{2\sigma^2}]^\top$ for $\mu \in (-\infty ,\infty),\sigma >0;\phi_s(a) = [a,a^2 ]^\top$ , and $Z_{s}^{\prime} = \ln \left(\sqrt{2\pi}\sigma\right)$ . This amounts to setting $Q_{s}(a) = -(a - \mu)^{2}$ in the BG (Gu et al., 2016). We write a Gaussian policy as $\pi_{\mathcal{N},s}(a) = \mathcal{N}(a;\mu ,\sigma^2)$ . The gradients of the Gaussian are $\nabla_{\mu}\ln \pi_{s}(a) = \frac{(a - \mu)}{\sigma^{2}}$ and $\nabla_{\sigma}\ln \pi_{s}(a) = \frac{(a - \mu)^{2}}{\sigma^{3}} -\frac{1}{\sigma}$ . On one hand, the Gaussian policy is simple to implement. On the other hand, when $\sigma$ becomes small, the Gaussian can be unstable due to overly large gradients and can prematurely concentrate on a suboptimal action. As a result, it is susceptible to noise/outliers and does not encourage sufficient exploration due to its thin tails. This paper investigates location-scale alternatives within the generalized $q$ -exponential family.
|
| 92 |
+
|
| 93 |
+
Another interesting member is the Beta distribution (Chou et al., 2017): $\pi_{\mathrm{Beta},s}(a) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)\Gamma(\beta)} a^{\alpha -1}(1 - a)^{\beta -1}$ , $a\in (0,1)$ , where $\Gamma (\cdot)$ is the gamma function. It can be retrieved from equation 1 by letting $\theta = [\alpha ,\beta ]^{\top},\phi_s(a) = [\ln a,\ln (1 - a)]^\top$ $Z_{s} = \frac{\Gamma(\alpha)\Gamma(\beta)}{\Gamma(\alpha + \beta)}$ . Since Beta distribution's support is bounded between $(0,1)$ , Chou et al. (2017) argued that it might alleviate the
|
| 94 |
+
|
| 95 |
+
<table><tr><td>Family</td><td>Policy</td><td>Parameters θ</td><td>Statistics φs(a)</td><td>Normalization Zs</td><td>∇ ln πs(a)</td></tr><tr><td rowspan="2">exp</td><td>Gaussian</td><td>[μ/σ2, -1/2σ2]</td><td>[a, a2]</td><td>√2πσ</td><td>Eq. (13)</td></tr><tr><td>Beta</td><td>[α, β]</td><td>[ln a, ln(1 - a)]</td><td>Γ(α)Γ(β)/Γ(α+β)</td><td>-</td></tr><tr><td rowspan="3">q-exp</td><td>Student's t</td><td>[-2μ/νσ, 1/νσ]</td><td>[a, a2]</td><td>√πνσΓ(ν/2)/Γ(ν+1/2)</td><td>Eq. (14)</td></tr><tr><td>q-Gaussian (q < 1)</td><td>[μ/σ2, -1/2σ2]</td><td>[a, a2]</td><td>√π/1-q Γ(1/1-q+1)/Γ(1/1-q+3/2)</td><td>Eq. (15)</td></tr><tr><td>q-Gaussian (1 < q < 3)</td><td></td><td></td><td>√π/√q-1 Γ(1/q-1-1/2)/Γ(1/q-1)</td><td></td></tr></table>
|
| 96 |
+
|
| 97 |
+
Table 2: Policy parametrizations from the exp and $q$ -exp families studied in this paper. We are primarily interested in the location-scale family. Their multivariate forms are shown in Appendix A.
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
Figure 3: $\exp_q x$ and $\ln_q x$ for $q < 1$ and $q > 1$ . When $q = 1$ they respectively recover their standard counterpart. For $q < 1$ the $q$ -exp can return zero values and hence $q$ -exp policies may achieve sparsity. For $q > 1$ , $q$ -exp decays more slowly towards 0, resulting in heavy-tailed behaviors. The rightmost shows the $q$ -Gaussian with different $q$ .
|
| 101 |
+
|
| 102 |
+

|
| 103 |
+
|
| 104 |
+

|
| 105 |
+
|
| 106 |
+
bias introduced by truncating Gaussian densities outside the action space bounds. The beta policy is the only non-location-scale family distribution in this paper. However, as we will show in the experiments, the Beta policy generally does not perform favourably against the Gaussian.
|
| 107 |
+
|
| 108 |
+
# 3.2 THE $q$ -EXPONENTIAL FAMILY, HEAVY-TAILED AND LIGHT-TAILED POLICIES
|
| 109 |
+
|
| 110 |
+
Generalizing the exponential family using the $q$ -exponential function has been extensively discussed in statistical physics (Naudts, 2002; Tsallis, 2009; Naudts, 2010; Amari & Ohara, 2011). In the machine learning literature, the $q$ -exponential generalization has attracted some attention since it allows for tuning the tail behavior by adjusting the value of $q$ (Sears, 2008; Ding & Vishwanathan, 2010; Amid et al., 2019). The $q$ -exponential and its unique inverse function $q$ -logarithm are:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\exp_ {q} x := \begin{cases} \exp x, & q = 1 \\ \left[ 1 + (1 - q) x \right] _ {+} ^ {\frac {1}{1 - q}}, & q \neq 1 \end{cases} , \quad \ln_ {q} x := \begin{cases} \ln x, & q = 1 \\ \frac {x ^ {1 - q} - 1}{1 - q}, & q \neq 1 \end{cases} \tag {2}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where $[\cdot ]_{+}:= \max \{\cdot ,0\}$ . $q$ -exp/log generalize exp/log since $\lim_{q\to 1}\exp_qx = \exp x$ and $\lim_{q\to 1}\ln_qx = \ln x$ . Similar to exp, $q$ -exp is an increasing and convex function for $q > 0$ , satisfying $\exp_q(0) = 1$ . However, an important difference of $q$ -exp is that $\exp_q(a + b)\neq \exp_q(a)\exp_q(b)$ unless $q = 1$ . We visualize $q$ -exp/log in Figure 3.
|
| 117 |
+
|
| 118 |
+
We now define the $q$ -exponential family as:
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\pi_ {q, s} (a; \theta) = \frac {1}{Z _ {q , s}} \exp_ {q} \left(\theta^ {\top} \phi_ {s} (a)\right) = \exp_ {q} \left(\theta^ {\top} \phi_ {s} (a) - Z _ {q, s} ^ {\prime}\right), \tag {3}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
where $\theta, \phi_s(a), Z_{q,s}$ have similar meanings to equation 1. Note that $Z_{q,s} \neq \exp_q(Z_{q,s}')$ unless $q = 1$ . The $q$ -exponential family includes the $q$ -Gaussian and Student's t distributions described in the next subsections.
|
| 125 |
+
|
| 126 |
+
# 3.2.1 $q$ -GAUSSIAN
|
| 127 |
+
|
| 128 |
+
As the counterpart of Gaussian in the $q$ -exp family, $q$ -Gaussian unifies both light-tailed and heavy-tailed policies by varying the entropic index $q$ (Matsuzoe & Ohara, 2011):
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\pi_ {\mathcal {N} _ {q}, s} (a) = \frac {1}{Z _ {q , s}} \exp_ {q} \left(- \frac {(a - \mu) ^ {2}}{2 \sigma^ {2}}\right),
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\text {where } Z _ {q, s} = \begin{cases} \sigma \sqrt {\frac {\pi}{1 - q}} \, \Gamma \left(\frac {1}{1 - q} + 1\right) / \Gamma \left(\frac {1}{1 - q} + \frac {3}{2}\right) & \text {if } - \infty < q < 1, \\ \sigma \sqrt {\frac {\pi}{q - 1}} \, \Gamma \left(\frac {1}{q - 1} - \frac {1}{2}\right) / \Gamma \left(\frac {1}{q - 1}\right) & \text {if } 1 < q < 3. \end{cases} \tag {4}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
It is heavy-tailed when $1 < q < 3$ and light-tailed when $q < 1$ . $\pi_{\mathcal{N}_{q,s}}(a)$ is no longer integrable for $q \geq 3$ (Naudts, 2010). We visualize these $q$ -Gaussians in Figure 3.
|
| 139 |
+
|
| 140 |
+
Since popular libraries like the PyTorch (Paszke et al., 2019) do not have implementations of $q$ -Gaussians available, we discuss their sampling methods. It was shown by (Martins et al., 2022) that a sparse $q$ -Gaussian ( $q < 1$ ) random variable permits a stochastic representation $\boldsymbol{\mu} + r\boldsymbol{A}\boldsymbol{u}$ , where $\boldsymbol{u} \sim \mathrm{Unif}\left(\mathbb{S}^N\right)$ is a random sample from the $N - 1$ dimensional unit sphere. $A$ is the scaled matrix $|\Sigma|^{-\frac{1}{2N + \frac{4}{1 - q}}}\Sigma^{\frac{1}{2}}$ . $r$ is the radius of the distribution, and the ratio follows the Beta distribution $r^2 / R^2 \sim \mathrm{Beta}\left((2 - q) / (1 - q), N / 2\right)$ , where $R$ is the radius of the supporting sphere of the standard $q$ -Gaussian $\mathcal{N}_q(0, I)$ :
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
R = \left(\frac {\Gamma \left(\frac {N}{2} + \frac {2 - q}{1 - q}\right)}{\Gamma \left(\frac {2 - q}{1 - q}\right) \pi^ {\frac {N}{2}}} \cdot \left(\frac {2}{1 - q}\right) ^ {\frac {1}{1 - q}}\right) ^ {\frac {1 - q}{2 + (1 - q) N}}. \tag {5}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
Notice that $R$ depends only on the dimensionality $N$ and the entropic index $q$ . This method provides low-variance samples, but unfortunately it does not extend to $q > 1$ . Therefore, for $1 < q < 3$ we adopt the Generalized Box-Muller Method (GBMM) (Thistleton et al., 2007) to transform uniform random variables $\mathbf{u}_1, \mathbf{u}_2 \sim \mathrm{Unif}(0,1)^N$ by the following:
|
| 147 |
+
|
| 148 |
+
$$
|
| 149 |
+
\boldsymbol {z} _ {1} = \sqrt {- 2 \ln_ {q} (\boldsymbol {u} _ {1})} \cdot \cos (2 \pi \boldsymbol {u} _ {2}), \quad \boldsymbol {z} _ {2} = \sqrt {- 2 \ln_ {q} (\boldsymbol {u} _ {1})} \cdot \sin (2 \pi \boldsymbol {u} _ {2}). \tag {6}
|
| 150 |
+
$$
|
| 151 |
+
|
| 152 |
+
Then each of $z_1, z_2$ is a standard $q$ -Gaussian variable with new entropic index $q' = (3q - 1) / (q + 1)$ . Often we know the desired $q'$ in advance; in this case we simply let the $q$ -log take on the index $q = (q' + 1) / (3 - q')$ . The desired random vector is given by $\pmb{\mu} + \Sigma^{\frac{1}{2}}\pmb{z}$ .
|
| 153 |
+
|
| 154 |
+
# 3.2.2 STUDENT'S T
|
| 155 |
+
|
| 156 |
+
Heavy-tailed distributions like the Student's t are popular for robust modelling (Lange et al., 1989). The Student's t distribution is
|
| 157 |
+
|
| 158 |
+
$$
|
| 159 |
+
\pi_ {\mathrm {S t}, s} (a) = \frac {\Gamma \left(\frac {\nu + 1}{2}\right)}{\sqrt {\pi \nu \sigma} \, \Gamma \left(\frac {\nu}{2}\right)} \left(1 + \frac {(a - \mu) ^ {2}}{\sigma \nu}\right) ^ {- \frac {\nu + 1}{2}}, \tag {7}
|
| 160 |
+
$$
|
| 161 |
+
|
| 162 |
+
where $\nu > 0$ is the degree of freedom. As $\nu \to \infty$ , Student's t distribution approaches the Gaussian. Numerically, Student's t with $\nu \geq 30$ is considered to closely match the Gaussian. Therefore, $\nu$ can be an important learnable parameter in addition to its location $\mu$ and scale $\sigma$ . It allows the policy to adaptively balance the exploration-exploitation trade-off by interpolating the Gaussian and heavy-tailed policies. Now let $q = 1 + \frac{2}{\nu + 1}$ and define
|
| 163 |
+
|
| 164 |
+
Algorithm 1: $q$ -Gaussian sampling
|
| 165 |
+
|
| 166 |
+
Input: $q, N, \mu, \Sigma$
|
| 167 |
+
|
| 168 |
+
if $q < 1$ then
|
| 169 |
+
|
| 170 |
+
sample $\pmb{u}\sim \mathrm{Unif}(\mathbb{S}^N)$
|
| 171 |
+
|
| 172 |
+
sample $z\sim \mathrm{Beta}\left(\frac{2 - q}{1 - q},\frac{N}{2}\right)$
|
| 173 |
+
|
| 174 |
+
compute $R$ per Eq. (5)
|
| 175 |
+
|
| 176 |
+
compute $A = |\Sigma|^{-\frac{1}{2N + \frac{4}{1 - q}}}\Sigma^{\frac{1}{2}}$
|
| 177 |
+
|
| 178 |
+
return $\mu +\sqrt{zR^2} Au$
|
| 179 |
+
|
| 180 |
+
else if $q > 1$ then
|
| 181 |
+
|
| 182 |
+
sample $\pmb{u}_1, \pmb{u}_2 \sim \mathrm{Unif}(0, 1)^N$
|
| 183 |
+
|
| 184 |
+
compute $z$ by GBMM Eq. (6)
|
| 185 |
+
|
| 186 |
+
return $\mu +\Sigma^{\frac{1}{2}}z$
|
| 187 |
+
|
| 188 |
+
$$
|
| 189 |
+
Z _ {q, s} := \frac {\sqrt {\pi \nu \sigma} \Gamma \left(\frac {\nu}{2}\right)}{\Gamma \left(\frac {\nu + 1}{2}\right)}, \quad \theta^ {\top} \phi_ {s} (a) := \frac {Z _ {q , s} ^ {q - 1}}{(1 - q)} \frac {(a - \mu) ^ {2}}{\sigma \nu}, \tag {8}
|
| 190 |
+
$$
|
| 191 |
+
|
| 192 |
+
$$
|
| 193 |
+
\Rightarrow \quad \pi_ {\mathrm {S t}, s} (a) = \exp_ {q} \left(\theta^ {\top} \phi_ {s} (a) - \ln_ {2 - q} Z _ {q, s}\right),
|
| 194 |
+
$$
|
| 195 |
+
|
| 196 |
+
which we see it is indeed a $q$ -exp policy and $Z_{q,s}^{\prime} = \ln_{2 - q}Z_{q,s}$ . Student's t policy has been used in (Kobayashi, 2019) to encourage exploration and to escape local optima. Another related case is the Cauchy's distribution recovered when $q = 2$ (or $\nu = 1$ from Student's t). Cauchy's distribution can be used as the starting point for learning Student's t. Note that Cauchy's distribution does not have valid mean, variance or any higher moments.
|
| 197 |
+
|
| 198 |
+
# 4 USING $q$ -EXPONENTIAL FAMILIES FOR ACTOR-CRITIC ALGORITHMS
|
| 199 |
+
|
| 200 |
+
In this section, we outline three key actor-critic algorithms we use in our study and the nuances of incorporating $q$ -exp policies into them. For example, the $q$ -exp policies may not have closed-form Shannon entropy. Therefore, approximations are needed for algorithms like SAC and GreedyAC. Moreover, though for the Gaussian evaluating the log-likelihood for off-policy/offline actions causes no problem, it raises a new issue for the light-tailed $q$ -Gaussian, since these actions can fall outside of its support.
|
| 201 |
+
|
| 202 |
+
Soft Actor-Critic. SAC (Haarnoja et al., 2018) encourages exploration by adding to reward the Shannon entropy. The actor minimizes the following KL loss
|
| 203 |
+
|
| 204 |
+
$$
|
| 205 |
+
\mathcal {L} _ {\mathrm {SAC}} (\phi) := \mathbb {E} _ {s \sim \mathcal {B}} \left[ D _ {KL} (\pi_ {\phi , s} \| \pi_ {\mathrm {BG}, s}) \right] = \mathbb {E} _ {s \sim \mathcal {B}} \left[ D _ {KL} \left(\pi_ {\phi , s} \, \Big \| \, \frac {\exp (\tau^ {- 1} Q _ {s})}{Z _ {s}}\right) \right],
|
| 206 |
+
$$
|
| 207 |
+
|
| 208 |
+
where states are sampled from replay buffer $\mathcal{B}$ . The parametrized policy $\pi_{\phi}$ is projected to be close to the BG policy. By default $\pi_{\phi}$ is chosen to be the Gaussian policy, but potentially a more exploring policy like the Student's t could be better. Depending on action values, BG can have multiple modes and heavy tails. The Gaussian may not be able to fully capture these characteristics.
|
| 209 |
+
|
| 210 |
+
Greedy Actor-Critic. GreedyAC (Neumann et al., 2023) maintains an additional proposal policy for exploration by maximizing Shannon entropy augmented rewards. Its actor policy maximizes unbiased reward and learns from the high-quality actions generated by the proposal policy. To simplify notations, we use $I(s)$ to denote the set of high quality actions given $s$ .
|
| 211 |
+
|
| 212 |
+
$$
|
| 213 |
+
\mathcal{L}_{\text{GreedyAC,prop}}(\phi):= \mathbb{E}_{\substack{s\sim \mathcal{B}\\ a\in I(s)}}\left[-\ln \pi_{\phi ,s}(a) - \mathcal{H}\left(\pi_{\phi ,s}\right)\right],
|
| 214 |
+
$$
|
| 215 |
+
|
| 216 |
+
$$
|
| 217 |
+
\mathcal{L}_{\text{GreedyAC, actor}}(\bar{\phi}):= \mathbb{E}_{\substack{s\sim \mathcal{B}\\ a\in \widetilde{I} (s)}}\left[-\ln \pi_{\bar{\phi},s}(a)\right].
|
| 218 |
+
$$
|
| 219 |
+
|
| 220 |
+
GreedyAC maximizes log-likelihood of the actor and entropy-augmented likelihood for the proposal policy. Note that when $\pi_{\phi, s}$ is a $q$ -exp policy, it may not have a closed-form Shannon entropy expression. Therefore, we can use log-probabilities as a surrogate just like in SAC.
|
| 221 |
+
|
| 222 |
+
Tsallis Advantage Weighted Actor-Critic. TAWAC (Zhu et al., 2024) proposed to use a light-tailed $q$ -exp policy for offline learning. However, the light-tailed distribution was approximated with the Gaussian which is an infinite-support policy. Let $\pi_{\mathcal{D}}$ denote the empirical behavior policy and $\mathcal{D}$ the offline dataset. TAWAC minimizes the following actor loss:
|
| 223 |
+
|
| 224 |
+
$$
|
| 225 |
+
\mathcal {L} (\phi) := \mathbb {E} _ {s \sim \mathcal {D}} \left[ D _ {K L} \left(\pi_ {\mathrm {T K L}, s} \| \pi_ {\phi , s}\right) \right] = \mathbb {E} _ {\substack {s \sim \mathcal {D} \\ a \sim \pi_ {\mathcal {D}}}} \left[ - \exp_ {q ^ {\prime}} \left(\frac {Q _ {s} (a) - V _ {s}}{\tau}\right) \ln \pi_ {\phi , s} (a) \right], \tag {9}
|
| 226 |
+
$$
|
| 227 |
+
|
| 228 |
+
where $\pi_{\mathrm{TKL},s}(a)\propto \pi_{\mathcal{D},s}(a)\exp_{q'}\big(\tau^{-1}\big(Q_s(a) - V_s\big)\big)$ denotes the Tsallis KL regularized policy. $\pi_{\phi ,s}$ mimics a TKL policy which can be sparse depending on $q^{\prime}$ . In this case, it is natural to expect that a sparse policy parametrization may lead to better performance.
|
| 229 |
+
|
| 230 |
+
Algorithms like TAWAC that sample from a behavior policy $\pi_{\mathcal{D}}$ need extra caution when using the $q$ -exp policies. When the light-tailed $q$ -Gaussian is used, numerical issues can be incurred since the action sampled may fall outside the support of $\pi_{\phi}$ , leading to undefined log-likelihood. To resolve this problem, we propose to sample from $\pi_{\phi}$ a batch of $K$ actions and replace the out-of-support action with the in-support one with least $L_{2}$ distance, see Alg. 2.
|
| 231 |
+
|
| 232 |
+
Algorithm 2: Out-of-support action handling for the light-tailed $q$ -Gaussian
|
| 233 |
+
|
| 234 |
+
Input: out-of-support action $a$
|
| 235 |
+
sample in-support actions $\{\pmb{b}_i\}_{i=1:K}$
|
| 236 |
+
solve $i^* = \arg \min_i \| \pmb{b}_i - \pmb{a} \|_2^2$
|
| 237 |
+
return $\pmb{b}_{i^*}$
|
| 238 |
+
|
| 239 |
+

|
| 240 |
+
Figure 4: Learning curves on the classic control environments. Only the Gaussian and the best policy parametrization for each setting were shown with full opacity. The best policy is picked based on the total area under the curve (AUC). TAWAC(0) refers to TAWAC with entropic index $q' = 0$ in Eq. (9). Despite tuning hyperparameters separately for each policy, Gaussian is the best policy in only $1/12$ settings. In most other settings, the Gaussian policy performs significantly worse than the best.
|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
Figure 5: (Left) The percentage of times that each policy parametrization is better than the Gaussian across all algorithm-environment combinations based on total AUC. If the bar is above the $50\%$ line, then it means that the policy parametrization is better than Gaussian on average. We see that Student's t and Light-tailed Gaussians are better than the Gaussian in $75\%$ and $66\%$ of the settings, respectively. (Right) Count of times where a policy parametrization performed the best across all algorithm-environment combinations based on AUC. We observe that the student-t policy performed the best in $5/12$ settings, whereas the Gaussian policy performed the best only once.
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
|
| 247 |
+
# 5 EXPERIMENTS
|
| 248 |
+
|
| 249 |
+
Our empirical study's primary goal is to understand better the performance differences under this broader class of policy parameterizations in both online and offline settings. We ran experiments
|
| 250 |
+
|
| 251 |
+
with different algorithms, to get a better sense of how conclusions about policy parameterization vary across different actor-critic algorithms.
|
| 252 |
+
|
| 253 |
+
We parametrize Student-t's DOF parameter $\nu$ in addition to its location and scale. By contrast, the heavy-tailed $q$ -Gaussian is fixed at $q = 2$ , since its allowable range is $1 < q < 3$ . For the light-tailed $q$ -Gaussian, we opt for the standard choice of $q = 0$ . Since Student's t, heavy-tailed q-Gaussian, and Gaussian have unbounded support, we clipped the sampled action to fit the task's action space without modifying the density. We swept the hyperparameters using five random seeds, then increased the number of seeds to 10 for the best parameter setting. The hyperparameter sweep ranges and the best values are provided in Appendix D.2 and D.3.
|
| 254 |
+
|
| 255 |
+
# 5.1 ONLINE CLASSIC CONTROL
|
| 256 |
+
|
| 257 |
+
Domains and Baselines. We used three classical control environments in the continuous action setting: Mountain Car (Sutton & Barto, 2018), Pendulum (Degris et al., 2012) and Acrobot (Sutton & Barto, 2018). We chose the cost-to-goal version of Mountain Car, which outputs $-1$ reward per time step to encourage reaching the goal early. We compared SAC, GreedyAC and two versions of TAWAC, $q' = 0$ and $q' = 2$ .
|
| 258 |
+
|
| 259 |
+
Results. Figure 4 shows the learning curves of all algorithm-environment combinations. Only the Gaussian and the environment-specific best policy are shown with full opacity, computed based on area under curve (AUC). One immediate observation is that, though all three algorithms by default choose the Gaussian policy, it was seldom the best policy parametrization. Environment-wise, on Mountain Car the Gaussian did not rank the best for any of the algorithms. By contrast, the Beta policy attained the first place with SAC, as was the light-tailed $q$ -Gaussian with TAWAC. The same trend for the Gaussian holds in Acrobot and Pendulum as well, with exception only on TAWAC(0) Acrobot, where its curve closely resembled that of the light-tailed $q$ -Gaussian.
|
| 260 |
+
|
| 261 |
+
Algorithm-wise, three observations are to be made: (i) on Mountain Car the Beta policy performed significantly better than others. This could be due to its flexibility in maintaining a skewed distribution shape that matches the BG policy more closely in contrast to the other location scale family members. (ii) The $q$ -Gaussians in general outperformed the Gaussian on TAWAC(0) and TAWAC(2) whose actor explicitly mimics a $q$ -exp policy. (iii) The Student's t ranked at the top with all three algorithms. Figure 5 LHS summarizes the percentage of each policy parametrization outperforming the Gaussian. The Student's t and light-tailed $q$ -Gaussian went above $50\%$ , suggesting potentially greater applicability. The RHS shows out of 12 total combinations, how many times each policy parametrization has ranked the top. The result shows that the Student's t ranked the top 5 times, in contrast to the 1 time of the Gaussian.
|
| 262 |
+
|
| 263 |
+
In Figure 6 we visualized the evolution of Gaussian and $q$ -Gaussian policies on the starting state over the first $4 \times 10^{4}$ steps (10% of the entire learning horizon). Note that the allowed action range is $[-1,1]$ but the plot shows $[-2,2]$ for better visualization. Gaussian tends to quickly concentrate like a delta policy. This can be detrimental to algorithms like SAC and GreedyAC which demand stochasticity to generate diverse samples. By contrast, both light- and heavy-tailed $q$ -Gaussians tend to be more stochastic.
|
| 264 |
+
|
| 265 |
+
In Figure 11 we show the Manhattan plot of SAC with all swept hyperparameters on all environments. Though there is no definitive winner for all cases, it is visible that the Student's t and Gaussian have a similar behavior to hyperparameter changes. Therefore, if we are tackling a problem where the Gaussian works, the Student t is very likely to work. And judging from Fig.
|
| 266 |
+
|
| 267 |
+
5, we know that Student's t performed better than the Gaussian in $75\%$ of the settings given the same hyperparameter sweeping range.
|
| 268 |
+
|
| 269 |
+
Greedy-AC Policy Evolution on Mountain Car
|
| 270 |
+
Figure 6: Policy evolution of GreedyAC on Mountain Car. The Gaussian collapsed into a delta-like policy after only $10\%$ of the learning horizon.
|
| 271 |
+

|
| 272 |
+
Gaussian
|
| 273 |
+
Heavy-tailed q-Gaussian
|
| 274 |
+
Light-tailed q-Gaussian
|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
Figure 7: Normalized scores on Medium-Replay level datasets from the MuJoCo suite. The black bar shows the median. Boxes and whiskers are $1 \times$ and $1.5 \times$ interquartile ranges, respectively. See Figure 15 for full comparison. Environment-wise, TAWAC with heavy-tailed $q$ -Gaussian is often the top performer. Algorithm-wise, Student's t consistently outperforms Squashed Gaussian.
|
| 278 |
+
|
| 279 |
+
# 5.2 OFFLINE D4RL MUJOCO
|
| 280 |
+
|
| 281 |
+

|
| 282 |
+
Figure 8: Relative improvement to the Squashed Gaussian policy, averaged over multiple environments in the MuJoCo suite. The Student's t consistently outperforms the Gaussian with all the chosen algorithms. The heavy-tailed $q$ -Gaussian with TAWAC and IQL also achieved significant improvement. The improvement can reach up to $\sim 20\%$ . Black vertical lines at the top indicate one standard error.
|
| 283 |
+
|
| 284 |
+
Domains and Baselines. We used the standard benchmark MuJoCo suite from D4RL to evaluate algorithm-policy combinations (Fu et al., 2020). The following algorithms are compared: TAWAC, Advantage Weighted Actor-Critic (AWAC) (Nair et al., 2021), Implicit Q-Learning (IQL) (Kostrikov et al., 2022), In-sample Actor-Critic (InAC) (Xiao et al., 2023). For TAWAC, we fixed its leading $q'$ -exp with $q' = 0$ . In Appendix C.2 we detailed the compared algorithms. We also included a popular variant of the Gaussian known as the Squashed Gaussian for comparison. Being able to evaluate the offline log-probability is critical to the tested algorithms; we found that the light-tailed $q$ -Gaussian leads to poor performance even with random online sampling, hence we do not show it here.
|
| 285 |
+
|
| 286 |
+
Results. Figure 7 compared the normalized scores on the Medium-Replay datasets. It can be seen that environment-wise, TAWAC + heavy-tailed $q$ -Gaussian was the top performer, and could improve on the Squashed Gaussian by a non-negligible margin. On HalfCheetah, heavy-tailed $q$ -Gaussian attained the best score with every algorithm. Algorithm-wise, the heavy-tailed $q$ -Gaussian and/or Student's t were better or equivalent to the Squashed Gaussian, except with AWAC on Hopper. Student's t was stable across algorithms, including those with which heavy-tailed $q$ -Gaussian performed poorly (e.g., InAC). This demonstrates the value of the learnable DOF parameter that allows it to interpolate towards the Gaussian. In Appendix E we provided comparison on other policies and datasets.
|
| 287 |
+
|
| 288 |
+
Figure 8 summarized the relative improvement over the Squashed Gaussian across environments. Several observations can be made: (i) though the Squashed Gaussian outperformed the Gaussian in general, it was seldom the best performer. (ii) the Student's t could consistently perform better
|
| 289 |
+
|
| 290 |
+

|
| 291 |
+
Heavy-Tailed q-Gaussian Student's t Gaussian
|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
|
| 295 |
+

|
| 296 |
+
|
| 297 |
+

|
| 298 |
+
Figure 9: Policy evolution of all actions dimensions of TAWAC on Walker2d Medium Replay. Student's t was flexible in that on some dimensions it had lighter tails like the Gaussian by having large DOF (e.g. 4th), and with heavier tails on the others by having smaller DOF (e.g. 3rd, 6th). The peaks at the edges were caused by clipping actions into the allowed range.
|
| 299 |
+
|
| 300 |
+

|
| 301 |
+
|
| 302 |
+

|
| 303 |
+
|
| 304 |
+
than the Gaussian, the improvement can sometimes reach up to $\sim 20\%$ . The same holds for the heavy-tailed $q$ -Gaussian with TAWAC and IQL. (iii) though there was no single winner for all cases, choosing the Student's t for the actors with exponential loss functions (AWAC, IQL, InAC), or the heavy-tailed $q$ -Gaussian for $q$ -exponential actor losses (e.g. TAWAC) is generally effective.
|
| 305 |
+
|
| 306 |
+
Figure 9 visualized the policy evolution of the Squashed Gaussian and the two heavy-tailed policies, learned with TAWAC on Medium-Replay Walker2D. Squashed Gaussian tended to converge slower here. Since the offline MuJoCo environments are fully deterministic, a wide distribution indicates failure of finding the mode of the optimal action and therefore can be detrimental to learning performance. The Squashed Gaussian converged slower than the heavy-tailed $q$ -Gaussian (which performed the best) and the Student's t. Student's t was flexible in that it bore lighter tails like the Gaussian in some dimensions by having a large DOF, for example in the 4th and 5th dimensions. On the other hand, it could take heavy tails by having a small DOF like in the 3rd and 6th dimensions.
|
| 307 |
+
|
| 308 |
+
# 6 CONCLUSION
|
| 309 |
+
|
| 310 |
+
The Gaussian policy is standard for policy optimization algorithms on continuous action spaces. In this paper we considered a broader family of policies that remains tractable, called the $q$ -exponential family. We empirically investigated their utility as a promising alternative to the Gaussian. Specifically, we looked at the Student's t, light- and heavy-tailed $q$ -Gaussian policies. Extensive experiments on both online and offline tasks with various actor-critic methods showed that heavy-tailed policies are in general effective. In summary, we found the Student's t policy to be generally more performant and stable than the Gaussian and could be used as a drop-in replacement. By contrast, the Heavy-tailed $q$ -Gaussian seemed to favor especially Tsallis regularization and outperformed the baselines.
|
| 311 |
+
|
| 312 |
+
We acknowledge that the paper has limitations. Perhaps the greatest is the inherent dilemma of the light-tailed $q$ -Gaussian evaluating out-of-support actions. Off-policy/offline algorithms require evaluating actions from some behavior policy and the actions can fall outside the support of the sparse $q$ -Gaussian. Naïvely discarding these samples results in extremely slow or no learning. In this paper we proposed to alleviate this issue by replacing them with the in-support sampled action with the least $L_{2}$ distance. Nonetheless, this method did not help much in offline experiments. We envision a potential solution that is left to future investigation: projecting the out-of-support actions precisely to the boundary of the $q$ -Gaussian.
|
| 313 |
+
|
| 314 |
+
# REFERENCES
|
| 315 |
+
|
| 316 |
+
Zafarali Ahmed, Nicolas Le Roux, Mohammad Norouzi, and Dale Schuurmans. Understanding the impact of entropy on policy optimization. In Proceedings of 36th International Conference on Machine Learning, volume 97, pp. 151-160, 2019.
|
| 317 |
+
Shun-ichi Amari and Atsumi Ohara. Geometry of q-exponential family of probability distributions. Entropy, 13(6):1170-1185, 2011.
|
| 318 |
+
Ehsan Amid, Manfred K. Warmuth, and Sriram Srinivasan. Two-temperature logistic regression based on the tsallis divergence. In Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89, pp. 2388-2396, 2019.
|
| 319 |
+
Amrit Singh Bedi, Anjaly Parayil, Junyu Zhang, Mengdi Wang, and Alec Koppel. On the sample complexity and metastability of heavy-tailed policy search in continuous control. Journal of Machine Learning Research, 25(39):1-58, 2024.
|
| 320 |
+
Boris Belousov and Jan Peters. Entropic regularization of markov decision processes. Entropy, 21(7), 2019.
|
| 321 |
+
Shicong Cen, Chen Cheng, Yuxin Chen, Yuting Wei, and Yuejie Chi. Fast global convergence of natural policy gradient methods with entropy regularization. Operations Research, 70(4): 2563-2578, 2022.
|
| 322 |
+
Souradip Chakraborty, Amrit Singh Bedi, Kasun Weerakoon, Prithvi Poddar, Alec Koppel, Pratap Tokekar, and Dinesh Manocha. Dealing with sparse rewards in continuous control robotics via heavy-tailed policy optimization. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 989-995, 2023.
|
| 323 |
+
Po-Wei Chou, Daniel Maturana, and Sebastian Scherer. Improving stochastic policy gradients in continuous control with deep reinforcement learning using the beta distribution. In Proceedings of the 34th International Conference on Machine Learning, pp. 834–843, 2017.
|
| 324 |
+
Yinlam Chow, Ofir Nachum, and Mohammad Ghavamzadeh. Path consistency learning in Tsallis entropy regularized MDPs. In International Conference on Machine Learning, pp. 979-988, 2018a.
|
| 325 |
+
Yinlam Chow, Nachum Ofir, Edgar Duenez-guzman, and Mohammad Ghavamzadeh. A Lyapunov-based Approach to Safe Reinforcement Learning. In Annual Conference on Neural Information Processing Systems (NIPS), pp. 1-10, 2018b.
|
| 326 |
+
Thomas M. Cover and Joy A. Thomas. Elements of Information Theory (Wiley Series in Telecommunications and Signal Processing). Wiley-Interscience, USA, 2006.
|
| 327 |
+
Thomas Degris, Martha White, and Richard S. Sutton. Off-policy actor-critic. In Proceedings of the 29th International Conference on Machine Learning, pp. 179-186, 2012.
|
| 328 |
+
Nan Ding and S.v.n. Vishwanathan. t-logistic regression. In Advances in Neural Information Processing Systems, volume 23, 2010.
|
| 329 |
+
Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep data-driven reinforcement learning, 2020.
|
| 330 |
+
Scott Fujimoto and Shixiang Shane Gu. A minimalist approach to offline reinforcement learning. In Thirty-Fifth Conference on Neural Information Processing Systems, 2021.
|
| 331 |
+
S. Furuichi, K. Yanagi, and K. Kuriyama. Fundamental properties of tsallis relative entropy. Journal of Mathematical Physics, 45(12):4868-4877, 2004.
|
| 332 |
+
Shigeru Furuichi. On the maximum entropy principle and the minimization of the fisher information in tsallis statistics. Journal of Mathematical Physics, 50:013303, 01 2010.
|
| 333 |
+
Peter Grünwald and Alexander Dawid. Game theory, maximum entropy, minimum discrepancy and robust bayesian decision theory. Annals of Statistics, 32, 2004.
|
| 334 |
+
|
| 335 |
+
Shixiang Gu, Timothy Lillicrap, Ilya Sutskever, and Sergey Levine. Continuous deep q-learning with model-based acceleration. In Proceedings of The 33rd International Conference on Machine Learning, pp. 2829-2838, 2016.
|
| 336 |
+
Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In Proceedings of the 35th International Conference on Machine Learning, pp. 1861-1870, 2018.
|
| 337 |
+
E. T. Jaynes. Information theory and statistical mechanics. Phys. Rev., 106:620-630, 1957.
|
| 338 |
+
Taisuke Kobayashi. Student-t policy in reinforcement learning to acquire global optimum of robot control. Applied Intelligence, 49:4335-4347, 2019.
|
| 339 |
+
Ilya Kostrikov, Ashvin Nair, and Sergey Levine. Offline reinforcement learning with implicit q-learning. In International Conference on Learning Representations, 2022.
|
| 340 |
+
Kenneth L. Lange, Roderick J. A. Little, and Jeremy M. G. Taylor. Robust statistical modeling using the t distribution. Journal of the American Statistical Association, 84:881-896, 1989.
|
| 341 |
+
Kyungjae Lee, Sungjoon Choi, and Songhwai Oh. Sparse markov decision processes with causal sparse tsallis entropy regularization for reinforcement learning. IEEE Robotics and Automation Letters, 3:1466-1473, 2018.
|
| 342 |
+
Kyungjae Lee, Sungyub Kim, Sungbin Lim, Sungjoon Choi, Mineui Hong, Jae In Kim, Yong-Lae Park, and Songhwai Oh. Generalized tsallis entropy reinforcement learning and its application to soft mobile robots. In Robotics: Science and Systems XVI, pp. 1-10, 2020.
|
| 343 |
+
Yuhan Li, Wenzhuo Zhou, and Ruoqing Zhu. Quasi-optimal reinforcement learning with continuous actions. In The Eleventh International Conference on Learning Representations, 2023.
|
| 344 |
+
André F. T. Martins, Marcos Treviso, António Farinhas, Pedro M. Q. Aguiar, Mário A. T. Figueiredo, Mathieu Blondel, and Vlad Niculae. Sparse continuous distributions and Fenchel-Young losses. Journal of Machine Learning Research, 23(257):1-74, 2022.
|
| 345 |
+
Hiroshi Matsuzoe and Atsumi Ohara. Geometry of q-exponential families. In *Recent Progress in Differential Geometry and Its Related Fields*, pp. 55-71, 2011.
|
| 346 |
+
Jincheng Mei, Chenjun Xiao, Csaba Szepesvari, and Dale Schuurmans. On the global convergence rates of softmax policy gradient methods. In Proceedings of the 37th International Conference on Machine Learning, volume 119, pp. 6820-6829, 2020.
|
| 347 |
+
Ashvin Nair, Murtaza Dalal, Abhishek Gupta, and Sergey Levine. {AWAC}: Accelerating online reinforcement learning with offline datasets, 2021.
|
| 348 |
+
Jan Naudts. Deformed exponentials and logarithms in generalized thermostatistics. Physica A-statistical Mechanics and Its Applications, 316:323-334, 2002.
|
| 349 |
+
Jan Naudts. The q-exponential family in statistical physics. Journal of Physics: Conference Series, pp. 012003, 2010.
|
| 350 |
+
Samuel Neumann, Sungsu Lim, Ajin George Joseph, Yangchen Pan, Adam White, and Martha White. Greedy actor-critic: A new conditional cross-entropy method for policy improvement. In *The Eleventh International Conference on Learning Representations*, 2023.
|
| 351 |
+
Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pp. 8024-8035, 2019.
|
| 352 |
+
Ben Peters, Vlad Niculae, and André F. T. Martins. Sparse sequence-to-sequence models. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 1504-1519, 2019.
|
| 353 |
+
|
| 354 |
+
Kaare Brandt Petersen and Michael Syskind Pedersen. The matrix cookbook. 2012.
|
| 355 |
+
Timothy Sears. *Generalized Maximum Entropy, Convexity and Machine Learning*. PhD thesis, The Australian National University and Computer Science Laboratory, Research School of Information Sciences and Engineering, 2008.
|
| 356 |
+
Umut Simsekli, Levent Sagun, and Mert Gurbuzbalaban. A tail-index analysis of stochastic gradient noise in deep neural networks. In Proceedings of the 36th International Conference on Machine Learning, Proceedings of Machine Learning Research, pp. 5827-5837, 2019.
|
| 357 |
+
Richard S. Sutton and Andrew G. Barto. Reinforcement Learning: An Introduction. A Bradford Book, Cambridge, MA, USA, 2018.
|
| 358 |
+
Richard S. Sutton, David McAllester, Satinder Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In Advances in Neural Information Processing Systems (NIPS), pp. 1057-1063, 1999.
|
| 359 |
+
H. Suyari and M. Tsukada. Law of error in tsallis statistics. IEEE Transactions on Information Theory, 51(2):753-757, 2005.
|
| 360 |
+
William J. Thistleton, John A. Marsh, Kenric Nelson, and Constantino Tsallis. Generalized box-muller method for generating $q$ -gaussian random deviates. IEEE Transactions on Information Theory, 53:4805-4810, 2007.
|
| 361 |
+
C. Tsallis. Introduction to Nonextensive Statistical Mechanics: Approaching a Complex World. Springer New York, 2009. ISBN 9780387853581.
|
| 362 |
+
Chenjun Xiao, Han Wang, Yangchen Pan, Adam White, and Martha White. The in-sample softmax for offline reinforcement learning. In *The Eleventh International Conference on Learning Representations*, 2023.
|
| 363 |
+
Haoran Xu, Li Jiang, Jianxiong Li, and Xianyuan Zhan. A policy-guided imitation approach for offline reinforcement learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022.
|
| 364 |
+
Haoran Xu, Li Jiang, Jianxiong Li, Zhuoran Yang, Zhaoran Wang, Victor Wai Kin Chan, and Xianyuan Zhan. Offline RL with no OOD actions: In-sample learning via implicit value regularization. In The Eleventh International Conference on Learning Representations, 2023.
|
| 365 |
+
Lingwei Zhu, Zheng Chen, Matthew Schlegel, and Martha White. Generalized munchausen reinforcement learning using tsallis kl divergence. In Advances in Neural Information Processing Systems (NeurIPS), 2023.
|
| 366 |
+
Lingwei Zhu, Matthew Schlegel, Han Wang, and Martha White. Offline reinforcement learning with tsallis regularization. Transactions on Machine Learning Research, 2024.
|
| 367 |
+
Brian D. Ziebart. *Modeling Purposeful Adaptive Behavior* with the Principle of Maximum Causal Entropy. PhD thesis, Carnegie Mellon University, Carnegie Mellon University, 2010.
|
| 368 |
+
|
| 369 |
+
# APPENDIX
|
| 370 |
+
|
| 371 |
+
The Appendix is organized into the following sections. In section A we summarize the multivariate form of $q$ -exp policies and derive gradients of their log-likelihood. In section B we discuss the connection between the $q$ -exp family and the entropy regularization literature. Based on this, we further discuss in section C how different algorithms may prefer specific policies depending on their actor loss. We then provide implementation details including hyperparameters and how to sample from $q$ -Gaussian in section D. Lastly we provide additional experimental results in section E.
|
| 372 |
+
|
| 373 |
+
A Multivariate $q$ -exp Policies and Log-likelihood
|
| 374 |
+
B Connection to Entropy Regularization
|
| 375 |
+
C Actor Losses
|
| 376 |
+
D Implementation Details
|
| 377 |
+
E Additional Results
|
| 378 |
+
|
| 379 |
+
# A MULTIVARIATE DENSITY OF $q$ -EXP POLICIES
|
| 380 |
+
|
| 381 |
+
<table><tr><td>Policy</td><td>Density</td><td>∇ ln πs(a)</td></tr><tr><td>Gaussian</td><td>1/(2π)N/2|Σ|1/2 exp(-1/2 (a - μ)TΣ-1 (a - μ))</td><td>Eq. (13)</td></tr><tr><td>Student's t</td><td>Γ(N+ν)/Γ(ν/2)(νπ)N/2|Σ|1/2 [1 + 1/ν (a - μ)TΣ-1 (a - μ)]-(N+ν)/2</td><td>Eq. (14)</td></tr><tr><td>q-Gaussian (q < 1)</td><td>(1-q)N/2 Γ(2-q+N/2)/Γ(2-q/1-q)πN/2|Σ|1/2 expq(-1/2 (a - μ)TΣ-1 (a - μ))</td><td>Eq. (15)</td></tr><tr><td>q-Gaussian (1 < q < 3)</td><td>(q-1)N/2 Γ(3-q/2(q-1)+N/2)/Γ(3-q/2(q-1))πN/2|Σ|1/2 expq(-1/2 (a - μ)TΣ-1 (a - μ))</td><td></td></tr></table>
|
| 382 |
+
|
| 383 |
+
Table 3: Multivariate $q$ -exp policies and gradients of log-likelihood.
|
| 384 |
+
|
| 385 |
+
In Table 3 we show multivariate density of the $q$ -exp policies introduced in the main text. Note that multivariate Student's t is constructed based on the assumption that a diagonal $\Sigma$ leads to independent action dimensions, same as the Gaussian policy. On the other hand, for $q$ -Gaussian this is no longer true, since a diagonal $\Sigma$ does not lead to product of univariate densities.
|
| 386 |
+
|
| 387 |
+
In the main text we showed their one-dimensional cases for simplicity. For experiments the multivariate densities were used. We now derive their gradients of log-likelihood with respect to parameters. The following equations will be used frequently (Petersen & Pedersen, 2012):
|
| 388 |
+
|
| 389 |
+
$$
|
| 390 |
+
\nabla_ {\boldsymbol {\mu}} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) = - 2 \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}), \tag {10}
|
| 391 |
+
$$
|
| 392 |
+
|
| 393 |
+
$$
|
| 394 |
+
\nabla_ {\Sigma} \ln | \Sigma | = \left(\Sigma^ {\top}\right) ^ {- 1}, \tag {11}
|
| 395 |
+
$$
|
| 396 |
+
|
| 397 |
+
$$
|
| 398 |
+
\nabla_ {\Sigma} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) = - \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1}. \tag {12}
|
| 399 |
+
$$
|
| 400 |
+
|
| 401 |
+
With these tools in hand, the following gradient expressions can be readily derived.
|
| 402 |
+
|
| 403 |
+
# A.1 GAUSSIAN
|
| 404 |
+
|
| 405 |
+
Being a member of the exponential family, the gradient of Gaussian log-likelihood allows straightforward derivation by using Eq. (10)-Eq. (12):
|
| 406 |
+
|
| 407 |
+
$$
|
| 408 |
+
\begin{array}{l} \ln \pi_ {s} (a) = - \frac {N}{2} \ln 2 \pi - \frac {1}{2} \ln | \Sigma | - \frac {1}{2} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) \\ \Rightarrow \quad \nabla_ {\boldsymbol {\mu}} \ln \pi_ {s} (a) = - \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}), \tag {13} \\ \nabla_ {\Sigma} \ln \pi_ {s} (a) = - \frac {1}{2} \left(\Sigma^ {- 1} - \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1}\right). \\ \end{array}
|
| 409 |
+
$$
|
| 410 |
+
|
| 411 |
+
# A.2 STUDENT'S T
|
| 412 |
+
|
| 413 |
+
In addition to $\mu, \Sigma$ , Student's t policy has an additional learnable parameter degree of freedom $\nu$ . Recall that $\nu = 1$ corresponds to the Cauchy's distribution, while numerically with $\nu \geq 30$ it can be seen as a Gaussian distribution.
|
| 414 |
+
|
| 415 |
+
$$
|
| 416 |
+
\begin{array}{l} \ln \pi_ {s} (a) = \ln \Gamma \left(\frac {N + \nu}{2}\right) - \ln \Gamma \left(\frac {\nu}{2}\right) - \frac {N}{2} \ln \nu \pi - \frac {1}{2} \ln | \Sigma | - \frac {N + \nu}{2} \ln \left(1 + \frac {1}{\nu} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})\right) \\ \Rightarrow \nabla_ {\boldsymbol {\mu}} \ln \pi_ {s} (a) = \frac {N + \nu}{\nu} \cdot \frac {\Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})}{1 + \frac {1}{\nu} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})}, \\ \nabla_ {\Sigma} \ln \pi_ {s} (a) = - \frac {1}{2} \left(\Sigma^ {- 1} - \frac {(N + \nu) \Sigma^ {- 1} (\pmb {a} - \pmb {\mu}) (\pmb {a} - \pmb {\mu}) ^ {\top} \Sigma^ {- 1}}{\nu + (\pmb {a} - \pmb {\mu}) ^ {\top} \Sigma^ {- 1} (\pmb {a} - \pmb {\mu})}\right), \\ \nabla_ {\nu} \ln \pi_ {s} (a) = \psi \left(\frac {N + \nu}{2}\right) - \psi \left(\frac {\nu}{2}\right) - \frac {N}{2 \nu} - \frac {N}{2} \ln \left(1 + \frac {1}{\nu} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})\right) \\ + \frac {N + \nu}{2} \frac {\frac {1}{\nu} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})}{\nu + (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})}, \tag {14} \\ \end{array}
|
| 417 |
+
$$
|
| 418 |
+
|
| 419 |
+
where $\psi (\cdot)$ is the digamma function. For $\pmb{\mu}$ and $\boldsymbol{\Sigma}$ we again leveraged Eq.(10)-Eq.(12).
|
| 420 |
+
|
| 421 |
+
# A.3 $q$ -GAUSSIAN
|
| 422 |
+
|
| 423 |
+
Since we do not parametrize the entropic index $q$ , the gradients of log-likelihood with respect to $\mu$ , $\Sigma$ are the same for both heavy- and light-tailed $q$ -Gaussian. Therefore, we focus on the light-tailed case $q < 1$ and absorb into the constant $C$ the terms only related to $q$ .
|
| 424 |
+
|
| 425 |
+
$$
|
| 426 |
+
\begin{array}{l} \ln \pi_ {s} (a) = \ln C - \frac {1}{2} \ln | \Sigma | + \frac {1}{1 - q} \ln \left[ 1 - \frac {1 - q}{2} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) \right] _ {+} \\ \Rightarrow \nabla_ {\pmb {\mu}} \ln \pi_ {s} (a) = \frac {1}{1 - q} \frac {(1 - q) \Sigma^ {- 1} (\pmb {a} - \pmb {\mu})}{\left[ 1 - \frac {1 - q}{2} (\pmb {a} - \pmb {\mu}) ^ {\top} \Sigma^ {- 1} (\pmb {a} - \pmb {\mu}) \right] _ {+}} = \frac {\Sigma^ {- 1} (\pmb {a} - \pmb {\mu})}{\exp_ {q} \left(- \frac {1}{2} (\pmb {a} - \pmb {\mu}) ^ {\top} \Sigma^ {- 1} (\pmb {a} - \pmb {\mu})\right) ^ {1 - q}}, \\ \end{array}
|
| 427 |
+
$$
|
| 428 |
+
|
| 429 |
+
$$
|
| 430 |
+
\nabla_ {\Sigma} \ln \pi_ {s} (a) = - \frac {1}{2} \left(\Sigma^ {- 1} - \frac {\Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu}) (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1}}{\exp_ {q} \left(- \frac {1}{2} (\boldsymbol {a} - \boldsymbol {\mu}) ^ {\top} \Sigma^ {- 1} (\boldsymbol {a} - \boldsymbol {\mu})\right) ^ {1 - q}}\right). \tag {15}
|
| 431 |
+
$$
|
| 432 |
+
|
| 433 |
+
It is interesting to see that the gradients of $q$ -Gaussian log-likelihood can be seen as the Gaussian counterparts scaled by the reciprocal of $\exp_q(\cdot)^{1 - q}$ . Since $\exp_q$ can take on zero values when $q < 1$ , the gradients as well as the log-likelihood function may be undefined outside the support. However, this does not happen for heavy-tailed $q$ -Gaussian $1 < q < 3$ .
|
| 434 |
+
|
| 435 |
+
To make these policies suitable for deep reinforcement learning, we discuss in Appendix D how to parametrize the policies using neural networks.
|
| 436 |
+
|
| 437 |
+
# B CONNECTION TO ENTROPY REGULARIZATION
|
| 438 |
+
|
| 439 |
+
The $q$ -exp family provides a general class of stochastic policies. But perhaps more importantly, they can be derived as solutions to the maximum Tsallis entropy principle (Suyari & Tsukada, 2005;
|
| 440 |
+
|
| 441 |
+
Furuichi, 2010), generalizing the maximum Shannon entropy principle (Jaynes, 1957; Grünwald & Dawid, 2004; Ziebart, 2010). We discuss both principles in equation 16.
|
| 442 |
+
|
| 443 |
+
For notational convenience, we define the inner product over actions for any two functions $F_{1}, F_{2} \in \mathbb{R}^{|\mathcal{S}| \times |\mathcal{A}|}$ as $\langle F_1, F_2 \rangle \in \mathbb{R}^{|\mathcal{S}|}$ . We write $F_{s}$ to express the dependency of the function $F$ on the state $s$ . Often $F_{s} \in \mathbb{R}^{|\mathcal{A}|}$ ; whenever its component is of concern, we denote it by $F_{s}(a)$ .
|
| 444 |
+
|
| 445 |
+
# B.1 BOLTZMANN-GIBBS REGULARIZATION
|
| 446 |
+
|
| 447 |
+
Consider a regularized policy as the solution to the following regularization problem:
|
| 448 |
+
|
| 449 |
+
$$
|
| 450 |
+
\pi_ {\Omega , s} = \underset {\pi_ {s} \in \Delta_ {\mathcal {A}}} {\arg \max } \left\langle \pi_ {s}, Q _ {s} \right\rangle - \Omega \left(\pi_ {s}\right), \tag {16}
|
| 451 |
+
$$
|
| 452 |
+
|
| 453 |
+
where $\Omega$ is a proper, lower semi-continuous, strictly convex function. We can absorb the regularization coefficient $\tau > 0$ into $\Omega$ by $\Omega := \tau \tilde{\Omega}$ . It is a classic result that at the limit $\tau \to 0$ the unregularized optimal action is recovered: $\lim_{\tau \to 0} \pi_{\tau \tilde{\Omega}, s} = \mathbb{1}\{a = a^{*}\}$ , i.e., $a^{*}$ that maximizes $Q_{s}$ .
|
| 454 |
+
|
| 455 |
+
One of the most well-studied regularizers is the negative Shannon entropy $\Omega(\pi_s) = \langle \pi_s, \ln \pi_s \rangle$ , which leads to the Boltzmann-Gibbs policy $\pi_{\mathrm{BG},s}(a) = \exp(Q_s(a) - Z_s)$ . Another popular choice is the KL divergence $\Omega(\pi_s) = \langle \pi_s, \ln \pi_s - \ln \mu_s \rangle$ for some reference policy $\mu_s$ . The regularized policy is $\pi_{\mathrm{KL},s}(a) = \mu_s(a) \exp(Q_s(a) - Z_s)$ . Notice that it is also a member of the exponential family by writing $\pi_{\mathrm{KL},s}(a) = \exp(Q_s(a) - Z_s + \ln \mu_s(a))$ .
|
| 456 |
+
|
| 457 |
+
# B.2 TSALLIS REGULARIZATION
|
| 458 |
+
|
| 459 |
+
Originally, the deformed logarithm function was introduced in statistical physics to generalize the Shannon entropy by deforming the logarithm contained in it (Naudts, 2010). Consider replacing the Shannon entropy in equation 16 with the negative Tsallis entropy $\Omega_q(\pi_s) = \frac{1}{q - 1} (\langle \mathbf{1},\pi_s^q\rangle -1)$ . It has been shown that $\Omega_q(\pi_s)$ leads to the following regularized policy:
|
| 460 |
+
|
| 461 |
+
$$
|
| 462 |
+
\pi_ {\Omega_ {q}, s} (a) = \exp_ {2 - q} \left(Q _ {s} (a) - Z _ {q, s} ^ {\prime}\right). \tag {17}
|
| 463 |
+
$$
|
| 464 |
+
|
| 465 |
+
We see that when $q = 2$ , it recovers the sparsemax policy introduced in Section 3.2. As indicated by (Zhu et al., 2023), the effect of different $q \in (-\infty, 1)$ lies in the extent of thresholding. One can also consider regularization by the Tsallis KL divergence $D_{KL}^{q}(\pi_{s} \| \mu_{s}) \coloneqq \left\langle \pi_{s}, -\ln_{q} \frac{\mu_{s}}{\pi_{s}} \right\rangle$ (Furuichi et al., 2004). Likewise to the KL case, $\mu$ is typically taken to be the last policy, in which case the regularized policy is the product of two $q$ -exp functions.
|
| 466 |
+
|
| 467 |
+
It is worth noting that there are other regularization functionals that can induce $q$ -exp policies. One of the prominent examples is the $\alpha$ -entropy/divergence, which can be defined by simply letting $p = \frac{1}{q}$ in $\Omega_q(\pi_s)$ (Peters et al., 2019; Belousov & Peters, 2019). It is shown in (Xu et al., 2022; 2023) that when $\alpha = -1$ it induces the sparsemax policy. Therefore, $q$ -exp policies can also be viewed as solutions to the $\alpha$ regularization.
|
| 468 |
+
|
| 469 |
+
# B.3 TSALLIS ADVANTAGE WEIGHTED ACTOR CRITIC
|
| 470 |
+
|
| 471 |
+
An advantage of $q$ -exp (resp. exp) policies is that they may improve the consistency of algorithms that explicitly mimic a $q$ -exp (resp. exp) policy. For example, Tsallis Advantage Weighted Actor Critic (TAWAC) proposed to use a light-tailed $q$ -exp policy for offline learning (Zhu et al., 2024). However, TAWAC was implemented with a Gaussian, which amounts to approximating a light-tailed distribution using one with infinite support. Let $\pi_{\mathcal{D}}$ denote the empirical behavior policy and $\mathcal{D}$ the offline dataset. TAWAC minimizes the following actor loss, where we ignore the parametrization of value functions:
|
| 472 |
+
|
| 473 |
+
$$
|
| 474 |
+
\mathcal {L} (\phi) := \mathbb {E} _ {s \sim \mathcal {D}} \left[ D _ {K L} \left(\pi_ {\mathrm {T K L}, s} \| \pi_ {\phi , s}\right) \right] = \mathbb {E} _ {\substack {s \sim \mathcal {D} \\ a \sim \pi_ {\mathcal {D}}}} \left[ - \exp_ {q ^ {\prime}} \left(\frac {Q _ {s} (a) - V _ {s}}{\tau}\right) \ln \pi_ {\phi , s} (a) \right], \tag {18}
|
| 475 |
+
$$
|
| 476 |
+
|
| 477 |
+
where $\pi_{\mathrm{TKL},s}(a)\propto \pi_{\mathcal{D},s}(a)\exp_{q'}\big(\tau^{-1}\left(Q_s(a) - V_s\right)\big)$ denotes the Tsallis KL regularized policy. We can generalize TAWAC to online learning by simply changing the expectation to be w.r.t. an arbitrary behavior policy. It is clear that depending on $q^{\prime}$ , choosing a Gaussian as $\pi_{\phi}$ may incur inconsistency with the theory. A $q$ -exp policy would be more suitable and could improve the performance. As evidenced by our experimental results, heavy-tailed policies indeed further improve the performance of TAWAC by a large margin.
|
| 478 |
+
|
| 479 |
+
# C ACTOR LOSSES
|
| 480 |
+
|
| 481 |
+
To help understand when exp-family policies (resp. $q$ -exp) may be more preferable, we compare the actor loss functions of the algorithms in the experiment section.
|
| 482 |
+
|
| 483 |
+
# C.1 ONLINE ALGORITHMS
|
| 484 |
+
|
| 485 |
+
Soft Actor-Critic. SAC minimizes the following KL loss for the actor
|
| 486 |
+
|
| 487 |
+
$$
|
| 488 |
+
\mathcal {L} _ {\mathrm {S A C}} (\phi) := \mathbb {E} _ {s \sim \mathcal {B}} \left[ D _ {K L} \left(\pi_ {\phi} (\cdot | s) \| \pi_ {\mathrm {B G}} (\cdot | s)\right) \right] = \mathbb {E} _ {s \sim \mathcal {B}} \left[ D _ {K L} \left(\pi_ {\phi} (\cdot | s) \,\middle\|\, \frac {\exp \left(\tau^ {- 1} Q (s , \cdot)\right)}{Z _ {s}}\right) \right],
|
| 489 |
+
$$
|
| 490 |
+
|
| 491 |
+
where states are sampled from the replay buffer $\mathcal{B}$ . The parametrized policy $\pi_{\phi}$ is projected to be close to the BG policy; therefore, it is reasonable to expect that choosing $\pi_{\phi}$ from the exp-family may be preferable. Depending on the action values, the BG policy can be skewed and multi-modal. Therefore, the symmetric, unimodal Gaussian may not be able to fully capture these characteristics.
|
| 492 |
+
|
| 493 |
+
Greedy Actor-Critic. GreedyAC maintains an additional proposal policy besides the actor. The proposal policy is responsible for producing actions from which the top $k\%$ of actions are used to update the actor. The proposal policy itself is updated similarly but with an entropy bonus encouraging exploration. To simplify notations, we use $I(s)$ to denote the set containing top $k\%$ actions given $s$ .
|
| 494 |
+
|
| 495 |
+
$$
|
| 496 |
+
\mathcal{L}_{\text{GreedyAC,prop}}(\phi):= \mathbb{E}_{\substack{s\sim \mathcal{B}\\ a\in I(s)}}\left[-\ln \pi_{\phi}(a|s) - \mathcal{H}\left(\pi_{\phi}(\cdot |s)\right)\right],
|
| 497 |
+
$$
|
| 498 |
+
|
| 499 |
+
$$
|
| 500 |
+
\mathcal{L}_{\text{GreedyAC, actor}}(\bar{\phi}):= \mathbb{E}_{\substack{s\sim \mathcal{B}\\ a\in I(s)}}\left[-\ln \pi_{\bar{\phi}}(a|s)\right].
|
| 501 |
+
$$
|
| 502 |
+
|
| 503 |
+
GreedyAC maximizes log-likelihood of the actor and proposal policy. These policies impose no constraints on the functional form of $\pi$ .
|
| 504 |
+
|
| 505 |
+
Online Tsallis AWAC. Online TAWAC is extended to condition on the behavior policy that collects experiences $\pi_{\mathrm{theory}}(a|s) \propto \pi_{\mathrm{behavior}}(a|s)\exp_q\left(\frac{Q(s,a) - V(s)}{\tau}\right)$ .
|
| 506 |
+
|
| 507 |
+
$$
|
| 508 |
+
\begin{array}{l} \mathcal {L} _ {\mathrm {T A W A C}} (\phi) := \mathbb {E} _ {s \sim \mathcal {B}} \left[ D _ {K L} \left(\pi_ {\mathrm {theory}} (\cdot | s) \| \pi_ {\phi} (\cdot | s)\right) \right] \\ = \mathbb {E} _ {\substack {s \sim \mathcal {B} \\ a \sim \pi_ {\bar {\phi}}}} \left[ - \exp_ {q} \left(\frac {Q (s , a) - V (s)}{\tau}\right) \ln \pi_ {\phi} (a | s) \right], \\ \end{array}
|
| 509 |
+
$$
|
| 510 |
+
|
| 511 |
+
where the condition $a \sim \pi_{\bar{\phi}}$ arises because the target policy is used to sample actions. Since Tsallis AWAC explicitly minimizes the KL loss to a $q$ -exp policy, which can be light-tailed or heavy-tailed depending on $q$ , choosing a $q$ -exp $\pi_{\phi}$ could lead to better performance.
|
| 512 |
+
|
| 513 |
+
# C.2 OFFLINE ALGORITHMS
|
| 514 |
+
|
| 515 |
+
AWAC. Advantage Weighted Actor-Critic (AWAC) is the basis of many algorithms. AWAC minimizes the following actor loss:
|
| 516 |
+
|
| 517 |
+
$$
|
| 518 |
+
\mathcal{L}_{\mathrm{AWAC}}(\phi):= \mathbb{E}_{\substack{s\sim \mathcal{D}\\ a\sim \pi_{\mathcal{D}}}}\left[-\exp \left(\frac{Q(s,a) - V(s)}{\tau}\right)\ln \pi_{\phi}(a|s)\right],
|
| 519 |
+
$$
|
| 520 |
+
|
| 521 |
+
which is derived as the result of minimizing KL loss $D_{KL}(\pi_{\mathcal{D}}\| \pi_{\phi})$ and applying the trick in Eq. 18, i.e., $\pi_{\mathrm{theory}}(a|s)\propto \pi_{\mathcal{D}}(a|s)\exp \left(\frac{Q(s,a) - V(s)}{\tau}\right) = \exp \left(\frac{Q(s,a) - V(s)}{\tau} -\ln \pi_{\mathcal{D}}(a|s)\right)$ . However, the shape of this policy can be multi-modal and skewed depending on the values and $\pi_{\mathcal{D}}$ . It is visible from experimental results that Beta and Squashed Gaussian have similar performance.
|
| 522 |
+
|
| 523 |
+
IQL. In contrast to AWAC, Implicit Q-Learning (IQL) does not have an explicit actor learning procedure and uses $\mathcal{L}_{\mathrm{AWAC}}(\phi)$ as a means for policy extraction from the learned value functions. The exponential advantage function acts simply as weights. Therefore, IQL does not assume the functional form of $\pi_{\phi}$ .
|
| 524 |
+
|
| 525 |
+
InAC. In-Sample Actor-Critic (InAC) proposed to impose an in-sample constraint on the entropy-regularized BG policy. As such, the dependence on the behavior policy is moved into the exponential-advantage weighting function:
|
| 526 |
+
|
| 527 |
+
$$
|
| 528 |
+
\mathcal {L} _ {\mathrm {I n A C}} (\phi) := \mathbb {E} _ {\substack {s \sim \mathcal {D} \\ a \sim \pi_ {\mathcal {D}}}} \left[ - \exp \left(\frac {Q (s , a) - V (s)}{\tau} - \ln \pi_ {\mathcal {D}} (a | s)\right) \ln \pi_ {\phi} (a | s) \right].
|
| 529 |
+
$$
|
| 530 |
+
|
| 531 |
+

|
| 532 |
+
Figure 10: Beta distribution with $\alpha < 1, \beta < 1$ takes on a bowl shape rather than a bell shape. The shape can also be skewed as well as symmetric.
|
| 533 |
+
|
| 534 |
+
As a result, InAC is not as sensitive to the advantage weighting as AWAC is, which implies that InAC may favor an exp $\pi_{\phi}$ , but less strongly than AWAC.
|
| 535 |
+
|
| 536 |
+
Offline Tsallis AWAC. The offline case of Tsallis AWAC is the same as the online case except for the change of expectation:
|
| 537 |
+
|
| 538 |
+
$$
|
| 539 |
+
\mathcal {L} _ {\mathrm {T A W A C}} (\phi) := \mathbb {E} _ {\substack {s \sim \mathcal {D} \\ a \sim \pi_ {\mathcal {D}}}} \left[ - \exp_ {q} \left(\frac {Q (s , a) - V (s)}{\tau}\right) \ln \pi_ {\phi} (a | s) \right].
|
| 540 |
+
$$
|
| 541 |
+
|
| 542 |
+
As in the online case, offline Tsallis AWAC may theoretically prefer a $q$ -exp $\pi_{\phi}$ .
|
| 543 |
+
|
| 544 |
+
TD3BC. In Appendix E we include additional results of TD3BC (Fujimoto & Gu, 2021), whose actor loss is obtained by simply augmenting the TD3 loss with a behavior cloning term:
|
| 545 |
+
|
| 546 |
+
$$
|
| 547 |
+
\mathcal{L}_{\mathrm{TD3BC}}(\phi):= \mathbb{E}_{\substack{s\sim \mathcal{D}\\ a\sim \pi_{\mathcal{D}}}}\left[-\lambda Q(s,\pi (s)) + (\pi (s) - a)^{2}\right].
|
| 548 |
+
$$
|
| 549 |
+
|
| 550 |
+
The behavior cloning term is simply minimizing the $L_{2}$ distance to actions in the dataset. Though another interpretation by (Xiao et al., 2023) is that this term can be understood as applying KL regularization to Gaussian policy.
|
| 551 |
+
|
| 552 |
+
# D IMPLEMENTATION DETAILS
|
| 553 |
+
|
| 554 |
+
Details of our implementation are provided in this section. Specifically, we describe our design choices, hyperparameters, and network architectures.
|
| 555 |
+
|
| 556 |
+
# D.1 POLICIES
|
| 557 |
+
|
| 558 |
+
We discuss how to parametrize the Beta, Student's t, and $q$ -Gaussian policies. Specifically, we parametrize $\alpha, \beta$ for the Beta policy and $\mu, \Sigma$ for the $q$ -Gaussian. In addition to location and scale, Student's t has an additional learnable parameter $\nu$ .
|
| 559 |
+
|
| 560 |
+
For the Student's t policy, we initialize a base DOF $\nu_0 = 1$ and learn $\nu$ through a softplus transformation. The Student's t policy therefore always has DOF $\nu > 1$ , which is equivalent to starting from the Cauchy distribution. For the Beta policy, we similarly constrain $\alpha, \beta$ to be the output of a softplus function plus 1. This is because when $\alpha < 1, \beta < 1$ the Beta policy takes on a bowl shape rather than a bell shape; see Figure 10. For the Gaussian and $q$ -Gaussian policies, we follow the standard practice of parametrizing the mean by the tanh activation and the scale by the log-standard-deviation transform.
|
| 561 |
+
|
| 562 |
+
In the tested off-policy/offline algorithms, it is necessary to evaluate log-probability for off-policy/offline actions stored in the buffer. For light-tailed $q$ -Gaussian this can cause numerical issues since the evaluated actions may fall outside the support, incurring $-\infty$ for log-probability. To avoid this issue, we sample a batch of on-policy actions from the $q$ -Gaussian and replace the out-of-support actions with the nearest action in the $L_{2}$ sense.
|
| 563 |
+
|
| 564 |
+
In our experiments, all environments had bounded action spaces. The Squashed Gaussian and light-tailed $q$ -Gaussian provide bounded output. However, Student's t, heavy-tailed $q$ -Gaussian, and Gaussian have unbounded support. For these distributions, we clipped the sampled action to fit the action space of the task, without further modification of the density. The mean value is constrained using the tanh function in distributions with unbounded support, except for the standard Gaussian in offline learning.
|
| 565 |
+
|
| 566 |
+
# D.2 ONLINE EXPERIMENTS
|
| 567 |
+
|
| 568 |
+
We used three classical control environments in the continuous action setting: Mountain Car (Sutton & Barto, 2018), Pendulum (Degris et al., 2012) and Acrobot (Sutton & Barto, 2018). All episodes are truncated at 1000 time steps. In Mountain Car, the action is the force applied to the car in $[-1,1]$ , and the agent receives a reward of -1 at every time step. In Pendulum, the action is the torque applied to the base of the pendulum in $[-2,2]$ and the reward is defined by $r = -(\theta^2 + 0.1 * (\frac{\mathrm{d}\theta}{\mathrm{d}t})^2 + 0.001 * a^2)$ where $\theta$ denotes the angle, $\frac{\mathrm{d}\theta}{\mathrm{d}t}$ its time derivative, and $a$ the torque applied. Finally, in Acrobot, the action is the torque applied on the joint between the two links in $[-1,1]$ and the agent receives a reward of -1 per time step.
|
| 569 |
+
|
| 570 |
+
Experiment settings: When sweeping different hyperparameter configurations, we pause the training every 10,000 time steps and then evaluate the learned policy by averaging the total reward over 3 episodes. However, when running the best hyperparameter configuration, we evaluate by freezing the policy every 1000 time steps and then computing the total reward obtained for 1 episode.
|
| 571 |
+
|
| 572 |
+
Parameter sweeping: We sweep the hyperparameters with 5 independent runs and then evaluate the run configuration for 30 seeds. We select the best hyperparameters based on the overall area under curve. When running the best hyperparameter configurations, we discard the original 5 seeds used for the hyperparameter sweep in order to avoid the bias caused by hyperparameter selection. Details regarding the fixed and swept hyperparameters are provided in Table 4.
|
| 573 |
+
|
| 574 |
+
Agent learning: We used a 2-layer network with 64 nodes on each layer and ReLU non-linearities. The batch size was 32. Agents used a target network for the critic, updated with polyak averaging with $\alpha = 0.01$ .
|
| 575 |
+
|
| 576 |
+
<table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>Critic Learning rate</td><td>Swept in {1 × 10-2, 1 × 10-3, 1 × 10-4, 1 × 10-5}</td></tr><tr><td>Critic learning rate multiplier for actor</td><td>Swept in {0.1, 1, 10}</td></tr><tr><td>Temperature</td><td>Swept in {0.01, 0.1, 1}</td></tr><tr><td>Discount rate</td><td>0.99</td></tr><tr><td>Hidden size of Value network</td><td>64</td></tr><tr><td>Hidden layers of Value network</td><td>2</td></tr><tr><td>Hidden size of Policy network</td><td>64</td></tr><tr><td>Hidden layers of Policy network</td><td>2</td></tr><tr><td>Minibatch size</td><td>32</td></tr><tr><td>Adam.β1</td><td>0.9</td></tr><tr><td>Adam.β2</td><td>0.999</td></tr><tr><td>Number of seeds for sweeping</td><td>10</td></tr><tr><td>Number of seeds for the best setting</td><td>30</td></tr></table>
|
| 577 |
+
|
| 578 |
+
Table 4: Default hyperparameters and sweeping choices for online experiments.
|
| 579 |
+
|
| 580 |
+
# D.3 OFFLINE EXPERIMENTS
|
| 581 |
+
|
| 582 |
+
We use the MuJoCo suite from D4RL (Apache-2/CC-BY licence) (Fu et al., 2020) for offline experiments. The D4RL offline datasets all contain 1 million samples generated by a partially trained SAC agent. The name reflects the level of the trained agent used to collect the transitions. The Medium dataset contains samples generated by a medium-level (trained halfway) SAC policy. Medium-expert mixes the trajectories from the Medium level and that produced by an expert agent. Medium-replay consists of samples in the replay buffer during training until the policy reaches the
|
| 583 |
+
|
| 584 |
+
medium level of performance. In summary, the ranking of levels is Medium-expert $>$ Medium $>$ Medium-replay.
|
| 585 |
+
|
| 586 |
+
Experiment settings: We conducted the offline experiment using 9 datasets provided in D4RL: halfcheetah-medium-expert, halfcheetah-medium, halfcheetah-medium-replay, hopper-medium-expert, hopper-medium, hopper-medium-replay, walker2d-medium-expert, walker2d-medium, and walker2d-medium-replay. We run 5 agents: TAWAC, AWAC, IQL, InAC, and TD3BC. The results of TD3BC are posted in the appendix. For each agent, we tested 5 distributions: Gaussian, Squashed Gaussian, Beta, Student's t, and Heavy-tailed $q$ -Gaussian. As offline learning algorithms usually require a distribution covering the whole action space, Light-tailed q-Gaussian is not considered in offline learning experiments. Each agent was trained for $1 \times 10^{6}$ steps. The policy was evaluated every 1000 steps. The score was averaged over 5 rollouts in the real environment; each had 1000 steps.
|
| 587 |
+
|
| 588 |
+
Parameter sweeping: All results shown in the paper were generated by the best parameter setting after sweeping. We list the parameter setting in Table 5. Learning rate and temperature in TAWAC + medium datasets were swept as the experiments in their publication did not include the medium dataset. The best learning rates are reported in Table 6, and the temperatures are listed in Table 7.
|
| 589 |
+
|
| 590 |
+
<table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>Learning rate</td><td>Swept in {3 × 10-3, 1 × 10-3, 3 × 10-4, 1 × 10-4}See the best setting in Table 6</td></tr><tr><td>Temperature</td><td>Same as the number reported in the publication of each algorithm.Except in TAWAC + medium datasets, the value was swept in {1.0, 0.5, 0.01}.See the setting in Table 7</td></tr><tr><td>IQL Expectile</td><td>0.7</td></tr><tr><td>Discount rate</td><td>0.99</td></tr><tr><td>Hidden size of Value network</td><td>256</td></tr><tr><td>Hidden layers of Value network</td><td>2</td></tr><tr><td>Hidden size of Policy network</td><td>256</td></tr><tr><td>Hidden layers of Policy network</td><td>2</td></tr><tr><td>Minibatch size</td><td>256</td></tr><tr><td>Adam.β1</td><td>0.9</td></tr><tr><td>Adam.β2</td><td>0.99</td></tr><tr><td>Number of seeds for sweeping</td><td>5</td></tr><tr><td>Number of seeds for the best setting</td><td>10</td></tr></table>
|
| 591 |
+
|
| 592 |
+
Table 5: Default hyperparameters and sweeping choices for offline experiments.
|
| 593 |
+
|
| 594 |
+
Agent learning: We used a 2-layer network with 256 nodes on each layer. The batch size was 256. Agents used a target network for the critic, updated with polyak averaging with $\alpha = 0.005$ . The discount rate was set to 0.99.
|
| 595 |
+
|
| 596 |
+
Sampling. To give an intuition for sampling time, we drew $10^{5}$ samples from a randomly initialized actor on two environments. On HalfCheetah, with a 17-dim state and 6-dim action, the sparse $q$ -Gaussian, heavy-tailed $q$ -Gaussian and Gaussian respectively cost (107.12, 72.09, 27.94) seconds. We confirmed that the methods in Alg. 1 were of the same order of magnitude as the Gaussian, but the sparse $q$ -Gaussian cost more than the heavy-tailed one due to the extra computation needed to produce low-variance samples. This is further confirmed on Hopper, with an 11-dim state and 3-dim action, where they cost (98.13, 65.17, 25.17) seconds.
|
| 597 |
+
|
| 598 |
+
# E FURTHER RESULTS
|
| 599 |
+
|
| 600 |
+
Figure 11 shows the Manhattan plot of Soft Actor-Critic (SAC) with all swept hyperparameters on the online classic control environments. Student's t and Gaussian both seem to respond similarly to hyperparameters. Although there is no definitive winner here, we can safely conclude that if we have a problem where the Gaussian works, Student's t is very likely to work as well. Additionally, given the results
|
| 601 |
+
|
| 602 |
+
<table><tr><td>Dataset</td><td>Distribution</td><td>TAWAC</td><td>AWAC</td><td>IQL</td><td>InAC</td><td>TD3BC</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Heavy-Tailed q-Gaussian</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0003</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Squashed Gaussian</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.0003</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Gaussian</td><td>0.0003</td><td>0.0001</td><td>0.0003</td><td>0.0003</td><td>0.0003</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Beta</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Student's t</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.0003</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Heavy-Tailed q-Gaussian</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Squashed Gaussian</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.003</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Gaussian</td><td>0.001</td><td>0.0001</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Beta</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Student's t</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.0003</td><td>0.003</td></tr><tr><td>HalfCheetah-Medium</td><td>Heavy-Tailed q-Gaussian</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0003</td></tr><tr><td>HalfCheetah-Medium</td><td>Squashed 
Gaussian</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium</td><td>Gaussian</td><td>0.0003</td><td>0.0001</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>HalfCheetah-Medium</td><td>Beta</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0003</td></tr><tr><td>HalfCheetah-Medium</td><td>Student's t</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.001</td></tr><tr><td>Hopper-Medium-Expert</td><td>Heavy-Tailed q-Gaussian</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Hopper-Medium-Expert</td><td>Squashed Gaussian</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Hopper-Medium-Expert</td><td>Gaussian</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Hopper-Medium-Expert</td><td>Beta</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.003</td><td>0.003</td></tr><tr><td>Hopper-Medium-Expert</td><td>Student's t</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.0003</td><td>0.001</td></tr><tr><td>Hopper-Medium</td><td>Heavy-Tailed q-Gaussian</td><td>0.003</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Hopper-Medium</td><td>Squashed Gaussian</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.0003</td><td>0.0001</td></tr><tr><td>Hopper-Medium</td><td>Gaussian</td><td>0.001</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>Hopper-Medium</td><td>Beta</td><td>0.001</td><td>0.001</td><td>0.003</td><td>0.001</td><td>0.001</td></tr><tr><td>Hopper-Medium</td><td>Student's t</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Heavy-Tailed q-Gaussian</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0003</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Squashed 
Gaussian</td><td>0.001</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.0003</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Gaussian</td><td>0.0003</td><td>0.0001</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Beta</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.001</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Student's t</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.0003</td><td>0.0003</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Heavy-Tailed q-Gaussian</td><td>0.0003</td><td>0.0003</td><td>0.003</td><td>0.0003</td><td>0.001</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Squashed Gaussian</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Gaussian</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.003</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Beta</td><td>0.001</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.0003</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Student's t</td><td>0.0003</td><td>0.0003</td><td>0.0003</td><td>0.001</td><td>0.001</td></tr><tr><td>Walker2d-Medium</td><td>Heavy-Tailed q-Gaussian</td><td>0.003</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Walker2d-Medium</td><td>Squashed Gaussian</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Walker2d-Medium</td><td>Gaussian</td><td>0.001</td><td>0.0001</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr><tr><td>Walker2d-Medium</td><td>Beta</td><td>0.001</td><td>0.0003</td><td>0.003</td><td>0.001</td><td>0.0001</td></tr><tr><td>Walker2d-Medium</td><td>Student's t</td><td>0.001</td><td>0.0003</td><td>0.001</td><td>0.001</td><td>0.0001</td></tr></table>
|
| 603 |
+
|
| 604 |
+
Table 6: Best learning rates for offline experiments.
|
| 605 |
+
|
| 606 |
+
in the main text, Student's t is more likely to perform better given the same hyperparameter sweeping range.
|
| 607 |
+
|
| 608 |
+
Our additional offline results include all algorithm-policy combinations on all environments. We also include TD3BC (Fujimoto & Gu, 2021) for comparison. Figure 12 shows the overall comparison with TD3BC. It is clear that Squashed Gaussian performs well and Beta can show slight improvements in some cases, though not much difference is visible except on the Medium-Replay data. We conjecture that the better performance of Squashed Gaussian and Beta could be due to the TD3BC behavior cloning loss: the policy is encouraged to closely approximate the actions from the dataset. Therefore, policies like Beta that can concentrate faster may be more advantageous.
|
| 609 |
+
|
| 610 |
+
Figures 13 to 15 display boxplots of the combinations on environments of each level. Observations consistent with those in the main text can be drawn from these plots, with the exception that in Figure 14 the environment-wise best combination is TAWAC + Student's t. TD3BC does not exhibit strong sensitivity to the choice of policy.
|
| 611 |
+
|
| 612 |
+

|
| 613 |
+
|
| 614 |
+

|
| 615 |
+
Figure 11: Manhattan plot of Soft-Actor-Critic (SAC) with all swept hyperparameters on the online classic control environments. The rewards on the y-axis are averaged over the final $10\%$ of the total steps. Since different policy parameterizations have different numbers of runs in the sweep, we oversampled the smaller sweeps with replacement. From the plot of Acrobot, we observe that Student-t and Gaussian both respond similarly to changing hyper-parameters. Therefore, we hypothesize that if we have an environment where the Gaussian policy works, Student-t is also very likely to work. Additionally, from Figure 5 (left), we know that student-t is $75\%$ more likely to outperform the Gaussian given the same hyperparameter sweeping range.
|
| 616 |
+
|
| 617 |
+

|
| 618 |
+
|
| 619 |
+
<table><tr><td>Dataset</td><td>Distribution</td><td>TAWAC</td><td>AWAC</td><td>IQL</td><td>InAC</td><td>TD3BC</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Heavy-Tailed q-Gaussian</td><td>1.00</td><td>1.00</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Squashed Gaussian</td><td>1.00</td><td>1.00</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Gaussian</td><td>1.00</td><td>1.00</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Beta</td><td>1.00</td><td>1.00</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Expert</td><td>Student's t</td><td>1.00</td><td>1.00</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Heavy-Tailed q-Gaussian</td><td>0.01</td><td>1.00</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Squashed Gaussian</td><td>0.01</td><td>1.00</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Gaussian</td><td>0.01</td><td>1.00</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Beta</td><td>0.01</td><td>1.00</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium-Replay</td><td>Student's t</td><td>0.01</td><td>1.00</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium</td><td>Heavy-Tailed q-Gaussian</td><td>0.01</td><td>0.50</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium</td><td>Squashed Gaussian</td><td>0.01</td><td>0.50</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium</td><td>Gaussian</td><td>0.01</td><td>0.50</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium</td><td>Beta</td><td>0.01</td><td>0.50</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>HalfCheetah-Medium</td><td>Student's 
t</td><td>0.01</td><td>0.50</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>Hopper-Medium-Expert</td><td>Heavy-Tailed q-Gaussian</td><td>0.50</td><td>1.00</td><td>0.33</td><td>0.01</td><td>2.50</td></tr><tr><td>Hopper-Medium-Expert</td><td>Squashed Gaussian</td><td>0.50</td><td>1.00</td><td>0.33</td><td>0.01</td><td>2.50</td></tr><tr><td>Hopper-Medium-Expert</td><td>Gaussian</td><td>0.50</td><td>1.00</td><td>0.33</td><td>0.01</td><td>2.50</td></tr><tr><td>Hopper-Medium-Expert</td><td>Beta</td><td>0.50</td><td>1.00</td><td>0.33</td><td>0.01</td><td>2.50</td></tr><tr><td>Hopper-Medium-Expert</td><td>Student's t</td><td>0.50</td><td>1.00</td><td>0.33</td><td>0.01</td><td>2.50</td></tr><tr><td>Hopper-Medium-Replay</td><td>Heavy-Tailed q-Gaussian</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Hopper-Medium-Replay</td><td>Squashed Gaussian</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Hopper-Medium-Replay</td><td>Gaussian</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Hopper-Medium-Replay</td><td>Beta</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Hopper-Medium-Replay</td><td>Student's t</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Hopper-Medium</td><td>Heavy-Tailed q-Gaussian</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Hopper-Medium</td><td>Squashed Gaussian</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Hopper-Medium</td><td>Gaussian</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Hopper-Medium</td><td>Beta</td><td>0.50</td><td>0.50</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Hopper-Medium</td><td>Student's t</td><td>0.01</td><td>0.50</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Heavy-Tailed 
q-Gaussian</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Squashed Gaussian</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Gaussian</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Beta</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Expert</td><td>Student's t</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.10</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Heavy-Tailed q-Gaussian</td><td>0.50</td><td>0.10</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Squashed Gaussian</td><td>0.50</td><td>0.10</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Gaussian</td><td>0.50</td><td>0.10</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Beta</td><td>0.50</td><td>0.10</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Walker2d-Medium-Replay</td><td>Student's t</td><td>0.50</td><td>0.10</td><td>0.33</td><td>0.50</td><td>2.50</td></tr><tr><td>Walker2d-Medium</td><td>Heavy-Tailed q-Gaussian</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>Walker2d-Medium</td><td>Squashed Gaussian</td><td>1.00</td><td>0.10</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>Walker2d-Medium</td><td>Gaussian</td><td>1.00</td><td>0.10</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>Walker2d-Medium</td><td>Beta</td><td>1.00</td><td>0.10</td><td>0.33</td><td>0.33</td><td>2.50</td></tr><tr><td>Walker2d-Medium</td><td>Student's t</td><td>0.01</td><td>0.10</td><td>0.33</td><td>0.33</td><td>2.50</td></tr></table>
|
| 620 |
+
|
| 621 |
+
Table 7: Temperature settings for offline experiments.
|
| 622 |
+
|
| 623 |
+
Table 8 examined the accumulated probabilities that fell on each edge. It can be seen that the Student's t and the Gaussian tended to increasingly put more density on the boundaries. This is in stark contrast to the heavy-tailed $q$ -Gaussian that put the majority of probability density within the boundary. This may explain the better performance of TAWAC + heavy-tailed $q$ -Gaussian.
|
| 624 |
+
|
| 625 |
+
Lastly, for all of the results shown above, their learning curves are shown in Figures 16 to 20. We smoothed the curves with window size 10 for better visualization.
|
| 626 |
+
|
| 627 |
+
<table><tr><td>Policy\# Updates</td><td>0</td><td>100</td><td>200</td><td>300</td><td>400</td></tr><tr><td>Heavy-tailed q-Gaussian</td><td>(24.39, 13.19)</td><td>(45.23, 2.36)</td><td>(45.49, 2.04)</td><td>(45.52, 1.98)</td><td>(45.54, 1.89)</td></tr><tr><td>Student's t</td><td>(148.43, 71.23)</td><td>(198.89, 45.30)</td><td>(205.04, 37.04)</td><td>(207.15, 32.96)</td><td>(207.00, 33.84)</td></tr><tr><td>Gaussian</td><td>(190.96, 65.89)</td><td>(206.92, 53.05)</td><td>(211.77, 39.08)</td><td>(213.57, 33.71)</td><td>(214.39, 31.26)</td></tr></table>
|
| 628 |
+
|
| 629 |
+
Table 8: The summation of probability density accumulated on the left and the right edge in Figure 9 before clipping. Each pair indicates the left and right edge. The Student's t and the Gaussian increasingly put more density on the edges as compared to the heavy-tailed $q$ -Gaussian.
|
| 630 |
+
|
| 631 |
+

|
| 632 |
+
Figure 12: Relative improvement to the Squashed Gaussian policy, averaged over environments. Black vertical lines at the top indicate one standard error. For TD3BC, Beta policy outperforms the Squashed Gaussian on Medium-Expert and Medium-Replay.
|
| 633 |
+
|
| 634 |
+

|
| 635 |
+
Figure 13: Normalized scores on Medium-Expert level datasets. The black bar shows the median. Boxes and whiskers show $1 \times$ and $1.5 \times$ interquartile ranges, respectively. Fliers are not plotted for uncluttered visualization. Environment-wise, InAC with heavy-tailed $q$ -Gaussian is the top performer. Algorithm-wise, heavy-tailed or/and Student's t can improve or match the performance of the Squashed Gaussian except AWAC. With TD3BC no significant difference between policies is observed.
|
| 636 |
+
|
| 637 |
+

|
| 638 |
+
Figure 14: Normalized scores on Medium level datasets. The black bar shows the median. Boxes and whiskers show $1 \times$ and $1.5 \times$ interquartile ranges, respectively. Fliers are not plotted for uncluttered visualization. Environment-wise, InAC with heavy-tailed $q$ -Gaussian is the top performer. Algorithm-wise, heavy-tailed $q$ -Gaussian has observed significant performance drop with AWAC and InAC on Hopper and Walker2d. With TD3BC no significant difference between policies is observed.
|
| 639 |
+
|
| 640 |
+

|
| 641 |
+
Figure 15: Normalized scores on Medium-Replay level datasets. The black bar shows the median. Boxes and whiskers show $1 \times$ and $1.5 \times$ interquartile ranges, respectively. Fliers are not plotted for uncluttered visualization. Environment-wise, TAWAC + heavy-tailed $q$ -Gaussian is the best performer. Algorithm-wise, Student's t is stable and can match or improve on the performance of (Squashed) Gaussian.
|
| 642 |
+
|
| 643 |
+

|
| 644 |
+
TAWAC Learning Curves
|
| 645 |
+
|
| 646 |
+

|
| 647 |
+
Figure 16: TAWAC learning curves in all datasets. Columns show different environments and rows are the levels of the environments. x-axis denotes the number of steps $(\times 10^{4})$ , and y-axis is the normalized score. Each curve was smoothed with window size 10.
|
| 648 |
+
Figure 17: AWAC learning curves in all datasets. Columns show different environments and rows are the levels of the environments. x-axis denotes the number of steps $(\times 10^{4})$ , and y-axis is the normalized score. Each curve was smoothed with window size 10.
|
| 649 |
+
|
| 650 |
+

|
| 651 |
+
Figure 18: IQL learning curves in all datasets. Columns show different environments and rows are the levels of the environments. x-axis denotes the number of steps $(\times 10^{4})$ , and y-axis is the normalized score. Each curve was smoothed with window size 10.
|
| 652 |
+
|
| 653 |
+

|
| 654 |
+
Figure 19: InAC learning curves in all datasets. Columns show different environments and rows are the levels of the environments. x-axis denotes the number of steps $(\times 10^{4})$ , and y-axis is the normalized score. Each curve was smoothed with window size 10.
|
| 655 |
+
|
| 656 |
+

|
| 657 |
+
TD3BC Learning Curves
|
| 658 |
+
Figure 20: TD3+BC learning curves in all datasets. Columns show different environments and rows are the levels of the environments. x-axis denotes the number of steps $(\times 10^{4})$ , and y-axis is the normalized score. Each curve was smoothed with window size 10.
|
2025/$q$-exponential family for policy optimization/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:363ae5f3695a3c924e2a909478a24f08b5f8f429694910f99f388ef56adc623c
|
| 3 |
+
size 2237441
|
2025/$q$-exponential family for policy optimization/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/03952676-7c33-4a19-867c-762469c73779_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/03952676-7c33-4a19-867c-762469c73779_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/03952676-7c33-4a19-867c-762469c73779_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:94d8c598f208f5f4975e57e6986f780ebe600af1609063166f6fc2930542bfd8
|
| 3 |
+
size 1352375
|
2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7f048095a2d9e11edc5f6cbcdeb9b5b1496d21caea1328716ce785922b99ae66
|
| 3 |
+
size 1969672
|
2025/(Mis)Fitting Scaling Laws_ A Survey of Scaling Law Fitting Techniques in Deep Learning/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2025/3D StreetUnveiler with Semantic-aware 2DGS - a simple baseline/3f23c088-7d80-4cce-b138-008f4d7a0b93_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|