Add Batch 225a4551-326a-4878-99b6-0bbcba5dfa58 data
This view is limited to 50 files because it contains too many changes.
- .gitattributes +64 -0
- 2023/Towards convergence to Nash equilibria in two-team zero-sum games/fca07eac-706b-41aa-8183-c888af669e12_content_list.json +1795 -0
- 2023/Towards convergence to Nash equilibria in two-team zero-sum games/fca07eac-706b-41aa-8183-c888af669e12_model.json +0 -0
- 2023/Towards convergence to Nash equilibria in two-team zero-sum games/fca07eac-706b-41aa-8183-c888af669e12_origin.pdf +3 -0
- 2023/Towards convergence to Nash equilibria in two-team zero-sum games/full.md +345 -0
- 2023/Towards convergence to Nash equilibria in two-team zero-sum games/images.zip +3 -0
- 2023/Towards convergence to Nash equilibria in two-team zero-sum games/layout.json +0 -0
- 2023/Towards the Generalization of Contrastive Self-Supervised Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_content_list.json +0 -0
- 2023/Towards the Generalization of Contrastive Self-Supervised Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_model.json +0 -0
- 2023/Towards the Generalization of Contrastive Self-Supervised Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_origin.pdf +3 -0
- 2023/Towards the Generalization of Contrastive Self-Supervised Learning/full.md +0 -0
- 2023/Towards the Generalization of Contrastive Self-Supervised Learning/images.zip +3 -0
- 2023/Towards the Generalization of Contrastive Self-Supervised Learning/layout.json +0 -0
- 2023/Trading Information between Latents in Hierarchical Variational Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_content_list.json +0 -0
- 2023/Trading Information between Latents in Hierarchical Variational Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_model.json +0 -0
- 2023/Trading Information between Latents in Hierarchical Variational Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_origin.pdf +3 -0
- 2023/Trading Information between Latents in Hierarchical Variational Autoencoders/full.md +465 -0
- 2023/Trading Information between Latents in Hierarchical Variational Autoencoders/images.zip +3 -0
- 2023/Trading Information between Latents in Hierarchical Variational Autoencoders/layout.json +0 -0
- 2023/Trainability Preserving Neural Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_content_list.json +0 -0
- 2023/Trainability Preserving Neural Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_model.json +0 -0
- 2023/Trainability Preserving Neural Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_origin.pdf +3 -0
- 2023/Trainability Preserving Neural Pruning/full.md +501 -0
- 2023/Trainability Preserving Neural Pruning/images.zip +3 -0
- 2023/Trainability Preserving Neural Pruning/layout.json +0 -0
- 2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_content_list.json +0 -0
- 2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_model.json +0 -0
- 2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_origin.pdf +3 -0
- 2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/full.md +401 -0
- 2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/images.zip +3 -0
- 2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/layout.json +0 -0
- 2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_content_list.json +0 -0
- 2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_model.json +0 -0
- 2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_origin.pdf +3 -0
- 2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/full.md +495 -0
- 2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/images.zip +3 -0
- 2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/layout.json +0 -0
- 2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_content_list.json +2124 -0
- 2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_model.json +0 -0
- 2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_origin.pdf +3 -0
- 2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/full.md +369 -0
- 2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/images.zip +3 -0
- 2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/layout.json +0 -0
- 2023/Transfer Learning with Deep Tabular Models/1dc89b4f-36aa-4331-9391-4480874346bd_content_list.json +0 -0
- 2023/Transfer Learning with Deep Tabular Models/1dc89b4f-36aa-4331-9391-4480874346bd_model.json +0 -0
- 2023/Transfer Learning with Deep Tabular Models/1dc89b4f-36aa-4331-9391-4480874346bd_origin.pdf +3 -0
- 2023/Transfer Learning with Deep Tabular Models/full.md +0 -0
- 2023/Transfer Learning with Deep Tabular Models/images.zip +3 -0
- 2023/Transfer Learning with Deep Tabular Models/layout.json +0 -0
- 2023/Transferable Unlearnable Examples/c25345b7-d8be-4162-9c61-be3dc45172d4_content_list.json +0 -0
.gitattributes
CHANGED
@@ -7480,3 +7480,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2023/Towards[[:space:]]Understanding[[:space:]]Why[[:space:]]Mask[[:space:]]Reconstruction[[:space:]]Pretraining[[:space:]]Helps[[:space:]]in[[:space:]]Downstream[[:space:]]Tasks/d542260f-2731-43c6-8f57-bcc406da8904_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2023/Towards[[:space:]]Understanding[[:space:]]and[[:space:]]Mitigating[[:space:]]Dimensional[[:space:]]Collapse[[:space:]]in[[:space:]]Heterogeneous[[:space:]]Federated[[:space:]]Learning/029f82af-4344-40ce-9dbc-ca25b9e3611d_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2023/Towards[[:space:]]a[[:space:]]Unified[[:space:]]Theoretical[[:space:]]Understanding[[:space:]]of[[:space:]]Non-contrastive[[:space:]]Learning[[:space:]]via[[:space:]]Rank[[:space:]]Differential[[:space:]]Mechanism/9f7237d3-eef7-444f-b5f7-71b5d961d635_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]convergence[[:space:]]to[[:space:]]Nash[[:space:]]equilibria[[:space:]]in[[:space:]]two-team[[:space:]]zero-sum[[:space:]]games/fca07eac-706b-41aa-8183-c888af669e12_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]the[[:space:]]Generalization[[:space:]]of[[:space:]]Contrastive[[:space:]]Self-Supervised[[:space:]]Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Trading[[:space:]]Information[[:space:]]between[[:space:]]Latents[[:space:]]in[[:space:]]Hierarchical[[:space:]]Variational[[:space:]]Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Trainability[[:space:]]Preserving[[:space:]]Neural[[:space:]]Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Trainable[[:space:]]Weight[[:space:]]Averaging_[[:space:]]Efficient[[:space:]]Training[[:space:]]by[[:space:]]Optimizing[[:space:]]Historical[[:space:]]Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Training-Free[[:space:]]Structured[[:space:]]Diffusion[[:space:]]Guidance[[:space:]]for[[:space:]]Compositional[[:space:]]Text-to-Image[[:space:]]Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/TranSpeech_[[:space:]]Speech-to-Speech[[:space:]]Translation[[:space:]]With[[:space:]]Bilateral[[:space:]]Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transfer[[:space:]]Learning[[:space:]]with[[:space:]]Deep[[:space:]]Tabular[[:space:]]Models/1dc89b4f-36aa-4331-9391-4480874346bd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transferable[[:space:]]Unlearnable[[:space:]]Examples/c25345b7-d8be-4162-9c61-be3dc45172d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transformer[[:space:]]Meets[[:space:]]Boundary[[:space:]]Value[[:space:]]Inverse[[:space:]]Problems/1ec099b3-f201-4d72-b365-9568bef0f665_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transformer-Patcher_[[:space:]]One[[:space:]]Mistake[[:space:]]Worth[[:space:]]One[[:space:]]Neuron/770f6992-5610-4dd1-87f5-5cad41ec10de_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transformer-based[[:space:]]World[[:space:]]Models[[:space:]]Are[[:space:]]Happy[[:space:]]With[[:space:]]100k[[:space:]]Interactions/17e8a818-64b5-455c-ab5c-86f73da8f2b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transformer-based[[:space:]]model[[:space:]]for[[:space:]]symbolic[[:space:]]regression[[:space:]]via[[:space:]]joint[[:space:]]supervised[[:space:]]learning/1597b2fc-905d-4ae9-bb88-5a1559738de4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Treeformer_[[:space:]]Dense[[:space:]]Gradient[[:space:]]Trees[[:space:]]for[[:space:]]Efficient[[:space:]]Attention[[:space:]]Computation/f3cb1ec7-ce2c-4556-9b1e-fd67c5f8b3c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/TrojText_[[:space:]]Test-time[[:space:]]Invisible[[:space:]]Textual[[:space:]]Trojan[[:space:]]Insertion/6edcbe90-614d-4559-ae1f-a9a54ba1e652_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Truncated[[:space:]]Diffusion[[:space:]]Probabilistic[[:space:]]Models[[:space:]]and[[:space:]]Diffusion-based[[:space:]]Adversarial[[:space:]]Auto-Encoders/c36693f9-9299-4bac-99dc-c6176c21e93a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Truthful[[:space:]]Self-Play/7faf5be5-98f6-441a-bc20-a78c9114ad92_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Tuning[[:space:]]Frequency[[:space:]]Bias[[:space:]]in[[:space:]]Neural[[:space:]]Network[[:space:]]Training[[:space:]]with[[:space:]]Nonuniform[[:space:]]Data/832fc415-b6ec-40b4-bec9-431e44e5a35a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/TypeT5_[[:space:]]Seq2seq[[:space:]]Type[[:space:]]Inference[[:space:]]using[[:space:]]Static[[:space:]]Analysis/8f9d0ff5-b137-4e5f-80da-1d5125258b09_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UL2_[[:space:]]Unifying[[:space:]]Language[[:space:]]Learning[[:space:]]Paradigms/91c48397-a1cb-4bfc-b0d2-b0f845f52ca1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unbiased[[:space:]]Stochastic[[:space:]]Proximal[[:space:]]Solver[[:space:]]for[[:space:]]Graph[[:space:]]Neural[[:space:]]Networks[[:space:]]with[[:space:]]Equilibrium[[:space:]]States/459142da-a291-4c4d-a15a-919a0049565f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unbiased[[:space:]]Supervised[[:space:]]Contrastive[[:space:]]Learning/b12901a3-9b75-426f-8a3a-ea01687e032e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]DDPM[[:space:]]Latent[[:space:]]Codes[[:space:]]Through[[:space:]]Optimal[[:space:]]Transport/097558b3-3719-4906-a548-215887fb2663_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Edge-of-Stability[[:space:]]Training[[:space:]]Dynamics[[:space:]]with[[:space:]]a[[:space:]]Minimalist[[:space:]]Example/efab9929-a1a3-49e0-9e92-036b893c011f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Embodied[[:space:]]Reference[[:space:]]with[[:space:]]Touch-Line[[:space:]]Transformer/3279c5c8-e4c3-4c8b-9657-0690e3a4afd0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Influence[[:space:]]Functions[[:space:]]and[[:space:]]Datamodels[[:space:]]via[[:space:]]Harmonic[[:space:]]Analysis/81002f1e-35eb-4069-8c01-f2855e2c89d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Neural[[:space:]]Coding[[:space:]]on[[:space:]]Latent[[:space:]]Manifolds[[:space:]]by[[:space:]]Sharing[[:space:]]Features[[:space:]]and[[:space:]]Dividing[[:space:]]Ensembles/fc1bf7c9-04bb-402f-8d39-f28fc5230907_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]The[[:space:]]Robustness[[:space:]]of[[:space:]]Self-supervised[[:space:]]Learning[[:space:]]Through[[:space:]]Topic[[:space:]]Modeling/3ecf79aa-a1e3-4f6f-993c-65ad330ad71a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Train-Validation[[:space:]]Split[[:space:]]in[[:space:]]Meta-Learning[[:space:]]with[[:space:]]Neural[[:space:]]Networks/90032d0e-dd57-436e-b703-1152bc666f8a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Why[[:space:]]Generalized[[:space:]]Reweighting[[:space:]]Does[[:space:]]Not[[:space:]]Improve[[:space:]]Over[[:space:]]ERM/6995370d-e775-4b9f-a371-0c0512012986_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Zero-shot[[:space:]]Adversarial[[:space:]]Robustness[[:space:]]for[[:space:]]Large-Scale[[:space:]]Models/3a4ab02f-7799-4db5-873b-0ae3dea8c915_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]new[[:space:]]tasks[[:space:]]through[[:space:]]the[[:space:]]lens[[:space:]]of[[:space:]]training[[:space:]]data[[:space:]]via[[:space:]]exponential[[:space:]]tilting/afecc5cc-8c03-4938-9651-5c291fd3f262_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]the[[:space:]]Covariance[[:space:]]Structure[[:space:]]of[[:space:]]Convolutional[[:space:]]Filters/c850f5ed-3c1a-4d1a-818b-78549f281820_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]the[[:space:]]Generalization[[:space:]]of[[:space:]]Adam[[:space:]]in[[:space:]]Learning[[:space:]]Neural[[:space:]]Networks[[:space:]]with[[:space:]]Proper[[:space:]]Regularization/4aeee7ef-5596-462f-8e63-74aa3d5edbdd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]the[[:space:]]Role[[:space:]]of[[:space:]]Nonlinearity[[:space:]]in[[:space:]]Training[[:space:]]Dynamics[[:space:]]of[[:space:]]Contrastive[[:space:]]Learning/2fdc99a1-9c10-4555-9faf-6ebaa1b4fc93_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]weight-magnitude[[:space:]]hyperparameters[[:space:]]in[[:space:]]training[[:space:]]binary[[:space:]]networks/92075308-f185-485b-81a9-3f5f15aeb0f2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Uni-Mol_[[:space:]]A[[:space:]]Universal[[:space:]]3D[[:space:]]Molecular[[:space:]]Representation[[:space:]]Learning[[:space:]]Framework/3b3b1b0a-134b-4b5a-bc2c-79f762a15927_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniKGQA_[[:space:]]Unified[[:space:]]Retrieval[[:space:]]and[[:space:]]Reasoning[[:space:]]for[[:space:]]Solving[[:space:]]Multi-hop[[:space:]]Question[[:space:]]Answering[[:space:]]Over[[:space:]]Knowledge[[:space:]]Graph/ea9ebe35-2c12-414b-a15a-da140a0f36fe_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniMax_[[:space:]]Fairer[[:space:]]and[[:space:]]More[[:space:]]Effective[[:space:]]Language[[:space:]]Sampling[[:space:]]for[[:space:]]Large-Scale[[:space:]]Multilingual[[:space:]]Pretraining/9ca96cd6-17d8-4511-926b-2f12c31f5631_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unicom_[[:space:]]Universal[[:space:]]and[[:space:]]Compact[[:space:]]Representation[[:space:]]Learning[[:space:]]for[[:space:]]Image[[:space:]]Retrieval/4955bd28-9154-4d84-b28c-d82d4581f134_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unified[[:space:]]Detoxifying[[:space:]]and[[:space:]]Debiasing[[:space:]]in[[:space:]]Language[[:space:]]Generation[[:space:]]via[[:space:]]Inference-time[[:space:]]Adaptive[[:space:]]Optimization/dd4178fd-0397-46c3-a674-f0911dee7a47_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unified[[:space:]]Discrete[[:space:]]Diffusion[[:space:]]for[[:space:]]Simultaneous[[:space:]]Vision-Language[[:space:]]Generation/5d1ed28d-1395-4da0-b0a3-b0d9282f14c7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Uniform-in-time[[:space:]]propagation[[:space:]]of[[:space:]]chaos[[:space:]]for[[:space:]]the[[:space:]]mean-field[[:space:]]gradient[[:space:]]Langevin[[:space:]]dynamics/b6abd121-b0f2-4aef-b6c7-b6d9f99821db_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Universal[[:space:]]Vision-Language[[:space:]]Dense[[:space:]]Retrieval_[[:space:]]Learning[[:space:]]A[[:space:]]Unified[[:space:]]Representation[[:space:]]Space[[:space:]]for[[:space:]]Multi-Modal[[:space:]]Retrieval/f6564f5e-5153-4c47-838f-716b1196152f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]3D[[:space:]]Object[[:space:]]Learning[[:space:]]through[[:space:]]Neuron[[:space:]]Activity[[:space:]]aware[[:space:]]Plasticity/3cfcbe29-5f6c-4564-a56b-c441496f38b5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Learning[[:space:]]for[[:space:]]Combinatorial[[:space:]]Optimization[[:space:]]Needs[[:space:]]Meta[[:space:]]Learning/4705db6e-4633-46e4-b41e-c44cdecc0dcb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Manifold[[:space:]]Alignment[[:space:]]with[[:space:]]Joint[[:space:]]Multidimensional[[:space:]]Scaling/d09921a4-d573-4e75-98e6-f1fc56e18dec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]visualization[[:space:]]of[[:space:]]image[[:space:]]datasets[[:space:]]using[[:space:]]contrastive[[:space:]]learning/a5a94bb0-b8b2-447f-ab06-7e5dbbc4c64e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unveiling[[:space:]]the[[:space:]]sampling[[:space:]]density[[:space:]]in[[:space:]]non-uniform[[:space:]]geometric[[:space:]]graphs/b1e92d22-4265-41ae-85e9-a6fcc5a6033f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/User-Interactive[[:space:]]Offline[[:space:]]Reinforcement[[:space:]]Learning/d29f2046-101a-4b0e-842a-6f087051ef6d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Using[[:space:]]Both[[:space:]]Demonstrations[[:space:]]and[[:space:]]Language[[:space:]]Instructions[[:space:]]to[[:space:]]Efficiently[[:space:]]Learn[[:space:]]Robotic[[:space:]]Tasks/1281f587-ca51-45af-b3a6-6619c12b4b70_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Valid[[:space:]]P-Value[[:space:]]for[[:space:]]Deep[[:space:]]Learning-driven[[:space:]]Salient[[:space:]]Region/32c6d578-509d-4bec-b0b0-233fc9acca11_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Value[[:space:]]Memory[[:space:]]Graph_[[:space:]]A[[:space:]]Graph-Structured[[:space:]]World[[:space:]]Model[[:space:]]for[[:space:]]Offline[[:space:]]Reinforcement[[:space:]]Learning/98619cee-0b12-48cd-9910-2a4ae393abbb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Variance[[:space:]]Reduction[[:space:]]is[[:space:]]an[[:space:]]Antidote[[:space:]]to[[:space:]]Byzantines_[[:space:]]Better[[:space:]]Rates,[[:space:]]Weaker[[:space:]]Assumptions[[:space:]]and[[:space:]]Communication[[:space:]]Compression[[:space:]]as[[:space:]]a[[:space:]]Cherry[[:space:]]on[[:space:]]the[[:space:]]Top/2fceac1f-244a-402e-80ab-2c730f2d3fe0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Variance-Aware[[:space:]]Sparse[[:space:]]Linear[[:space:]]Bandits/c65aec82-84d9-4795-9886-3fa1eb90de0a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Variational[[:space:]]Information[[:space:]]Pursuit[[:space:]]for[[:space:]]Interpretable[[:space:]]Predictions/121fc387-bf6e-433b-b4ca-4b7ad174f7d0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Variational[[:space:]]Latent[[:space:]]Branching[[:space:]]Model[[:space:]]for[[:space:]]Off-Policy[[:space:]]Evaluation/5bb638f8-a59c-44f9-9831-13d4f94f6e3a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Verifying[[:space:]]the[[:space:]]Union[[:space:]]of[[:space:]]Manifolds[[:space:]]Hypothesis[[:space:]]for[[:space:]]Image[[:space:]]Data/3a3bc33b-e1fc-46fa-bac6-46a3a581f46c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Versatile[[:space:]]Neural[[:space:]]Processes[[:space:]]for[[:space:]]Learning[[:space:]]Implicit[[:space:]]Neural[[:space:]]Representations/f42c4882-f9c1-4fe0-b94b-c1e5a14c3675_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Video[[:space:]]Scene[[:space:]]Graph[[:space:]]Generation[[:space:]]from[[:space:]]Single-Frame[[:space:]]Weak[[:space:]]Supervision/ac55b638-216a-4d75-af9b-7cae02d8b545_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/ViewCo_[[:space:]]Discovering[[:space:]]Text-Supervised[[:space:]]Segmentation[[:space:]]Masks[[:space:]]via[[:space:]]Multi-View[[:space:]]Semantic[[:space:]]Consistency/10dd817a-33bc-4ab3-ae1a-55942ee20ae4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Visual[[:space:]]Imitation[[:space:]]Learning[[:space:]]with[[:space:]]Patch[[:space:]]Rewards/7ff3492f-7d60-4240-bcc1-cecd00ae1b72_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Visually-Augmented[[:space:]]Language[[:space:]]Modeling/fea11c19-f3c1-4765-aaca-6a4d7a8ff1ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/VoGE_[[:space:]]A[[:space:]]Differentiable[[:space:]]Volume[[:space:]]Renderer[[:space:]]using[[:space:]]Gaussian[[:space:]]Ellipsoids[[:space:]]for[[:space:]]Analysis-by-Synthesis/babfe1c6-0687-43cf-a603-79217b17846f_origin.pdf filter=lfs diff=lfs merge=lfs -text
2023/Towards convergence to Nash equilibria in two-team zero-sum games/fca07eac-706b-41aa-8183-c888af669e12_content_list.json
ADDED
@@ -0,0 +1,1795 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "TOWARDS CONVERGENCE TO NASH EQUILIBRIA IN TWO-TEAM ZERO-SUM GAMES",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
171,
|
| 8 |
+
99,
|
| 9 |
+
823,
|
| 10 |
+
146
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Fivos Kalogiannis \nUC Irvine",
|
| 17 |
+
"bbox": [
|
| 18 |
+
181,
|
| 19 |
+
169,
|
| 20 |
+
312,
|
| 21 |
+
196
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Ioannis Panageas \nUC Irvine",
|
| 28 |
+
"bbox": [
|
| 29 |
+
351,
|
| 30 |
+
169,
|
| 31 |
+
478,
|
| 32 |
+
196
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Emmanouil V. Vlatakis-Gkaragkounis \nColumbia University",
|
| 39 |
+
"bbox": [
|
| 40 |
+
517,
|
| 41 |
+
169,
|
| 42 |
+
787,
|
| 43 |
+
198
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "ABSTRACT",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
450,
|
| 53 |
+
234,
|
| 54 |
+
547,
|
| 55 |
+
250
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "Contemporary applications of machine learning in two-team e-sports and the superior expressivity of multi-agent generative adversarial networks raise important and overlooked theoretical questions regarding optimization in two-team games. Formally, two-team zero-sum games are defined as multi-player games where players are split into two competing sets of agents, each experiencing a utility identical to that of their teammates and opposite to that of the opposing team. We focus on the solution concept of Nash equilibria (NE). We first show that computing NE for this class of games is hard for the complexity class CLS. To further examine the capabilities of online learning algorithms in games with full-information feedback, we propose a benchmark of a simple -yet nontrivial-- family of such games. These games do not enjoy the properties used to prove convergence for relevant algorithms. In particular, we use a dynamical systems perspective to demonstrate that gradient descent-ascent, its optimistic variant, optimistic multiplicative weights update, and extra gradient fail to converge (even locally) to a Nash equilibrium. On a brighter note, we propose a first-order method that leverages control theory techniques and under some conditions enjoys last-iterate local convergence to a Nash equilibrium. We also believe our proposed method is of independent interest for general min-max optimization.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
228,
|
| 64 |
+
267,
|
| 65 |
+
767,
|
| 66 |
+
518
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "1 INTRODUCTION",
|
| 73 |
+
"text_level": 1,
|
| 74 |
+
"bbox": [
|
| 75 |
+
173,
|
| 76 |
+
547,
|
| 77 |
+
336,
|
| 78 |
+
561
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "Online learning shares an enduring relationship with game theory that has a very early onset dating back to the analysis of fictitious play by (Robinson, 1951) and Blackwell's approachability theorem (Blackwell, 1956). A key question within this context is whether self-interested agents can arrive at a game-theoretic equilibrium in an independent and decentralized manner with only limited feedback from their environment. Learning dynamics that converge to different notions of equilibria are known to exist for two-player zero-sum games (Robinson, 1951; Arora et al., 2012; Daskalakis et al., 2011), potential games (Monderer & Shapley, 1996), near-potential games (Anagnostides et al., 2022b), socially concave games (Golowich et al., 2020), and extensive form games (Anagnostides et al., 2022a). We try to push the boundary further and explore whether equilibria—in particular, Nash equilibria—can be reached by agents that follow decentralized learning algorithms in two-team zero-sum games.",
|
| 85 |
+
"bbox": [
|
| 86 |
+
169,
|
| 87 |
+
579,
|
| 88 |
+
826,
|
| 89 |
+
733
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "text",
|
| 95 |
+
"text": "Team competition has played a central role in the development of game theory (Marschak, 1955; von Stengel & Koller, 1997; Bacharach, 1999; Gold, 2005), economics (Marschak, 1955; Gottinger, 1974), and evolutionary biology (Nagylaki, 1993; Nowak et al., 2004). Recently, competition among teams has attracted the interest of the machine learning community due to the advances that multi-agent systems have accomplished: e.g., multi-GAN's (Hoang et al., 2017; Hardy et al., 2019) for generative tasks, adversarial regression with multiple learners (Tong et al., 2018), or AI agents competing in e-sports (e.g., CTF (Jaderberg et al., 2019) or Starcraft (Vinyals et al., 2019)) as well as card games (Moravčík et al., 2017; Brown & Sandholm, 2018; Bowling et al., 2015).",
|
| 96 |
+
"bbox": [
|
| 97 |
+
169,
|
| 98 |
+
739,
|
| 99 |
+
826,
|
| 100 |
+
851
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "Our class of games. We turn our attention to two-team zero-sum games a quite general class of min-max optimization problems that include bilinear games and a wide range of nonconvex-nonconcave games as well. In this class of games, players fall into two teams of size $n$ , $m$ and submit their own randomized strategy vectors independently. We note that the games that we focus on are",
|
| 107 |
+
"bbox": [
|
| 108 |
+
169,
|
| 109 |
+
868,
|
| 110 |
+
823,
|
| 111 |
+
925
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "header",
|
| 117 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 118 |
+
"bbox": [
|
| 119 |
+
171,
|
| 120 |
+
32,
|
| 121 |
+
478,
|
| 122 |
+
47
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "page_number",
|
| 128 |
+
"text": "1",
|
| 129 |
+
"bbox": [
|
| 130 |
+
493,
|
| 131 |
+
948,
|
| 132 |
+
504,
|
| 133 |
+
959
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "not restricted to team games in the narrow sense of the term \"team\" as we use it in sports, games, and so on; the players play independently and do not follow a central coordinating authority. Rather, for the purpose of this paper, teams are constituted by agents that merely enjoy the same utility function. This might already hint that the solution concept that we engage with is the Nash equilibrium (NE). Another class of games that is captured by this framework is the class of adversarial potential games. In these games, the condition that all players of the same team experience the same utility is weakened as long as there exists a potential function that can track differences in the utility of each player when they unilaterally deviate from a given strategy profile (see Appendix A.2 for a formal definition). A similar setting has been studied in the context of nonatomic games (Babaioff et al., 2009).",
|
| 140 |
+
"bbox": [
|
| 141 |
+
169,
|
| 142 |
+
104,
|
| 143 |
+
826,
|
| 144 |
+
243
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 1
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"text": "Positive duality gap. In two-player zero-sum games, i.e., $n = m = 1$ , min-max (respectively max-min) strategies are guaranteed to form a Nash equilibrium due to Von Neumann's minmax theorem (Von Neumann, 1928); ultimately endowing the game with a unique value. The challenges arise for the case of $n, m > 1$ ; Schulman & Vazirani (2019b) prove that, in general, two-team games do not have a unique value. They do so by presenting a family of team games with a positive duality gap, together with bounds concerning this gap. These bounds quantify the effect of exchanging the order of commitment to their strategy either between the teams as a whole or the individual players.",
|
| 151 |
+
"bbox": [
|
| 152 |
+
169,
|
| 153 |
+
258,
|
| 154 |
+
823,
|
| 155 |
+
357
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 1
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "Solution concept. In this work, we examine the solution concept of Nash equilibrium (NE). Under a Nash equilibrium, no player can improve their utility by unilaterally deviating. The main downside of a NE for team games is the fact that such an equilibrium can be arbitrarily suboptimal for the team (Basilico et al., 2017a).",
|
| 162 |
+
"bbox": [
|
| 163 |
+
169,
|
| 164 |
+
372,
|
| 165 |
+
823,
|
| 166 |
+
429
|
| 167 |
+
],
|
| 168 |
+
"page_idx": 1
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"text": "This is one of the reasons that the solution concept of team-maxmin equilibrium with a coordination device (TMECor) has dominated contemporary literature of team games, especially in regard to applications (Farina et al., 2018; Zhang et al., 2020; Cacciamani et al., 2021). Under a TMECor, players are allowed to communicate before the game and decide upon combinations of strategies to be played during the game using an external source of randomness.",
|
| 173 |
+
"bbox": [
|
| 174 |
+
169,
|
| 175 |
+
435,
|
| 176 |
+
823,
|
| 177 |
+
505
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 1
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "The undeniable advantage of a TMECor is that the expected utility of the team under it is greater than the expected utility under a NE (Basilico et al., 2017a). Nevertheless, this favorable property of TMECor can by no means render the study of NE irrelevant. In fact, the study of NE is always of independent interest within the literature of algorithmic game theory —especially questions corresponding to computational complexity. Moreover, there exist settings in which ex ante coordination cannot be expected to be possible or even sensible; for example in (i) environments where the external sources of randomness are unreliable or nonexistent or visible to the adversarial team, (ii) games in which players cannot know in advance who they share a common utility with, (iii) adversarial potential games. These games can model naturally occurring settings such as (a) security games with multiple uncoordinated defenders versus multiple similarly uncoordinated attackers, (b) the load balancing “game” between telecommunication service providers trying to minimize the maximum delay of service experienced by their customers versus the service users that try to individually utilize the maximum amount of broadband possible, and (c) the weak selection model of evolutionary biology where a species as a whole is a team, the genes of its population are the players and the alleles of each gene are in turn the actions of a player; the allele frequencies are independent across genes (Nagylaki, 1993; Nowak et al., 2004; Mehta et al., 2015).",
|
| 184 |
+
"bbox": [
|
| 185 |
+
169,
|
| 186 |
+
511,
|
| 187 |
+
826,
|
| 188 |
+
734
|
| 189 |
+
],
|
| 190 |
+
"page_idx": 1
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"text": "Concluding, we could not possibly argue for a single correct solution concept for two-team games; there is no silver bullet. In contrast, one has to assess which is the most fitting based on the constraints of a given setting. A Nash equilibrium is a cornerstone concept of game theory and examining its properties in different games is always important.",
|
| 195 |
+
"bbox": [
|
| 196 |
+
169,
|
| 197 |
+
739,
|
| 198 |
+
823,
|
| 199 |
+
797
|
| 200 |
+
],
|
| 201 |
+
"page_idx": 1
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"text": "The optimization point of view. We focus on the solution concept of NE and we first note that computing local-NE in general nonconvex-nonconcave games is PPAD-complete (Daskalakis et al., 2009; 2021). Thus, all well-celebrated online learning, first-order methods like gradient descent-ascent (Lin et al., 2020; Daskalakis & Panageas, 2019), its optimistic (Popov, 1980; Chiang et al., 2012; Sridharan & Tewari, 2010), optimistic multiplicative weights update (Sridharan, 2012), and the extra gradient method (Korpelevich, 1976) would require an exponential number of steps in the parameters of the problem in order to compute an approximate NE under the oracle optimization model of (Nemirovskij & Yudin, 1983). Additionally, in the continuous time regime, similar classes",
|
| 206 |
+
"bbox": [
|
| 207 |
+
169,
|
| 208 |
+
811,
|
| 209 |
+
826,
|
| 210 |
+
925
|
| 211 |
+
],
|
| 212 |
+
"page_idx": 1
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "header",
|
| 216 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 217 |
+
"bbox": [
|
| 218 |
+
171,
|
| 219 |
+
32,
|
| 220 |
+
478,
|
| 221 |
+
47
|
| 222 |
+
],
|
| 223 |
+
"page_idx": 1
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "page_number",
|
| 227 |
+
"text": "2",
|
| 228 |
+
"bbox": [
|
| 229 |
+
493,
|
| 230 |
+
946,
|
| 231 |
+
504,
|
| 232 |
+
959
|
| 233 |
+
],
|
| 234 |
+
"page_idx": 1
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"text": "of games exhibit behaviors antithetical to convergence like cycling, recurrence, or chaos (Vlatakis-Gkaragkounis et al., 2019). Second, even if a regret notion within the context of team-competition could be defined, no-regret dynamics are guaranteed to converge only to the set of coarse correlated equilibria (CCE) (Fudenberg, 1991; Hannan, 2016). CCE is a weaker equilibrium notion whose solutions could potentially be exclusively supported on strictly dominated strategies, even for simple symmetric two-player games (See also (Viossat & Zapechelyuk, 2013)).",
|
| 239 |
+
"bbox": [
|
| 240 |
+
169,
|
| 241 |
+
103,
|
| 242 |
+
823,
|
| 243 |
+
188
|
| 244 |
+
],
|
| 245 |
+
"page_idx": 2
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"text": "Surely, the aforementioned intractability remarks for the general case of nonconvex-nonconcave min-max problems provide a significant insight. But, they cannot per se address the issue of computing Nash equilibria when the game is equipped with a particular structure, i.e., that of two-team zero-sum games. In fact, our paper addresses the following questions:",
|
| 250 |
+
"bbox": [
|
| 251 |
+
169,
|
| 252 |
+
194,
|
| 253 |
+
826,
|
| 254 |
+
251
|
| 255 |
+
],
|
| 256 |
+
"page_idx": 2
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"text": "Can we get provable convergence guarantees to NE of decentralized first-order methods in two-team zero-sum games?",
|
| 261 |
+
"bbox": [
|
| 262 |
+
200,
|
| 263 |
+
260,
|
| 264 |
+
799,
|
| 265 |
+
289
|
| 266 |
+
],
|
| 267 |
+
"page_idx": 2
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"text": "Our results. First, with regards to computational complexity, we prove that computing an approximate (and possibly mixed) NE in two-team zero-sum games is CLS-hard (Theorem 3.1); i.e., it is computationally harder than finding pure NE in a congestion game or computing an approximate fixed point of gradient descent.",
|
| 272 |
+
"bbox": [
|
| 273 |
+
169,
|
| 274 |
+
303,
|
| 275 |
+
823,
|
| 276 |
+
359
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 2
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"text": "Second, regarding online learning for equilibrium computation, we prove that a number of established, decentralized, first-order methods are not fit for the purpose and fail to converge even asymptotically. Specifically, we present a simple -yet nontrivial family of two-team zero-sum games (with each team consisting of two players) where projected gradient descent-ascent (GDA), optimistic gradient descent-ascent (OGDA), optimistic multiplicative weights update (OMWU), and the extra gradient method (EG) fail to locally converge to a mixed NE (Theorem 3.3). More broadly, in the case of GDA in nondegenerate team games with unique mixed NE, one could acquire an even stronger result for any high-dimensional configuration of actions and players (Theorem 3.2). To the best of our knowledge, the described family of games is the first-of-its-kind in which all these methods provably fail to converge at the same time.",
|
| 283 |
+
"bbox": [
|
| 284 |
+
169,
|
| 285 |
+
364,
|
| 286 |
+
826,
|
| 287 |
+
505
|
| 288 |
+
],
|
| 289 |
+
"page_idx": 2
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"text": "Third, we propose a novel first-order method inspired by adaptive control (Bazanella et al., 1997; Hassouneh et al., 2004). In particular, we use a technique that manages to stabilize unstable fixed points of a dynamical system without prior knowledge of their position and without introducing new ones. It is important to note that this method is a modification of GDA that uses a stabilizing feedback which maintains the decentralized nature of GDA.",
|
| 294 |
+
"bbox": [
|
| 295 |
+
169,
|
| 296 |
+
511,
|
| 297 |
+
825,
|
| 298 |
+
580
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 2
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "Finally, in Section 4 we provide a series of experiments in simple two-team zero-sum GAN's. We also show that multi-GAN architectures achieve better performance than single-agent ones, relative to the network capacity when they are trained on synthetic or real-world datasets like CIFAR10.",
|
| 305 |
+
"bbox": [
|
| 306 |
+
169,
|
| 307 |
+
588,
|
| 308 |
+
823,
|
| 309 |
+
631
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 2
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "Existing algorithms for NE in multiplayer games. The focus of the present paper is examining algorithms for the setting of repeated games (Cesa-Bianchi & Lugosi, 1999, Chapter 7). If we do not restrict ourselves to this setting, there are numerous centralized algorithms (Lipton et al., 2003; Berg & Sandholm, 2017) and heuristics (Gemp et al., 2021) that solve the problem of computing Nash equilibria in general multi-player games.",
|
| 316 |
+
"bbox": [
|
| 317 |
+
169,
|
| 318 |
+
643,
|
| 319 |
+
826,
|
| 320 |
+
715
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 2
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"text": "2 PRELIMINARIES",
|
| 327 |
+
"text_level": 1,
|
| 328 |
+
"bbox": [
|
| 329 |
+
171,
|
| 330 |
+
734,
|
| 331 |
+
341,
|
| 332 |
+
750
|
| 333 |
+
],
|
| 334 |
+
"page_idx": 2
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"type": "text",
|
| 338 |
+
"text": "Our setting. A two-team game in normal form is defined as a tuple $\\Gamma(\\mathcal{N},\\mathcal{M},\\mathcal{A},\\mathcal{B},\\{U_A,U_B\\})$ . The tuple is defined by",
|
| 339 |
+
"bbox": [
|
| 340 |
+
169,
|
| 341 |
+
763,
|
| 342 |
+
823,
|
| 343 |
+
792
|
| 344 |
+
],
|
| 345 |
+
"page_idx": 2
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"type": "list",
|
| 349 |
+    "sub_type": "text",
+    "list_items": [
+      "(i) a finite set of $n = |\\mathcal{N}|$ players belonging to team $A$, as well as a finite set of $m = |\\mathcal{M}|$ players belonging to team $B$;",
+      "(ii) a finite set of actions (or pure strategies) $\\mathcal{A}_i = \\{\\alpha_1, \\ldots, \\alpha_{n_i}\\}$ per player $i \\in \\mathcal{N}$, where $\\mathcal{A} := \\prod_i \\mathcal{A}_i$ denotes the ensemble of all possible action profiles of team $A$, and, respectively, a finite set of actions (or pure strategies) $\\mathcal{B}_i = \\{\\beta_1, \\ldots, \\beta_{n_i}\\}$ per player $i \\in \\mathcal{M}$, where $\\mathcal{B} := \\prod_i \\mathcal{B}_i$;",
+      "(iii) a utility function for team $A$, $U_A: \\mathcal{A} \\times \\mathcal{B} \\to \\mathbb{R}$ (resp. $U_B: \\mathcal{A} \\times \\mathcal{B} \\to \\mathbb{R}$ for team $B$)."
+    ],
+    "bbox": [173, 794, 823, 893],
+    "page_idx": 2
+  },
+  {
+    "type": "text",
+    "text": "We also use $\\alpha = (\\alpha_1, \\ldots, \\alpha_n)$ to denote the strategy profile of team $A$ players and $\\beta = (\\beta_1, \\ldots, \\beta_m)$ the strategy profile of team $B$ players.",
+    "bbox": [169, 895, 823, 925],
+    "page_idx": 2
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 2
+  },
+  {
+    "type": "page_number",
+    "text": "3",
+    "bbox": [493, 948, 504, 959],
+    "page_idx": 2
+  },
+  {
+    "type": "text",
+    "text": "Finally, each team's payoff function is denoted by $U_A, U_B: \\mathcal{A} \\times \\mathcal{B} \\to \\mathbb{R}$, where the individual utility of each player is identical to that of her teammates, i.e., $U_i = U_A$ and $U_j = U_B$ for all $i \\in \\mathcal{N}$ and $j \\in \\mathcal{M}$. In this general context, players could also submit mixed strategies, i.e., probability distributions over actions. Correspondingly, we define the product distributions $\\boldsymbol{x} = (\\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n)$, $\\boldsymbol{y} = (\\boldsymbol{y}_1, \\dots, \\boldsymbol{y}_m)$ as the strategies of teams $A$ and $B$ respectively, in which $\\boldsymbol{x}_i \\in \\Delta(\\mathcal{A}_i)$ and $\\boldsymbol{y}_j \\in \\Delta(\\mathcal{B}_j)$. We then write $\\mathcal{X} := \\prod_{i \\in \\mathcal{N}} \\mathcal{X}_i = \\prod_{i \\in \\mathcal{N}} \\Delta(\\mathcal{A}_i)$ and $\\mathcal{Y} := \\prod_{i \\in \\mathcal{M}} \\mathcal{Y}_i = \\prod_{i \\in \\mathcal{M}} \\Delta(\\mathcal{B}_i)$ for the spaces of mixed strategy profiles of teams $A, B$. A two-team game is called two-team zero-sum if $U_B = -U_A = U$, which is the main focus of this paper. Moreover, we assume that the game is succinctly representable and satisfies the polynomial expectation property (Daskalakis et al., 2006). This means that, given a mixed strategy profile, the utility of each player can be computed in time polynomial in the number of agents, the sum of the numbers of strategies of the players, and the number of bits required to represent the mixed strategy profile.",
+    "bbox": [169, 103, 826, 271],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "A Nash equilibrium (NE) is a strategy profile $(\\pmb{x}^{*},\\pmb{y}^{*})\\in \\mathcal{X}\\times \\mathcal{Y}$ such that",
+    "bbox": [171, 277, 661, 292],
+    "page_idx": 3
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\left\\{ \\begin{array}{l} U(\\boldsymbol{x}^{*}, \\boldsymbol{y}^{*}) \\leq U(\\boldsymbol{x}_{i}, \\boldsymbol{x}_{-i}^{*}, \\boldsymbol{y}^{*}), \\; \\forall \\boldsymbol{x}_{i} \\in \\mathcal{X}_{i} \\\\ U(\\boldsymbol{x}^{*}, \\boldsymbol{y}^{*}) \\geq U(\\boldsymbol{x}^{*}, \\boldsymbol{y}_{j}, \\boldsymbol{y}_{-j}^{*}), \\; \\forall \\boldsymbol{y}_{j} \\in \\mathcal{Y}_{j} \\end{array} \\right. \\tag{NE}\n$$\n",
+    "text_format": "latex",
+    "bbox": [341, 295, 823, 329],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "A first approach to computing NE in two-team zero-sum games. Due to the multilinearity of the utility and the existence of a duality gap, the linear programming method used in two-player zero-sum games cannot be used to compute a Nash equilibrium. Toward the goal of computing Nash equilibria in two-team zero-sum games, we have experimented with a selection of online learning, first-order methods that have been utilized with varying success in the two-player zero-sum setting. Namely, we analyze the following methods: (i) gradient descent-ascent (GDA), (ii) optimistic gradient descent-ascent (OGDA), (iii) the extra gradient method (EG), and (iv) the optimistic multiplicative weights update method (OMWU). For their precise definitions, we refer to Appendix B.",
+    "bbox": [169, 338, 823, 450],
+    "page_idx": 3
+  },
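For readers cross-checking the extracted text above: a minimal numpy sketch of one-step GDA, OGDA, and EG updates as they are usually written in the min-max literature. The paper defers precise definitions to its Appendix B, so treat these as assumed textbook forms; the toy objective and the box projection `proj` are illustrative stand-ins for the paper's simplex projection.

```python
import numpy as np

# Toy bilinear objective f(x, y) = (x - 0.5)(y - 0.5): x minimizes, y maximizes.
def field(z):
    x, y = z
    return np.array([-(y - 0.5), x - 0.5])   # (-df/dx, +df/dy)

def proj(z):
    # Stand-in projection onto [0, 1]^2; the paper projects onto products of simplices.
    return np.clip(z, 0.0, 1.0)

def gda(z, eta):
    return proj(z + eta * field(z))

def ogda(z, z_prev, eta):
    # Optimistic GDA: extrapolates using the field at the previous iterate.
    return proj(z + 2.0 * eta * field(z) - eta * field(z_prev))

def eg(z, eta):
    # Extra gradient: a probe half-step, then an update using the probe's field.
    z_half = proj(z + eta * field(z))
    return proj(z + eta * field(z_half))
```

OMWU is omitted from the sketch since it is a multiplicative, simplex-native update rather than a projected Euclidean one.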
+  {
+    "type": "text",
+    "text": "The folklore fact below will play a key role hereafter.",
+    "bbox": [171, 455, 524, 470],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Fact 2.1. Any fixed point of the aforementioned discrete-time dynamics (apart from OMWU) on the utility function necessarily corresponds to a Nash equilibrium of the game.",
+    "bbox": [169, 473, 823, 502],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Hence, an important test for the asymptotic behavior of the GDA, OGDA, EG, and OMWU methods is to examine whether these methods stabilize around their fixed points, which effectively constitute the Nash equilibria of the game. In Section 3.2, we show that in the absence of pure Nash equilibria, all the above methods fail to stabilize on their fixed points even for a simple class of two-team games with $(n = 2, m = 2)$. Consequently, they fail to converge to the mixed Nash equilibrium of the game.",
+    "bbox": [169, 511, 823, 595],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "These results demonstrate the need for a different approach that lies outside the scope of traditional optimization techniques. Inspired by the applications of washout filters to stabilize unknown fixed points and the adaptive control generalizations of the former, we design a new variant of GDA \"vaned\" with a feedback loop dictated by a pair of matrices. In contrast to the aforementioned conventional methods, our proposed technique surprisingly accomplishes asymptotic last-iterate convergence to its fixed points, i.e., the mixed Nash equilibria of the team game.",
+    "bbox": [169, 602, 823, 686],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "$(\\mathbf{K}, \\mathbf{P})$-vaned GDA Method. After concatenating the vectors of the minimizing and the maximizing agents, $z^{(k)} = (x^{(k)}, y^{(k)})$, our method for appropriate matrices $\\mathbf{K}, \\mathbf{P}$ reads:",
+    "bbox": [169, 700, 823, 732],
+    "page_idx": 3
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\left\\{ \\begin{array}{l} \\boldsymbol{z}^{(k+1)} = \\Pi_{\\mathcal{Z}} \\left\\{ \\boldsymbol{z}^{(k)} + \\eta \\begin{pmatrix} -\\nabla_{\\boldsymbol{x}} f(\\boldsymbol{z}^{(k)}) \\\\ \\nabla_{\\boldsymbol{y}} f(\\boldsymbol{z}^{(k)}) \\end{pmatrix} + \\eta \\mathbf{K} \\left( \\boldsymbol{z}^{(k)} - \\boldsymbol{\\theta}^{(k)} \\right) \\right\\} \\\\ \\boldsymbol{\\theta}^{(k+1)} = \\Pi_{\\mathcal{Z}} \\left\\{ \\boldsymbol{\\theta}^{(k)} + \\eta \\mathbf{P} \\left( \\boldsymbol{z}^{(k)} - \\boldsymbol{\\theta}^{(k)} \\right) \\right\\} \\end{array} \\right. \\tag{KPV-GDA}\n$$\n",
+    "text_format": "latex",
+    "bbox": [246, 734, 823, 782],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Intuitively, the additional variable $\\pmb{\\theta}^{(k)}$ holds an estimate of the fixed point, and through the feedback law $\\eta \\mathbf{K}(\\pmb{z}^{(k)} - \\pmb{\\theta}^{(k)})$ the vector $\\pmb{z}$ stabilizes around that estimate, which in turn slowly moves toward the true fixed point of the GDA dynamics.",
+    "bbox": [169, 787, 823, 832],
+    "page_idx": 3
+  },
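A minimal numpy sketch of one (KPV-GDA) iteration as displayed above, reusing the toy `field` and `proj` from the earlier sketch. The scalars k = -1.1 and p = 0.3 are the values the paper reports for its generalized matching pennies experiment; the toy objective, step size, and initialization are illustrative assumptions.

```python
import numpy as np

def kpv_gda_step(z, theta, eta, K, P, field, proj):
    # z receives the usual GDA field plus the feedback term eta * K (z - theta);
    # theta tracks z through eta * P (z - theta), acting as a fixed-point estimate.
    z_next = proj(z + eta * (field(z) + K @ (z - theta)))
    theta_next = proj(theta + eta * (P @ (z - theta)))
    return z_next, theta_next

field = lambda z: np.array([-(z[1] - 0.5), z[0] - 0.5])
proj = lambda z: np.clip(z, 0.0, 1.0)

K, P = -1.1 * np.eye(2), 0.3 * np.eye(2)
z = theta = np.array([0.9, 0.2])
for _ in range(5000):
    z, theta = kpv_gda_step(z, theta, 0.05, K, P, field, proj)
# Plain GDA cycles around the interior saddle (0.5, 0.5) of this toy field,
# whereas the feedback loop lets z and theta settle near it.
```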
+  {
+    "type": "text",
+    "text": "2.1 TWO ILLUSTRATIVE EXAMPLES",
+    "text_level": 1,
+    "bbox": [171, 847, 437, 861],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Our first example plays a dual role: first, it demonstrates how two-team min-max competition can capture the formulation of multi-agent GAN architectures; second, it hints at the discrepancy be-",
+    "bbox": [169, 873, 823, 902],
+    "page_idx": 3
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 3
+  },
+  {
+    "type": "page_footnote",
+    "text": "<sup>1</sup>We use here the shorthand $\\pmb{x}_{-i}$ (or $\\pmb{y}_{-i}$) to denote the strategies of all agents in $\\mathcal{N}$ (or $\\mathcal{M}$) except $i$.",
+    "bbox": [189, 909, 792, 924],
+    "page_idx": 3
+  },
+  {
+    "type": "page_number",
+    "text": "4",
+    "bbox": [493, 948, 504, 959],
+    "page_idx": 3
+  },
+  {
+    "type": "image",
+    "img_path": "images/acd7860bf75c1da39a4e403a4470681b60f961c65df700f8754a468c05decf4d.jpg",
+    "image_caption": [
+      "Figure 1: Parameter training of the configuration under different algorithms"
+    ],
+    "image_footnote": [],
+    "bbox": [383, 99, 614, 189],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "tween the results of optimization methods, since, as we will see, GDA will not converge to the Nash equilibrium/ground-truth distribution. Generally, the solution sought is the min-max solution of the objective function (Goodfellow et al., 2014), which is NP-hard to compute in the general case (Borgs et al., 2008); nevertheless, applications of GANs have proven that first-order stationary points of the objective function suffice to produce samples of very good quality.",
+    "bbox": [169, 233, 823, 303],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "2.1.1 LEARNING A MIXTURE OF GAUSSIANS WITH MULTI-AGENT GANS",
+    "text_level": 1,
+    "bbox": [171, 316, 694, 330],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Consider the case of $\\mathcal{O}$, a mixture of Gaussians with two components, $C_1 \\sim \\mathcal{N}(\\pmb{\\mu}, \\mathbf{I})$ and $C_2 \\sim \\mathcal{N}(-\\pmb{\\mu}, \\mathbf{I})$, and positive mixture weights $\\pi_1, \\pi_2$ such that $\\pi_1 + \\pi_2 = 1$ and $\\pi_1, \\pi_2 \\neq \\frac{1}{2}$.",
+    "bbox": [169, 340, 823, 372],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "To learn the distribution above, we utilize an instance of a Team-WGAN in which there exist a generating team of agents $G_{p}:\\mathbb{R}\\to \\mathbb{R}, G_{\\theta}:\\mathbb{R}^{n}\\to \\mathbb{R}^{n}$ and a discriminating team of agents $D_{\\boldsymbol{v}}:\\mathbb{R}^n\\to \\mathbb{R}, D_{\\boldsymbol{w}}:\\mathbb{R}^n\\to \\mathbb{R}$, all described by the following equations:",
+    "bbox": [169, 377, 823, 419],
+    "page_idx": 4
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\begin{array}{l} \\text{Generators: } G_{p}(\\zeta) = p + \\zeta, \\quad G_{\\boldsymbol{\\theta}}(\\boldsymbol{\\xi}) = \\boldsymbol{\\xi} + \\boldsymbol{\\theta} \\\\ \\text{Discriminators: } D_{\\boldsymbol{v}}(\\boldsymbol{y}) = \\left\\langle \\boldsymbol{v}, \\boldsymbol{y} \\right\\rangle, \\quad D_{\\boldsymbol{w}}(\\boldsymbol{y}) = \\sum_{i} w_{i} y_{i}^{2} \\end{array} \\tag{1}\n$$\n",
+    "text_format": "latex",
+    "bbox": [321, 425, 823, 455],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "The generating agent $G_{\\theta}$ maps random noise $\\xi \\sim \\mathcal{N}(0, \\mathbf{I})$ to samples, while the generating agent $G_{p}(\\zeta)$, utilizing an independent source of randomness $\\zeta \\sim \\mathcal{N}(0, 1)$, probabilistically controls the sign of the output of the generator $G_{\\theta}$. The probability of ultimately generating a sample $\\boldsymbol{y} = \\boldsymbol{\\xi} + \\boldsymbol{\\theta}$ is in expectation equal to $p$, while the probability of the sample being $\\boldsymbol{y} = -\\boldsymbol{\\xi} - \\boldsymbol{\\theta}$ is equal to $1 - p$.",
+    "bbox": [169, 469, 823, 527],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "On the other end stands the discriminating team of $D_v, D_w$. The discriminators $D_v(\\pmb{y}), D_w(\\pmb{y})$ map any given sample $\\pmb{y}$ to a scalar value accounting for the \"realness\" or \"fakeness\" of it (negative meaning fake, positive meaning real). The discriminators are disparate in the way they measure the realness of samples, as seen in their definitions.",
+    "bbox": [169, 532, 823, 588],
+    "page_idx": 4
+  },
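To make the setup concrete, a small numpy sketch of the Team-WGAN components in (1); the mixture parameters, the seed, and the variable names are illustrative assumptions rather than the paper's experimental configuration.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 2
mu = np.array([2.0, -1.0])                     # ground-truth mean of component C1
pi1 = 0.7                                      # mixture weight of C1 (pi2 = 1 - pi1)
theta, p = np.zeros(n), 0.5                    # generator parameters to be learned
v, w = rng.normal(size=n), rng.normal(size=n)  # discriminator parameters

def sample_O():
    """Draw one sample from the target mixture O."""
    center = mu if rng.random() < pi1 else -mu
    return rng.normal(loc=center, scale=1.0)

def G_theta(xi):
    return xi + theta            # shifts Gaussian noise by theta

def G_p(zeta):
    return p + zeta              # its mean p controls the sign-flip rate

def D_v(y):
    return v @ y                 # linear score <v, y>

def D_w(y):
    return w @ (y ** 2)          # quadratic score sum_i w_i y_i^2

xi = rng.normal(size=n)
fake = G_theta(xi)                       # generated sample before the sign decision
realness = D_v(fake) + D_w(fake)         # combined team score of the sample
```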
+  {
+    "type": "text",
+    "text": "We follow the formalism of the Wasserstein GAN to form the optimization objective:",
+    "bbox": [171, 595, 733, 609],
+    "page_idx": 4
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\max_{\\boldsymbol{v}, \\boldsymbol{w}} \\min_{\\boldsymbol{\\theta}, p} \\left\\{ \\mathbb{E}_{\\boldsymbol{y} \\sim \\mathcal{O}} \\left[ D_{\\boldsymbol{v}}(\\boldsymbol{y}) + D_{\\boldsymbol{w}}(\\boldsymbol{y}) \\right] - \\mathbb{E}_{\\substack{\\boldsymbol{\\xi} \\sim \\mathcal{N}(0, \\mathbf{I}),\\\\ \\zeta \\sim \\mathcal{N}(0, 1)}} \\left[ \\begin{array}{c} G_{p}(\\zeta) \\cdot \\left( D_{\\boldsymbol{v}}(G_{\\boldsymbol{\\theta}}(\\boldsymbol{\\xi})) + D_{\\boldsymbol{w}}(G_{\\boldsymbol{\\theta}}(\\boldsymbol{\\xi})) \\right) \\\\ + \\\\ \\left( 1 - G_{p}(\\zeta) \\right) \\cdot \\left( D_{\\boldsymbol{v}}(-G_{\\boldsymbol{\\theta}}(\\boldsymbol{\\xi})) + D_{\\boldsymbol{w}}(-G_{\\boldsymbol{\\theta}}(\\boldsymbol{\\xi})) \\right) \\end{array} \\right] \\right\\} \\tag{2}\n$$\n",
+    "text_format": "latex",
+    "bbox": [176, 614, 823, 691],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Equation (2) yields the simpler form:",
+    "bbox": [171, 699, 419, 713],
+    "page_idx": 4
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\max_{\\boldsymbol{v}, \\boldsymbol{w}} \\min_{\\boldsymbol{\\theta}, p} \\; (\\pi_{1} - \\pi_{2}) \\boldsymbol{v}^{T} \\boldsymbol{\\mu} - 2 p \\boldsymbol{v}^{T} \\boldsymbol{\\theta} + \\boldsymbol{v}^{T} \\boldsymbol{\\theta} + \\sum_{i=1}^{n} w_{i} \\left( \\mu_{i}^{2} - \\theta_{i}^{2} \\right) \\tag{3}\n$$\n",
+    "text_format": "latex",
+    "bbox": [292, 718, 823, 742],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "It is easy to check that Nash equilibria of (2) must satisfy:",
+    "bbox": [171, 755, 553, 770],
+    "page_idx": 4
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\left\\{ \\begin{array}{l} \\boldsymbol{\\theta} = \\boldsymbol{\\mu}, \\quad p = 1 - \\pi_{2} = \\pi_{1} \\\\ \\boldsymbol{\\theta} = -\\boldsymbol{\\mu}, \\quad p = 1 - \\pi_{1} = \\pi_{2}. \\end{array} \\right.\n$$\n",
+    "text_format": "latex",
+    "bbox": [374, 776, 620, 809],
+    "page_idx": 4
+  },
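A quick numpy check of the displayed conditions: objective (3) is linear in (v, w), so the maximizing team's gradient depends only on (θ, p), and it vanishes identically at the two profiles above. The values of π1 and μ below are assumed for illustration.

```python
import numpy as np

pi1, pi2 = 0.7, 0.3                  # any weights with pi1 + pi2 = 1 work
mu = np.array([1.5, -0.5])

def max_player_grads(theta, p):
    """Gradients of objective (3) w.r.t. v and w; (3) is linear in both."""
    grad_v = (pi1 - pi2) * mu - 2.0 * p * theta + theta
    grad_w = mu ** 2 - theta ** 2
    return grad_v, grad_w

for theta, p in [(mu, pi1), (-mu, pi2)]:
    grad_v, grad_w = max_player_grads(theta, p)
    assert np.allclose(grad_v, 0.0) and np.allclose(grad_w, 0.0)
```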
+  {
+    "type": "text",
+    "text": "Figure 1 demonstrates GDA failing while OGDA, EG, and our proposed method KPV-GDA succeed in converging to the above Nash equilibria, simultaneously discovering the ground-truth mixture.",
+    "bbox": [169, 814, 826, 856],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "2.1.2 MULTIPLAYER MATCHING PENNIES",
+    "text_level": 1,
+    "bbox": [171, 871, 478, 883],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Interestingly enough, there are non-trivial instances of two-team competition settings in which even OGDA and EG fail to converge. Such is the case for a team version of the well-known game of",
+    "bbox": [169, 895, 825, 924],
+    "page_idx": 4
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 4
+  },
+  {
+    "type": "page_number",
+    "text": "5",
+    "bbox": [493, 948, 503, 959],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "matching pennies. The game can be briefly described as follows: \"coordinate with your teammates to play a game of matching pennies against the opposing team; fail to coordinate and pay a penalty\". (We note that this game is a special case of the game presented in Section 3.3.) As we can see in Figures 2a and 2b, this multiplayer generalized matching pennies game constitutes an excellent benchmark on which all traditional gradient flow discretizations fail under the perfect competition setting. Interestingly, we are not aware of a similar example in the min-max literature, and it has been our starting point for seeking new optimization techniques inspired by control theory. Indeed, the KPV-GDA variation with $(\\mathbf{K},\\mathbf{P}) = (-1.1\\cdot \\mathbf{I}, 0.3\\cdot \\mathbf{I})$ manages to converge to the unique mixed Nash equilibrium of the game. In the following sections, we provide theorems that formally explain the behavior of the examined dynamics.",
+    "bbox": [169, 104, 826, 243],
+    "page_idx": 5
+  },
+  {
+    "type": "image",
+    "img_path": "images/4e0ff145a5c93396adafd32683a02614232efc294b270cc2fb6a81e8cb72ccbf.jpg",
+    "image_caption": [
+      "(a) Generalized matching pennies under different algorithms. For the precise definition of the game, we refer to Appendix C.4."
+    ],
+    "image_footnote": [],
+    "bbox": [228, 265, 434, 335],
+    "page_idx": 5
+  },
+  {
+    "type": "image",
+    "img_path": "images/7e27dfe24497c08c870f0e0d7db721fd154f9b76a0118eea7ae3ab5763b50354.jpg",
+    "image_caption": [
+      "(b) Projected trajectory of Team A under different algorithms. The sketched surface is not part of the feasible team strategy profiles (product of distributions)."
+    ],
+    "image_footnote": [],
+    "bbox": [593, 234, 712, 323],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "3 MAIN RESULTS",
+    "text_level": 1,
+    "bbox": [171, 402, 333, 417],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "In this section, we will prove that computing a Nash equilibrium in two-team zero-sum games is computationally hard, and thus a polynomial-time algorithm that computes one is unlikely. Next, we will demonstrate the shortcomings of an array of commonly used online learning, first-order methods, and then we will provide a novel, decentralized, first-order method that locally converges to NE under some conditions.",
+    "bbox": [169, 436, 823, 506],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "3.1 ON THE HARDNESS OF COMPUTING NE IN TWO-TEAM ZERO-SUM GAMES",
+    "text_level": 1,
+    "bbox": [171, 525, 720, 539],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "As promised, our first statement characterizes the hardness of NE computation in two-team zero-sum games:",
+    "bbox": [169, 551, 823, 580],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "Theorem 3.1 (CLS-hard). Computing a Nash equilibrium in a succinctly represented two-team zero-sum game is CLS-hard.",
+    "bbox": [169, 585, 823, 613],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "The main idea of the proof of Theorem 3.1 relies on a reduction from approximating Nash equilibria in congestion games, a problem which has been shown to be complete for the complexity class CLS. The class CLS (continuous local search) contains the problem of continuous optimization. We defer the proof of the above theorem to the paper's supplement.",
+    "bbox": [169, 626, 823, 681],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "3.2 FAILURE OF COMMON ONLINE, FIRST-ORDER METHODS",
+    "text_level": 1,
+    "bbox": [171, 702, 599, 715],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "The negative computational complexity result we proved for two-team zero-sum games (Theorem 3.1) does not preclude the prospect of attaining algorithms (learning, first-order methods) that converge to Nash equilibria. Unfortunately, we prove that these methods cannot guarantee convergence to Nash equilibria in two-team zero-sum games in general.",
+    "bbox": [169, 728, 823, 785],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "In this subsection, we construct a family of two-team zero-sum games with the property that the dynamics of GDA, OGDA, OMWU, and EG are unstable on Nash equilibria. This result is indicative of the challenges that lie in the min-max optimization of two-team zero-sum games and of the reason that provable, non-asymptotic convergence guarantees of online learning have not yet been established.",
+    "bbox": [169, 791, 823, 859],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "Before defining our benchmark game, we prove an important theorem which states that GDA does not converge to mixed Nash equilibria. This fact is a stepping stone in constructing the family of two-team zero-sum games later. We present the proofs of all statements below in detail in the paper's appendix (Appendix B).",
+    "bbox": [169, 868, 825, 925],
+    "page_idx": 5
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 5
+  },
+  {
+    "type": "page_number",
+    "text": "6",
+    "bbox": [493, 948, 504, 959],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "Weakly-stable Nash equilibrium. (Kleinberg et al., 2009; Mehta et al., 2015) Consider the set of Nash equilibria with the property that if any single randomizing agent of one team is forced to play any strategy in their current support with probability one, all other agents of the same team must remain indifferent between the strategies in their support. Such Nash equilibria are called weakly-stable. We note that pure Nash equilibria are trivially weakly-stable. It has been shown that mixed Nash equilibria are not weakly-stable in generic games<sup>2</sup>.",
+    "bbox": [169, 103, 826, 188],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "We can show that Nash equilibria that are not weakly-stable are actually unstable for GDA. Moreover, we can show, through standard dynamical-systems machinery, that the set of initial conditions that converge to Nash equilibria that are not weakly-stable must be of Lebesgue measure zero. Formally, we prove that:",
+    "bbox": [169, 193, 823, 251],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Theorem 3.2 (Non weakly-stable Nash are unstable). Consider a two-team zero-sum game with the utility function of Team $B$ ($y$ vector) being $U(\\boldsymbol{x}, \\boldsymbol{y})$ and of Team $A$ ($x$ vector) being $-U(\\boldsymbol{x}, \\boldsymbol{y})$. Moreover, assume that $(\\boldsymbol{x}^*, \\boldsymbol{y}^*)$ is a Nash equilibrium of full support that is not weakly-stable. It follows that the set of initial conditions from which GDA converges to $(\\boldsymbol{x}^*, \\boldsymbol{y}^*)$ is of measure zero for step size $\\eta < \\frac{1}{L}$, where $L$ is the Lipschitz constant of $\\nabla U$.",
+    "bbox": [168, 253, 826, 325],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "3.3 GENERALIZED MATCHING PENNIES (GMP)",
+    "text_level": 1,
+    "bbox": [171, 340, 519, 357],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Inspired by Theorem 3.2, in this section we construct a family of two-team zero-sum games on which the GDA, OGDA, OMWU, and EG methods fail to converge (if the initialization is a random point in the simplex, the probability of convergence of the aforementioned methods is zero). The intuition is to construct a family of games, each of which has only mixed Nash equilibria (that are not weakly-stable); i.e., the constructed games should lack pure Nash equilibria. By Theorem 3.2, this would immediately imply our claim for GDA. It turns out that OGDA, OMWU, and EG also fail to converge for the same family.",
+    "bbox": [169, 367, 823, 465],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Definition of GMP. Consider a setting with two teams (Team $A$, Team $B$), each of which has $n = 2$ players. Inspired by the standard matching pennies game and the game defined in (Schulman & Vazirani, 2019a), we allow each agent $i$ two strategies/actions, that is, $S = \\{H, T\\}$ for both teams, yielding $2^4$ possible strategy profiles. In case all",
+    "bbox": [169, 479, 562, 563],
+    "page_idx": 6
+  },
+  {
+    "type": "image",
+    "img_path": "images/6e4b46f958a13cdac580fdbb68d36bbdb9152d16650aee635b1ca5be06581b93.jpg",
+    "image_caption": [],
+    "image_footnote": [],
+    "bbox": [570, 494, 620, 537],
+    "page_idx": 6
+  },
+  {
+    "type": "table",
+    "img_path": "images/7e93086ecf58926df0ed9347c3f50d1546d5e5760890942b95b67e0df30db482.jpg",
+    "table_caption": [],
+    "table_footnote": [],
+    "table_body": "<table><tr><td></td><td>HH</td><td>HT/TH</td><td>TT</td></tr><tr><td>HH</td><td>1,-1</td><td>ω,-ω</td><td>-1,1</td></tr><tr><td>HT/TH</td><td>-ω,ω</td><td>0,0</td><td>-ω,ω</td></tr><tr><td>TT</td><td>-1,1</td><td>ω,-ω</td><td>1,-1</td></tr></table>",
+    "bbox": [622, 479, 807, 540],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "the members of a team choose the same strategy, say $H$ or $T$, then the team \"agrees\" to play $H$ or $T$ (otherwise the team \"does not agree\").",
+    "bbox": [169, 564, 823, 592],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Thus, in the case that both teams \"agree\", the payoff of each team is exactly the payoff of two-player matching pennies. If one team \"agrees\" and the other does not, the team that \"agrees\" enjoys the payoff $\\omega \\in (0,1)$ and the other team suffers a penalty $\\omega$. If both teams fail to \"agree\", both teams get payoff zero. Let $x_{i}$ with $i \\in \\{1,2\\}$ be the probability that agent $i$ of Team $A$ chooses $H$ and $1 - x_{i}$ the probability that she chooses $T$; we denote by $\\mathbf{x}$ the vector of probabilities for Team $A$. Similarly, let $y_{i}$ for $i \\in \\{1,2\\}$ be the probability that agent $i$ of Team $B$ chooses $H$ and $1 - y_{i}$ the probability that she chooses $T$, and let $\\mathbf{y}$ be the corresponding probability vector.",
+    "bbox": [169, 598, 823, 698],
+    "page_idx": 6
+  },
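For concreteness, a small Python sketch of GMP's expected utility under product strategies, reading the (Team A, Team B) payoff pairs off the table above; the final assertion illustrates the indifference at the uniform profile, the unique Nash equilibrium per Lemma 3.1 below. The function names and the choice ω = 0.5 are illustrative assumptions.

```python
import itertools
import numpy as np

def team_action(a1, a2):
    """Return 'H' or 'T' if the team agrees, None otherwise."""
    return a1 if a1 == a2 else None

def U_A(team_a, team_b, w):
    """Team A's payoff for one pure profile, per the payoff table."""
    a, b = team_action(*team_a), team_action(*team_b)
    if a is None and b is None:
        return 0.0                      # neither team agrees
    if a is None:
        return -w                       # only Team B agrees: A pays the penalty
    if b is None:
        return w                        # only Team A agrees: A enjoys omega
    return 1.0 if a == b else -1.0      # both agree: plain matching pennies

def expected_U_A(x, y, w):
    """Multilinear expectation under product strategies x = (x1, x2), y = (y1, y2)."""
    total = 0.0
    for bits in itertools.product([0, 1], repeat=4):
        prob = np.prod([q if b == 0 else 1.0 - q for q, b in zip([*x, *y], bits)])
        acts = ['H' if b == 0 else 'T' for b in bits]
        total += prob * U_A((acts[0], acts[1]), (acts[2], acts[3]), w)
    return total

# At the uniform profile, a unilateral deviation by player 1 of Team A
# leaves Team A's expected payoff unchanged (indifference):
u_star = expected_U_A((0.5, 0.5), (0.5, 0.5), w=0.5)
u_dev = expected_U_A((1.0, 0.5), (0.5, 0.5), w=0.5)
assert abs(u_star - u_dev) < 1e-12
```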
+  {
+    "type": "text",
+    "text": "Properties of GMP. An important remark on the properties of our presented game is due. Existing literature tackles settings with (i) (weak) monotonicity (Mertikopoulos et al., 2019; Diakonikolas et al., 2021), (ii) cocoercivity (Zhu & Marcotte, 1996), (iii) zero duality gap (Von Neumann, 1928), or (iv) unconstrained solution space (Golowich et al., 2020). Our game is carefully crafted and, although it has a distinct structure and is nonconvex-nonconcave only due to multilinearity, satisfies none of the latter properties. This makes the (local) convergence of our proposed method even more surprising. (See also Appendix B.6.)",
+    "bbox": [169, 710, 823, 809],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "The first fact about the game that we defined is that for $\\omega \\in (0,1)$ there is only one Nash equilibrium $(\\pmb{x}^{*},\\pmb{y}^{*})$, which is the uniform strategy, i.e., $x_{1}^{*} = x_{2}^{*} = y_{1}^{*} = y_{2}^{*} = \\frac{1}{2}$.",
+    "bbox": [169, 816, 823, 847],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Lemma 3.1 (GMP has a unique Nash). The Generalized Matching Pennies game exhibits a unique Nash equilibrium, which is $(\\pmb{x}^{*},\\pmb{y}^{*}) = ((\\frac{1}{2},\\frac{1}{2}),(\\frac{1}{2},\\frac{1}{2}))$.",
+    "bbox": [169, 848, 823, 880],
+    "page_idx": 6
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 6
+  },
+  {
+    "type": "page_footnote",
+    "text": "<sup>2</sup>Roughly speaking, generic games are obtained by adding small Gaussian noise to perturb every payoff slightly, so as to preclude any payoff ties. In these games, all Nash equilibria in all but a measure-zero set of games exhibit the property that all pure best responses are played with positive probability.",
+    "bbox": [169, 883, 823, 925],
+    "page_idx": 6
+  },
+  {
+    "type": "page_number",
+    "text": "7",
+    "bbox": [493, 948, 503, 959],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Remark 1. The fact that the game we defined has a unique Nash equilibrium that lies in the interior of $[0,1]^4$ is crucial for our negative convergence results later in this section, since we will show that it is not a weakly-stable Nash equilibrium, so the negative result about GDA follows as a corollary of Theorem 3.2. We also note that if $\\omega = 1$ then there are more Nash equilibria, in particular $(0,0), (1,0), (0,1), (1,1)$, which are pure.",
+    "bbox": [169, 103, 826, 176],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "The following theorem is the main (negative) result of this section.",
+    "bbox": [169, 186, 617, 200],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Theorem 3.3 (GDA, OGDA, EG, and OMWU fail). Consider the GMP game with $\\omega \\in (0,1)$. Assume that $\\eta_{GDA} < \\frac{1}{4}$, $\\eta_{OGDA} < \\min(\\omega, \\frac{1}{8})$, $\\eta_{EG} < \\frac{\\omega}{2}$, and $\\eta_{OMWU} < \\min\\left(\\frac{1}{4}, \\frac{\\omega}{2}\\right)$ (bounds on the step sizes of the GDA, OGDA, EG, and OMWU methods, respectively). It holds that the set of initial conditions from which GDA, OGDA, OMWU, and EG converge (stabilize to any point) is of measure zero.",
+    "bbox": [168, 204, 823, 262],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Remark 2. Theorem 3.3 formally demonstrates that the behavior of the algorithms mentioned in Section 2.1.2 is not a result of \"bad parametrization\"; in fact, the probability that any of them converges to the NE is equal to the probability that the initialization of the variables coincides with the NE (Lebesgue measure zero).",
+    "bbox": [169, 267, 825, 324],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Remark 3 (Average iterate also fails). One might ask what happens when we consider average iterates instead of the last iterate. It is a well-known fact (Syrgkanis et al., 2015) that the average iterate of no-regret algorithms converges to coarse correlated equilibria (CCE), so we expect the average iterate to stabilize. Nevertheless, CCE need not be Nash equilibria. Indeed, we can construct examples in which the average iterates of GDA, OGDA, OMWU, and EG experimentally fail to stabilize to Nash equilibria. In particular, we consider a slight modification of GMP: players and strategies are the same, but the payoff matrix has changed.",
+    "bbox": [168, 327, 826, 426],
+    "page_idx": 7
+  },
+  {
+    "type": "image",
+    "img_path": "images/3eea1e7b15363c681928233f23c08b4158a722aa9bdfae37c08c7c2edc7b2d5b.jpg",
+    "image_caption": [
+      "Figure 3: GDA, OGDA, OMWU, & EG fail to converge to a Nash equilibrium even on average"
+    ],
+    "image_footnote": [],
+    "bbox": [207, 450, 689, 536],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Figure 3 shows that the average iterates of GDA, OGDA, OMWU, and EG stabilize to points that are not Nash equilibria. We note that since our method (see the next subsection) converges locally, its average iterate should also converge locally to a Nash equilibrium.",
+    "bbox": [169, 575, 823, 619],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "3.4 OUR PROPOSED METHOD",
+    "text_level": 1,
+    "bbox": [171, 636, 390, 648],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "The aforementioned results prove that the challenging goal of computing Nash equilibria in two-team zero-sum games calls for an expansion of existing optimization techniques. The mainstay of this effort and our positive result is the KPV-GDA method defined in (KPV-GDA), which is inspired by techniques from the adaptive control literature. The first statement we make is that KPV-GDA stabilizes around any Nash equilibrium for appropriate choices of the matrices $\\mathbf{K},\\mathbf{P}$:",
+    "bbox": [169, 662, 823, 734],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Theorem 3.4 (KPV-GDA stabilizes). Consider a two-team zero-sum game such that the utility of Team $B$ is $U(\\pmb{x},\\pmb{y})$, and hence the utility of Team $A$ is $-U(\\pmb{x},\\pmb{y})$, and a Nash equilibrium $(\\pmb{x}^*,\\pmb{y}^*)$ of the game. Moreover, we assume",
+    "bbox": [169, 736, 823, 779],
+    "page_idx": 7
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\left( \\begin{array}{ll} -\\nabla_{\\pmb{xx}}^{2} U(\\pmb{x}^{*}, \\pmb{y}^{*}) & -\\nabla_{\\pmb{xy}}^{2} U(\\pmb{x}^{*}, \\pmb{y}^{*}) \\\\ \\nabla_{\\pmb{yx}}^{2} U(\\pmb{x}^{*}, \\pmb{y}^{*}) & \\nabla_{\\pmb{yy}}^{2} U(\\pmb{x}^{*}, \\pmb{y}^{*}) \\end{array} \\right) \\text{ is invertible.}\n$$\n",
+    "text_format": "latex",
+    "bbox": [312, 786, 684, 821],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "For any fixed step size $\\eta > 0$, we can always find matrices $\\mathbf{K}, \\mathbf{P}$ so that the KPV-GDA method defined in (KPV-GDA) converges locally to $(\\pmb{x}^{*}, \\pmb{y}^{*})$.",
+    "bbox": [169, 827, 826, 858],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "This is an existential theorem and is not, in general, useful in practice. Further, such a dynamic would not necessarily be uncoupled, and the design of the matrices $\\mathbf{K}$ and $\\mathbf{P}$ could necessitate knowledge of the NE we are trying to compute. Instead, our next statement provides sufficient conditions under which a simple parametrization of the matrices $\\mathbf{K},\\mathbf{P}$ results in an uncoupled, converging dynamic:",
+    "bbox": [169, 867, 825, 925],
+    "page_idx": 7
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 7
+  },
+  {
+    "type": "page_number",
+    "text": "8",
+    "bbox": [493, 948, 503, 959],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Theorem 3.5. Consider a two-team zero-sum game so that the utility of Team $B$ is $U(\\pmb{x},\\pmb{y})$, the utility of Team $A$ is $-U(\\pmb{x},\\pmb{y})$, and a Nash equilibrium $(\\pmb{x}^*,\\pmb{y}^*)$. Moreover, let",
+    "bbox": [169, 103, 823, 133],
+    "page_idx": 8
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\mathbf{H} := \\left( \\begin{array}{cc} -\\nabla_{\\boldsymbol{xx}}^{2} U(\\boldsymbol{x}^{*}, \\boldsymbol{y}^{*}) & -\\nabla_{\\boldsymbol{xy}}^{2} U(\\boldsymbol{x}^{*}, \\boldsymbol{y}^{*}) \\\\ \\nabla_{\\boldsymbol{yx}}^{2} U(\\boldsymbol{x}^{*}, \\boldsymbol{y}^{*}) & \\nabla_{\\boldsymbol{yy}}^{2} U(\\boldsymbol{x}^{*}, \\boldsymbol{y}^{*}) \\end{array} \\right).\n$$\n",
+    "text_format": "latex",
+    "bbox": [351, 138, 643, 167],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "and let $E$ be the set of eigenvalues $\\rho$ of $\\mathbf{H}$ with positive real part, that is, $E = \\{ \\rho \\text{ eigenvalue of } \\mathbf{H} : \\mathrm{Re}(\\rho) > 0 \\}$. We assume that $\\mathbf{H}$ is invertible and, moreover,",
+    "bbox": [169, 172, 823, 202],
+    "page_idx": 8
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\beta = \\min_{\\rho \\in E} \\frac{\\mathrm{Re}(\\rho)^{2} + \\mathrm{Im}(\\rho)^{2}}{\\mathrm{Re}(\\rho)} > \\max_{\\rho \\in E} \\mathrm{Re}(\\rho) = \\alpha. \\tag{4}\n$$\n",
+    "text_format": "latex",
+    "bbox": [339, 209, 823, 244],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "We set $\\mathbf{K} = k\\cdot \\mathbf{I}$ and $\\mathbf{P} = p\\cdot \\mathbf{I}$. Then there exist a small enough step size $\\eta > 0$ and a scalar $p > 0$ such that, for any $k\\in (-\\beta, -\\alpha)$, (KPV-GDA) with the chosen $\\mathbf{K},\\mathbf{P}$ converges locally to $(\\boldsymbol{x}^{*},\\boldsymbol{y}^{*})$.",
+    "bbox": [169, 250, 823, 280],
+    "page_idx": 8
+  },
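A small numpy sketch of how condition (4) and the admissible interval for k in Theorem 3.5 can be computed from H; the example matrix is an illustrative assumption, not one of the paper's games.

```python
import numpy as np

def choose_k(H):
    """Compute alpha, beta from the eigenvalues of H with positive real part
    and return a feedback gain k in (-beta, -alpha), per condition (4)."""
    rho = np.linalg.eigvals(H)
    E = rho[rho.real > 0]
    alpha = E.real.max()
    beta = (np.abs(E) ** 2 / E.real).min()
    if beta <= alpha:
        raise ValueError("condition (4) fails: no admissible k")
    return -(alpha + beta) / 2.0        # midpoint of the interval (-beta, -alpha)

H = np.array([[0.5, -1.0],
              [1.0,  0.5]])             # eigenvalues 0.5 +/- 1i
k = choose_k(H)                         # alpha = 0.5, beta = 2.5  ->  k = -1.5
```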
+  {
+    "type": "text",
+    "text": "4 EXPERIMENTS",
+    "text_level": 1,
+    "bbox": [171, 299, 326, 313],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "In this section, we perform a series of experiments to further motivate the study of two-team zero-sum games, especially in the context of multi-agent generative adversarial networks (multi-GANs). A multi-agent generative adversarial network (multi-GAN) (Arora et al., 2017; Hoang et al., 2017; Zhang et al., 2018; Tang, 2020; Hardy et al., 2019; Albuquerque et al., 2019) is a generative adversarial network (GAN) that leverages multiple \"agents\" (generators and/or discriminators) in order to achieve statistical and computational benefits. In particular, Arora et al. formally proved the expressive superiority of multi-generator adversarial network architectures, something that we empirically verify in Section 4. In this direction, researchers strive to harness the efficacy of distributed processing by utilizing shallower networks that can collectively learn more diverse datasets<sup>4</sup>.",
+    "bbox": [169, 330, 823, 455],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "At first, the superiority of multi-GANs might appear to contradict our theoretical findings; but in reality, the superiority comes from the quality of solutions that are attainable by multi-agent architectures (expressivity) and the fact that hardness (complexity) translates to rates of convergence, not non-convergence. Single-agent GANs quickly converge to critical points that are not guaranteed to capture the distribution very well. In Figure 4 we see the fast convergence of a single-agent GAN to solutions of bad quality versus the convergence of a multi-GAN to an obviously better solution. Due to space constraints, we defer further discussion of the experiments to Section D.1.",
+    "bbox": [169, 462, 823, 561],
+    "page_idx": 8
+  },
+  {
+    "type": "image",
+    "img_path": "images/edd74f1ec4609a57f05699cfd6d51519a0675419361994dd350943352b7d56b6.jpg",
+    "image_caption": [
+      "Figure 4: From left to right: (i) Each generator of MGAN learns one mode of the mixture of 8 Gaussians, (ii) Mode collapse of single-agent GANs, (iii) Single-agent GAN cannot discriminate between the modes."
+    ],
+    "image_footnote": [],
+    "bbox": [284, 570, 746, 642],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "5 CONCLUSIONS AND OPEN PROBLEMS",
+    "text_level": 1,
+    "bbox": [171, 703, 519, 719],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "In this work we study the wide class of nonconvex-nonconcave games that express two-team competition, inspired broadly by the structure of the complex competition between multi-agent generators and discriminators in GANs. Furthermore, in this setting of two-team zero-sum games, we have presented a number of negative results about the problem of computing a Nash equilibrium. Moreover, through a simple family of games that we construct, we prove the inability of commonly used methods for min-max optimization, such as GDA, OGDA, OMWU, and EG, to converge, both on average and in the last iterate, to Nash equilibria, in stark contrast to recent literature concerned with simpler games. We have also presented an optimization method (called KPV-GDA) that manages to stabilize around Nash equilibria.",
+    "bbox": [169, 734, 823, 861],
+    "page_idx": 8
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 8
+  },
+  {
+    "type": "page_footnote",
+    "text": "<sup>3</sup>As long as the aforementioned conditions are satisfied, (KPV-GDA) locally converges in any nonconvex-nonconcave game. Indeed, GMP with any $\\omega$ satisfies the sufficient conditions of Theorem 3.5. See also Appendix B.7.",
+    "bbox": [169, 869, 823, 897],
+    "page_idx": 8
+  },
+  {
+    "type": "page_footnote",
+    "text": "<sup>4</sup>Indeed, it is preferable from a computational standpoint to back-propagate through two equally sized neural networks rather than through a single one that would be twice as deep (Tang, 2020).",
+    "bbox": [169, 896, 823, 924],
+    "page_idx": 8
+  },
+  {
+    "type": "page_number",
+    "text": "9",
+    "bbox": [493, 948, 503, 959],
+    "page_idx": 8
+  },
+  {
+    "type": "text",
+    "text": "ACKNOWLEDGEMENTS",
+    "text_level": 1,
+    "bbox": [171, 102, 369, 118],
+    "page_idx": 9
+  },
+  {
+    "type": "text",
+    "text": "Ioannis Panageas would like to acknowledge a start-up grant. Emmanouil V. Vlatakis-Gkaragkounis is grateful for the financial support of a FODSI Postdoctoral Fellowship. This work was partially completed while IP and EVVG were visiting research fellows at the Simons Institute for the Theory of Computing during the Learning and Games semester.",
+    "bbox": [171, 132, 826, 191],
+    "page_idx": 9
+  },
+  {
+    "type": "text",
+    "text": "REFERENCES",
+    "text_level": 1,
+    "bbox": [173, 209, 287, 224],
+    "page_idx": 9
+  },
+  {
+    "type": "list",
+    "sub_type": "ref_text",
+    "list_items": [
+      "Isabela Albuquerque, João Monteiro, Thang Doan, Breandan Considine, Tiago Falk, and Ioannis Mitliagkas. Multi-objective training of generative adversarial networks with multiple discriminators. In International Conference on Machine Learning, pp. 202-211. PMLR, 2019.",
+      "Ioannis Anagnostides, Gabriele Farina, Christian Kroer, Andrea Celli, and Tuomas Sandholm. Faster no-regret learning dynamics for extensive-form correlated and coarse correlated equilibria. arXiv preprint arXiv:2202.05446, 2022a.",
+      "Ioannis Anagnostides, Ioannis Panageas, Gabriele Farina, and Tuomas Sandholm. On last-iterate convergence beyond zero-sum games. arXiv preprint arXiv:2203.12056, 2022b.",
+      "Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 214-223, 2017.",
+      "Sanjeev Arora, Elad Hazan, and Satyen Kale. The multiplicative weights update method: a meta-algorithm and applications. Theory of Computing, 8(1):121-164, 2012.",
+      "Sanjeev Arora, Rong Ge, Yingyu Liang, Tengyu Ma, and Yi Zhang. Generalization and equilibrium in generative adversarial nets (GANs). In International Conference on Machine Learning, pp. 224-232. PMLR, 2017.",
+      "Moshe Babaioff, Robert Kleinberg, and Christos H Papadimitriou. Congestion games with malicious players. Games and Economic Behavior, 67(1):22-35, 2009.",
+      "Yakov Babichenko and Aviad Rubinstein. Settling the complexity of Nash equilibrium in congestion games. In Samir Khuller and Virginia Vassilevska Williams (eds.), STOC '21: 53rd Annual ACM SIGACT Symposium on Theory of Computing, Virtual Event, Italy, June 21-25, 2021, pp. 1426-1437. ACM, 2021.",
+      "Michael Bacharach. Interactive team reasoning: A contribution to the theory of co-operation. Research in Economics, 53(2):117-147, 1999. ISSN 1090-9443. doi: https://doi.org/10.1006/reec.1999.0188. URL https://www.sciencedirect.com/science/article/pii/S1090944399901886.",
+      "Nicola Basilico, Andrea Celli, Giuseppe De Nittis, and Nicola Gatti. Computing the team-maxmin equilibrium in single-team single-adversary team games. Intelligenza Artificiale, 11(1):67-79, 2017a.",
+      "Nicola Basilico, Andrea Celli, Giuseppe De Nittis, and Nicola Gatti. Team-maxmin equilibrium: efficiency bounds and algorithms. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 31, 2017b.",
+      "A. S. Bazanella, P. V. Kokotovic, and A. S. e Silva. On the control of dynamic systems with unknown operating point. In 1997 European Control Conference (ECC), pp. 3434-3439, 1997. doi: 10.23919/ECC.1997.7082644.",
+      "Alexandre S. Bazanella, Petar V. Kokotovic, and Aguinaldo S. E Silva. On the control of dynamic systems with unknown operating point. International Journal of Control, 73(7):600-605, 2000. doi: 10.1080/002071700219443. URL https://doi.org/10.1080/002071700219443.",
+      "Kimmo Berg and Tuomas Sandholm. Exclusion method for finding Nash equilibrium in multiplayer games. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 31, 2017."
+    ],
+    "bbox": [171, 232, 825, 924],
+    "page_idx": 9
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 9
+  },
+  {
+    "type": "page_number",
+    "text": "10",
+    "bbox": [490, 946, 509, 960],
+    "page_idx": 9
+  },
+  {
+    "type": "list",
+    "sub_type": "ref_text",
+    "list_items": [
+      "David Blackwell. An analog of the minimax theorem for vector payoffs. Pacific Journal of Mathematics, 6(1):1-8, 1956.",
+      "Christian Borgs, Jennifer Chayes, Nicole Immorlica, Adam Tauman Kalai, Vahab Mirrokni, and Christos Papadimitriou. The myth of the folk theorem. In Proceedings of the fortieth annual ACM symposium on Theory of computing, pp. 365-372, 2008.",
+      "Michael Bowling, Neil Burch, Michael Johanson, and Oskari Tammelin. Heads-up limit hold'em poker is solved. Science, 347(6218):145-149, 2015.",
+      "Noam Brown and Tuomas Sandholm. Superhuman AI for heads-up no-limit poker: Libratus beats top professionals. Science, 359(6374):418-424, 2018.",
+      "Federico Cacciamani, Andrea Celli, Marco Ciccone, and Nicola Gatti. Multi-agent coordination in adversarial environments through signal mediated strategies. In Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems, pp. 269-278, 2021.",
+      "Yongcan Cao, Wenwu Yu, Wei Ren, and Guanrong Chen. An overview of recent progress in the study of distributed multi-agent coordination. IEEE Transactions on Industrial Informatics, 9(1):427-438, 2012.",
+      "Andrea Celli and Nicola Gatti. Computational results for extensive-form adversarial team games. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.",
+      "N. Cesa-Bianchi and G. Lugosi. On prediction of individual sequences. Annals of Statistics, pp. 1865-1895, 1999.",
+      "Chao-Kai Chiang, Tianbao Yang, Chia-Jung Lee, Mehrdad Mahdavi, Chi-Jen Lu, Rong Jin, and Shenghuo Zhu. Online optimization with gradual variations. In Conference on Learning Theory, pp. 6-1. JMLR Workshop and Conference Proceedings, 2012.",
+      "Constantinos Daskalakis and Ioannis Panageas. The limit points of (optimistic) gradient descent in min-max optimization. Advances in Neural Information Processing Systems, 31, 2018.",
+      "Constantinos Daskalakis and Ioannis Panageas. Last-iterate convergence: Zero-sum games and constrained min-max optimization. Innovations in Theoretical Computer Science, 2019.",
+      "Constantinos Daskalakis and Christos Papadimitriou. Continuous local search. In Proceedings of the twenty-second annual ACM-SIAM symposium on Discrete Algorithms, pp. 790-804. SIAM, 2011.",
+      "Constantinos Daskalakis, Alex Fabrikant, and Christos H Papadimitriou. The game world is flat: The complexity of Nash equilibria in succinct games. In International Colloquium on Automata, Languages, and Programming, pp. 513-524. Springer, 2006.",
+      "Constantinos Daskalakis, Paul W Goldberg, and Christos H Papadimitriou. The complexity of computing a Nash equilibrium. SIAM Journal on Computing, 39(1):195-259, 2009.",
+      "Constantinos Daskalakis, Alan Deckelbaum, and Anthony Kim. Near-optimal no-regret algorithms for zero-sum games. In Proceedings of the twenty-second annual ACM-SIAM symposium on Discrete Algorithms, pp. 235-254. SIAM, 2011.",
+      "Constantinos Daskalakis, Stratis Skoulakis, and Manolis Zampetakis. The complexity of constrained min-max optimization. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing, pp. 1466-1478, 2021.",
+      "Jelena Diakonikolas, Constantinos Daskalakis, and Michael I Jordan. Efficient methods for structured nonconvex-nonconcave min-max optimization. In International Conference on Artificial Intelligence and Statistics, pp. 2746-2754. PMLR, 2021.",
+      "Ishan Durugkar, Ian Gemp, and Sridhar Mahadevan. Generative multi-adversarial networks. arXiv preprint arXiv:1611.01673, 2016."
+    ],
+    "bbox": [171, 102, 825, 924],
+    "page_idx": 10
+  },
+  {
+    "type": "header",
+    "text": "Published as a conference paper at ICLR 2023",
+    "bbox": [171, 32, 478, 47],
+    "page_idx": 10
+  },
+  {
+    "type": "page_number",
+    "text": "11",
+    "bbox": [490, 946, 506, 959],
+    "page_idx": 10
+  },
+  {
+    "type": "list",
+    "sub_type": "ref_text",
+    "list_items": [
+      "Alex Fabrikant, Christos Papadimitriou, and Kunal Talwar. The complexity of pure Nash equilibria. In Proceedings of the thirty-sixth annual ACM symposium on Theory of computing, pp. 604-612, 2004.",
+      "Gabriele Farina, Andrea Celli, Nicola Gatti, and Tuomas Sandholm. Ex ante coordination and collusion in zero-sum multi-player extensive-form games. Advances in Neural Information Processing Systems, 31, 2018.",
+      "John Fearnley, Paul W Goldberg, Alexandros Hollender, and Rahul Savani. The complexity of gradient descent: CLS = PPAD∩PLS. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing, pp. 46-59, 2021.",
+      "Lampros Flokas, Emmanouil-Vasileios Vlatakis-Gkaragkounis, and Georgios Piliouras. Solving min-max optimization with hidden structure via gradient descent ascent. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 2021, virtual, 2021.",
+      "Drew Fudenberg and Jean Tirole. Game Theory. MIT Press, 1991.",
+      "Ian Gemp, Rahul Savani, Marc Lanctot, Yoram Bachrach, Thomas Anthony, Richard Everett, Andrea Tacchetti, Tom Eccles, and János Kramár. Sample-based approximation of Nash in large many-player games via gradient descent. arXiv preprint arXiv:2106.01285, 2021.",
+      "Natalie Gold. Introduction: Teamwork in theory and in practice. In Teamwork, pp. 1-21. Springer, 2005.",
+      "Noah Golowich, Sarath Pattathil, and Constantinos Daskalakis. Tight last-iterate convergence rates for no-regret learning in multi-player games. Advances in Neural Information Processing Systems, 33:20766-20778, 2020.",
+      "Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative Adversarial Nets. In Advances in Neural Information Processing Systems 27: Annual Conference on Neural Information Processing Systems 2014, December 8-13 2014, Montreal, Quebec, Canada, pp. 2672-2680, 2014.",
|
| 1646 |
+
"Hans W Gottinger. J. marschak and roy radner,\"economic theory of teams\"(book review).Theory and Decision,5(3):349,1974.",
|
| 1647 |
+
"James Hannan. 4. approximation to rayes risk in repeated play. In Contributions to the Theory of Games (AM-39), Volume III, pp. 97-140. Princeton University Press, 2016.",
|
| 1648 |
+
"Coretin Hardy, Erwan Le Merrer, and Bruno Sericola. Md-gan: Multi-discriminator generative adversarial networks for distributed datasets. In 2019 IEEE international parallel and distributed processing symposium (IPDPS), pp. 866-877. IEEE, 2019.",
|
| 1649 |
+
"Munther A Hassouneh, Hsien-Chiarn Lee, and Eyad H Abed. Washout filters in feedback control: Benefits, limitations and extensions. In Proceedings of the 2004 American control conference, volume 5, pp. 3950-3955. IEEE, 2004.",
|
| 1650 |
+
"Quan Hoang, Tu Dinh Nguyen, Trung Le, and Dinh Phung. Multi-generator generative adversarial nets. arXiv preprint arXiv:1708.02556, 2017.",
|
| 1651 |
+
"Quan Hoang, Tu Dinh Nguyen, Trung Le, and Dinh Phung. Mgan: Training generative adversarial nets with multiple generators. In International conference on learning representations, 2018.",
|
| 1652 |
+
"Max Jaderberg, Wojciech M Czarnecki, Iain Dunning, Luke Marris, Guy Lever, Antonio Garcia Castaneda, Charles Beattie, Neil C Rabinowitz, Ari S Morcos, Avraham Ruderman, et al. Human-level performance in 3d multiplayer games with population-based reinforcement learning. Science, 364(6443):859-865, 2019.",
|
| 1653 |
+
"Jeongbin Kim, Thomas R Palfrey, and Jeffrey R Zeidel. A theory of games played by teams of players. 2019."
|
| 1654 |
+
],
|
| 1655 |
+
"bbox": [
|
| 1656 |
+
171,
|
| 1657 |
+
102,
|
| 1658 |
+
826,
|
| 1659 |
+
924
|
| 1660 |
+
],
|
| 1661 |
+
"page_idx": 11
|
| 1662 |
+
},
|
| 1663 |
+
{
|
| 1664 |
+
"type": "header",
|
| 1665 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1666 |
+
"bbox": [
|
| 1667 |
+
171,
|
| 1668 |
+
32,
|
| 1669 |
+
478,
|
| 1670 |
+
47
|
| 1671 |
+
],
|
| 1672 |
+
"page_idx": 11
|
| 1673 |
+
},
|
| 1674 |
+
{
|
| 1675 |
+
"type": "page_number",
|
| 1676 |
+
"text": "12",
|
| 1677 |
+
"bbox": [
|
| 1678 |
+
490,
|
| 1679 |
+
946,
|
| 1680 |
+
508,
|
| 1681 |
+
959
|
| 1682 |
+
],
|
| 1683 |
+
"page_idx": 11
|
| 1684 |
+
},
|
| 1685 |
+
{
|
| 1686 |
+
"type": "list",
|
| 1687 |
+
"sub_type": "ref_text",
|
| 1688 |
+
"list_items": [
|
| 1689 |
+
"R. Kleinberg, G. Piliouras, and E. Tardos. Multiplicative updates outperform generic no-regret learning in congestion games. In STOC, 2009.",
|
| 1690 |
+
"GM Korpelevich. The extragradient method for finding saddle points and other problems. Matecon, 12:747-756, 1976.",
|
| 1691 |
+
"Jason D. Lee, Ioannis Panageas, Georgios Piliouras, Max Simchowitz, Michael I. Jordan, and Benjamin Recht. First-order methods almost always avoid strict saddle points. Math. Program., 176(1-2):311-337, 2019. doi: 10.1007/s10107-019-01374-3. URL https://doi.org/10.1007/s10107-019-01374-3.",
|
| 1692 |
+
"Joel Z Leibo, Vinicius Zambaldi, Marc Lanctot, Janusz Marecki, and Thore Graepel. Multi-agent reinforcement learning in sequential social dilemmas. arXiv preprint arXiv:1702.03037, 2017.",
|
| 1693 |
+
"Dan Li, Dacheng Chen, Baihong Jin, Lei Shi, Jonathan Goh, and See-Kiong Ng. Mad-gan: Multivariate anomaly detection for time series data with generative adversarial networks. In International Conference on Artificial Neural Networks, pp. 703-716. Springer, 2019.",
|
| 1694 |
+
"Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax problems. In International Conference on Machine Learning, pp. 6083-6093. PMLR, 2020.",
|
| 1695 |
+
"Richard J Lipton, Evangelos Markakis, and Aranyak Mehta. Playing large games using simple strategies. In Proceedings of the 4th ACM Conference on Electronic Commerce, pp. 36-41, 2003.",
|
| 1696 |
+
"Jakob Marschak. Elements for a theory of teams. Management science, 1(2):127-137, 1955.",
|
| 1697 |
+
"H Brendan McMahan, Geoffrey J Gordon, and Avrim Blum. Planning in the presence of cost functions controlled by an adversary. In Proceedings of the 20th International Conference on Machine Learning (ICML-03), pp. 536-543, 2003.",
|
| 1698 |
+
"R. Mehta, I. Panageas, and G. Piliouras. Natural selection as an inhibitor of genetic diversity: Multiplicative weights updates algorithm and a conjecture of haploid genetics. In ITCS, 2015.",
|
| 1699 |
+
"Panayotis Mertikopoulos, Christos Papadimitriou, and Georgios Piliouras. Cycles in adversarial regularized learning. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 2703-2717. SIAM, 2018.",
|
| 1700 |
+
"Panayotis Mertikopoulos, Houssam Zenati, Bruno Lecouat, Chuan-Sheng Foo, Vijay Chandrasekhar, and Georgios Piliouras. Optimistic mirror descent in saddle-point problems: Going the extra (gradient) mile. In ICLR'19-International Conference on Learning Representations, pp. 1-23, 2019.",
|
| 1701 |
+
"Dov Monderer and Lloyd S Shapley. Potential games. Games and economic behavior, 14(1):124-143, 1996.",
|
| 1702 |
+
"Matej Moravcik, Martin Schmid, Neil Burch, Viliam Lisy, Dustin Morrill, Nolan Bard, Trevor Davis, Kevin Waugh, Michael Johanson, and Michael Bowling. Deepstack: Expert-level artificial intelligence in heads-up no-limit poker. Science, 356(6337):508-513, 2017.",
|
| 1703 |
+
"Mohammad Sal Moslehian. Ky fan inequalities. CoRR, abs/1108.1467, 2011.",
|
| 1704 |
+
"Thomas Nagylaki. The evolution of multilocus systems under weak selection. Genetics, 134(2): 627-647, 1993.",
|
| 1705 |
+
"Arkadj Semenovič Nemirovskij and David Borisovich Yudin. Problem complexity and method efficiency in optimization. 1983.",
|
| 1706 |
+
"Martin A Nowak, Akira Sasaki, Christine Taylor, and Drew Fudenberg. Emergence of cooperation and evolutionary stability in finite populations. Nature, 428(6983):646-650, 2004.",
|
| 1707 |
+
"Christos H Papadimitriou. The complexity of the lin-kernighan heuristic for the traveling salesman problem. SIAM Journal on Computing, 21(3):450-465, 1992.",
|
| 1708 |
+
"Leonid Denisovich Popov. A modification of the arrow-hurwicz method for search of saddle points. Mathematical notes of the Academy of Sciences of the USSR, 28(5):845-848, 1980."
|
| 1709 |
+
],
|
| 1710 |
+
"bbox": [
|
| 1711 |
+
171,
|
| 1712 |
+
102,
|
| 1713 |
+
825,
|
| 1714 |
+
924
|
| 1715 |
+
],
|
| 1716 |
+
"page_idx": 12
|
| 1717 |
+
},
|
| 1718 |
+
{
|
| 1719 |
+
"type": "header",
|
| 1720 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1721 |
+
"bbox": [
|
| 1722 |
+
171,
|
| 1723 |
+
32,
|
| 1724 |
+
478,
|
| 1725 |
+
47
|
| 1726 |
+
],
|
| 1727 |
+
"page_idx": 12
|
| 1728 |
+
},
|
| 1729 |
+
{
|
| 1730 |
+
"type": "page_number",
|
| 1731 |
+
"text": "13",
|
| 1732 |
+
"bbox": [
|
| 1733 |
+
490,
|
| 1734 |
+
946,
|
| 1735 |
+
508,
|
| 1736 |
+
959
|
| 1737 |
+
],
|
| 1738 |
+
"page_idx": 12
|
| 1739 |
+
},
|
| 1740 |
+
{
|
| 1741 |
+
"type": "list",
|
| 1742 |
+
"sub_type": "ref_text",
|
| 1743 |
+
"list_items": [
|
| 1744 |
+
"Julia Robinson. An iterative method of solving a game. Annals of mathematics, pp. 296-301, 1951.",
|
| 1745 |
+
"R.W. Rosenthal. A class of games possessing pure-strategy Nash equilibria. International Journal of Game Theory, 2(1):65-67, 1973.",
|
| 1746 |
+
"Tim Roughgarden. Intrinsic robustness of the price of anarchy. In Proceedings of the forty-first annual ACM symposium on Theory of computing, pp. 513-522, 2009.",
|
| 1747 |
+
"Leonard J. Schulman and Umesh V. Vazirani. The duality gap for two-team zero-sum games. Games Econ. Behav., 115:336-345, 2019a. doi: 10.1016/j.geb.2019.03.011. URL https://doi.org/10.1016/j.geb.2019.03.011.",
|
| 1748 |
+
"Leonard J Schulman and Umesh V Vazirani. The duality gap for two-team zero-sum games. Games and Economic Behavior, 115:336-345, 2019b.",
|
| 1749 |
+
"Hassam Ullah Sheikh, Mina Razghandi, and Ladislau Boloni. Learning distributed cooperative policies for security games via deep reinforcement learning. In 2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC), volume 1, pp. 489-494. IEEE, 2019.",
|
| 1750 |
+
"Karthik Sridharan. Learning from an optimization viewpoint. arXiv preprint arXiv:1204.4145, 2012.",
|
| 1751 |
+
"Karthik Sridharan and Ambuj Tewari. Convex games in banach spaces. $COLT$ 2010, pp. 1, 2010.",
|
| 1752 |
+
"Vasilis Syrgkanis, Alekh Agarwal, Haipeng Luo, and Robert E Schapire. Fast convergence of regularized learning in games. In Advances in Neural Information Processing Systems, pp. 2989-2997, 2015.",
|
| 1753 |
+
"Shichang Tang. Lessons learned from the training of gans on artificial datasets. IEEE Access, 8: 165044-165055, 2020.",
|
| 1754 |
+
"Liang Tong, Sixie Yu, Scott Alfeld, et al. Adversarial regression with multiple learners. In International Conference on Machine Learning, pp. 4946-4954. PMLR, 2018.",
|
| 1755 |
+
"Oriol Vinyals, Igor Babuschkin, Wojciech M Czarnecki, Michael Mathieu, Andrew Dudzik, Junyoung Chung, David H Choi, Richard Powell, Timo Ewalds, Petko Georgiev, et al. Grandmaster level in starcraft ii using multi-agent reinforcement learning. Nature, 575(7782):350-354, 2019.",
|
| 1756 |
+
"Yannick Viossat and Andriy Zapechelyuk. No-regret dynamics and fictitious play. Journal of Economic Theory, 148(2):825-842, 2013.",
|
| 1757 |
+
"Emmanouil-Vasileios Vlatakis-Gkaragkounis, Lampros Flokas, and Georgios Piliouras. Poincaré recurrence, cycles and spurious equilibria in gradient-descent-ascent for non-convex nonconcave zero-sum games. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 10450-10461, 2019. URL https://proceedings.neurips.cc/paper/2019/ hash/6c7cd904122e623ce625613d6af337c4-Abstract.html.",
|
| 1758 |
+
"John Von Neumann. Zur theorie der gesellschaftsspiele. Math, 100:295-320, 1928.",
|
| 1759 |
+
"Bernhard von Stengel and Daphne Koller. Team-maxmin equilibria. Games and Economic Behavior, 21(1-2):309-321, 1997.",
|
| 1760 |
+
"Hongyang Zhang, Susu Xu, Jiantao Jiao, Pengtao Xie, Ruslan Salakhutdinov, and Eric P Xing. Stackelberg gan: Towards provable minimax equilibrium via multi-generator architectures. arXiv preprint arXiv:1811.08010, 2018.",
|
| 1761 |
+
"Youzhi Zhang and Bo An. Converging to team-maxmin equilibria in zero-sum multiplayer games. In International Conference on Machine Learning, pp. 11033-11043. PMLR, 2020.",
|
| 1762 |
+
"Youzhi Zhang, Bo An, and Jakub Černý. Computing ex ante coordinated team-maxmin equilibria in zero-sum multiplayer extensive-form games. arXiv preprint arXiv:2009.12629, 2020.",
|
| 1763 |
+
"Dao Li Zhu and Patrice Marcotte. Co-coercivity and its role in the convergence of iterative schemes for solving variational inequalities. SIAM Journal on Optimization, 6(3):714-726, 1996."
|
| 1764 |
+
],
|
| 1765 |
+
"bbox": [
|
| 1766 |
+
171,
|
| 1767 |
+
102,
|
| 1768 |
+
826,
|
| 1769 |
+
925
|
| 1770 |
+
],
|
| 1771 |
+
"page_idx": 13
|
| 1772 |
+
},
|
| 1773 |
+
{
|
| 1774 |
+
"type": "header",
|
| 1775 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1776 |
+
"bbox": [
|
| 1777 |
+
171,
|
| 1778 |
+
32,
|
| 1779 |
+
478,
|
| 1780 |
+
47
|
| 1781 |
+
],
|
| 1782 |
+
"page_idx": 13
|
| 1783 |
+
},
|
| 1784 |
+
{
|
| 1785 |
+
"type": "page_number",
|
| 1786 |
+
"text": "14",
|
| 1787 |
+
"bbox": [
|
| 1788 |
+
490,
|
| 1789 |
+
946,
|
| 1790 |
+
508,
|
| 1791 |
+
959
|
| 1792 |
+
],
|
| 1793 |
+
"page_idx": 13
|
| 1794 |
+
}
|
| 1795 |
+
]
|
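For consumers of this content-list format, a minimal sketch of loading the file and pulling out the parsed bibliography entries; the keys (`type`, `sub_type`, `list_items`, `page_idx`) mirror the fields visible above, and the path is the one added in this commit.

```python
import json

path = ("2023/Towards convergence to Nash equilibria in two-team zero-sum games/"
        "fca07eac-706b-41aa-8183-c888af669e12_content_list.json")

with open(path) as f:
    blocks = json.load(f)  # top-level JSON array of layout blocks

# Collect every bibliography entry, skipping headers and page numbers.
refs = [item
        for block in blocks
        if block.get("type") == "list" and block.get("sub_type") == "ref_text"
        for item in block["list_items"]]

print(f"{len(refs)} references across {max(b['page_idx'] for b in blocks) + 1} pages")
```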
2023/Towards convergence to Nash equilibria in two-team zero-sum games/fca07eac-706b-41aa-8183-c888af669e12_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2023/Towards convergence to Nash equilibria in two-team zero-sum games/fca07eac-706b-41aa-8183-c888af669e12_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc52755506ca5223ad2d40aa1f825ac32c824301bff31c5c2dc0c6b53a152396
size 2745110
2023/Towards convergence to Nash equilibria in two-team zero-sum games/full.md
ADDED
@@ -0,0 +1,345 @@
# TOWARDS CONVERGENCE TO NASH EQUILIBRIA IN TWO-TEAM ZERO-SUM GAMES

Fivos Kalogiannis
UC Irvine

Ioannis Panageas
UC Irvine

Emmanouil V. Vlatakis-Gkaragkounis
Columbia University

# ABSTRACT

Contemporary applications of machine learning in two-team e-sports and the superior expressivity of multi-agent generative adversarial networks raise important and overlooked theoretical questions regarding optimization in two-team games. Formally, two-team zero-sum games are defined as multi-player games where players are split into two competing sets of agents, each experiencing a utility identical to that of their teammates and opposite to that of the opposing team. We focus on the solution concept of Nash equilibria (NE). We first show that computing NE for this class of games is hard for the complexity class CLS. To further examine the capabilities of online learning algorithms in games with full-information feedback, we propose a benchmark of a simple, yet nontrivial, family of such games. These games do not enjoy the properties used to prove convergence for relevant algorithms. In particular, we use a dynamical-systems perspective to demonstrate that gradient descent-ascent, its optimistic variant, optimistic multiplicative weights update, and extra gradient fail to converge (even locally) to a Nash equilibrium. On a brighter note, we propose a first-order method that leverages control-theory techniques and, under some conditions, enjoys last-iterate local convergence to a Nash equilibrium. We also believe our proposed method is of independent interest for general min-max optimization.
# 1 INTRODUCTION

Online learning shares an enduring relationship with game theory that has a very early onset, dating back to the analysis of fictitious play by Robinson (1951) and Blackwell's approachability theorem (Blackwell, 1956). A key question within this context is whether self-interested agents can arrive at a game-theoretic equilibrium in an independent and decentralized manner with only limited feedback from their environment. Learning dynamics that converge to different notions of equilibria are known to exist for two-player zero-sum games (Robinson, 1951; Arora et al., 2012; Daskalakis et al., 2011), potential games (Monderer & Shapley, 1996), near-potential games (Anagnostides et al., 2022b), socially concave games (Golowich et al., 2020), and extensive-form games (Anagnostides et al., 2022a). We try to push the boundary further and explore whether equilibria, in particular Nash equilibria, can be reached by agents that follow decentralized learning algorithms in two-team zero-sum games.

Team competition has played a central role in the development of game theory (Marschak, 1955; von Stengel & Koller, 1997; Bacharach, 1999; Gold, 2005), economics (Marschak, 1955; Gottinger, 1974), and evolutionary biology (Nagylaki, 1993; Nowak et al., 2004). Recently, competition among teams has attracted the interest of the machine learning community due to the advances that multi-agent systems have accomplished: e.g., multi-GANs (Hoang et al., 2017; Hardy et al., 2019) for generative tasks, adversarial regression with multiple learners (Tong et al., 2018), or AI agents competing in e-sports (e.g., CTF (Jaderberg et al., 2019) or StarCraft (Vinyals et al., 2019)) as well as card games (Moravčík et al., 2017; Brown & Sandholm, 2018; Bowling et al., 2015).

Our class of games. We turn our attention to two-team zero-sum games, a quite general class of min-max optimization problems that includes bilinear games and a wide range of nonconvex-nonconcave games as well. In this class of games, players fall into two teams of size $n$ and $m$ and submit their own randomized strategy vectors independently. We note that the games we focus on are not restricted to team games in the narrow sense of the term "team" as we use it in sports, games, and so on; the players play independently and do not follow a central coordinating authority. Rather, for the purpose of this paper, teams are constituted by agents that merely enjoy the same utility function. This might already hint that the solution concept we engage with is the Nash equilibrium (NE). Another class of games captured by this framework is the class of adversarial potential games. In these games, the condition that all players of the same team experience the same utility is weakened as long as there exists a potential function that can track differences in the utility of each player when they unilaterally deviate from a given strategy profile (see Appendix A.2 for a formal definition). A similar setting has been studied in the context of nonatomic games (Babaioff et al., 2009).

Positive duality gap. In two-player zero-sum games, i.e., $n = m = 1$, min-max (respectively max-min) strategies are guaranteed to form a Nash equilibrium due to Von Neumann's minmax theorem (Von Neumann, 1928), ultimately endowing the game with a unique value. The challenges arise for the case of $n, m > 1$; Schulman & Vazirani (2019b) prove that, in general, two-team games do not have a unique value. They do so by presenting a family of team games with a positive duality gap, together with bounds concerning this gap. These bounds quantify the effect of exchanging the order of commitment to their strategy either between the teams as a whole or the individual players.

Solution concept. In this work, we examine the solution concept of Nash equilibrium (NE). Under a Nash equilibrium, no player can improve their utility by unilaterally deviating. The main downside of a NE for team games is the fact that such an equilibrium can be arbitrarily suboptimal for the team (Basilico et al., 2017a).

This is one of the reasons that the solution concept of team-maxmin equilibrium with a coordination device (TMECor) has dominated the contemporary literature on team games, especially in regard to applications (Farina et al., 2018; Zhang et al., 2020; Cacciamani et al., 2021). Under a TMECor, players are allowed to communicate before the game and decide upon combinations of strategies to be played during the game using an external source of randomness.

The undeniable advantage of a TMECor is that the expected utility of the team under it is greater than the expected utility under a NE (Basilico et al., 2017a). Nevertheless, this favorable property of TMECor can by no means render the study of NE irrelevant. In fact, the study of NE is always of independent interest within the literature of algorithmic game theory, especially questions concerning computational complexity. Moreover, there exist settings in which ex ante coordination cannot be expected to be possible or even sensible; for example, in (i) environments where the external sources of randomness are unreliable, nonexistent, or visible to the adversarial team, (ii) games in which players cannot know in advance who they share a common utility with, and (iii) adversarial potential games. These games can model naturally occurring settings such as (a) security games with multiple uncoordinated defenders versus multiple similarly uncoordinated attackers, (b) the load-balancing "game" between telecommunication service providers trying to minimize the maximum delay of service experienced by their customers versus the service users who try to individually utilize the maximum amount of broadband possible, and (c) the weak selection model of evolutionary biology, where a species as a whole is a team, the genes of its population are the players, and the alleles of each gene are in turn the actions of a player; the allele frequencies are independent across genes (Nagylaki, 1993; Nowak et al., 2004; Mehta et al., 2015).

Concluding, we could not possibly argue for a single correct solution concept for two-team games; there is no silver bullet. In contrast, one has to assess which is the most fitting based on the constraints of a given setting. A Nash equilibrium is a cornerstone concept of game theory, and examining its properties in different games is always important.

The optimization point of view. We focus on the solution concept of NE, and we first note that computing local NE in general nonconvex-nonconcave games is PPAD-complete (Daskalakis et al., 2009; 2021). Thus, all well-celebrated online learning, first-order methods like gradient descent-ascent (Lin et al., 2020; Daskalakis & Panageas, 2019), its optimistic variant (Popov, 1980; Chiang et al., 2012; Sridharan & Tewari, 2010), optimistic multiplicative weights update (Sridharan, 2012), and the extra gradient method (Korpelevich, 1976) would require an exponential number of steps in the parameters of the problem in order to compute an approximate NE under the oracle optimization model of (Nemirovskij & Yudin, 1983). Additionally, in the continuous-time regime, similar classes of games exhibit behaviors antithetical to convergence like cycling, recurrence, or chaos (Vlatakis-Gkaragkounis et al., 2019). Second, even if a regret notion within the context of team competition could be defined, no-regret dynamics are guaranteed to converge only to the set of coarse correlated equilibria (CCE) (Fudenberg, 1991; Hannan, 2016). CCE is a weaker equilibrium notion whose solutions could potentially be exclusively supported on strictly dominated strategies, even for simple symmetric two-player games (see also (Viossat & Zapechelyuk, 2013)).

Surely, the aforementioned intractability remarks for the general case of nonconvex-nonconcave min-max problems provide significant insight. But they cannot per se address the issue of computing Nash equilibria when the game is equipped with a particular structure, i.e., that of two-team zero-sum games. In fact, our paper addresses the following question:

Can we get provable convergence guarantees to NE of decentralized first-order methods in two-team zero-sum games?
Our results. First, with regard to computational complexity, we prove that computing an approximate (and possibly mixed) NE in two-team zero-sum games is CLS-hard (Theorem 3.1); i.e., it is computationally harder than finding pure NE in a congestion game or computing an approximate fixed point of gradient descent.

Second, regarding online learning for equilibrium computation, we prove that a number of established, decentralized, first-order methods are not fit for the purpose and fail to converge even asymptotically. Specifically, we present a simple, yet nontrivial, family of two-team zero-sum games (with each team consisting of two players) where projected gradient descent-ascent (GDA), optimistic gradient descent-ascent (OGDA), optimistic multiplicative weights update (OMWU), and the extra gradient method (EG) fail to locally converge to a mixed NE (Theorem 3.3). More broadly, in the case of GDA in nondegenerate team games with unique mixed NE, we obtain an even stronger result for any high-dimensional configuration of actions and players (Theorem 3.2). To the best of our knowledge, the described family of games is the first of its kind in which all these methods provably fail to converge at the same time.

Third, we propose a novel first-order method inspired by adaptive control (Bazanella et al., 1997; Hassouneh et al., 2004). In particular, we use a technique that manages to stabilize unstable fixed points of a dynamical system without prior knowledge of their position and without introducing new ones. It is important to note that this method is a modification of GDA that uses a stabilizing feedback and maintains the decentralized nature of GDA.

Finally, in Section 4 we provide a series of experiments on simple two-team zero-sum GANs. We also show that multi-GAN architectures achieve better performance than single-agent ones relative to the network capacity when they are trained on synthetic or real-world datasets like CIFAR10.

Existing algorithms for NE in multiplayer games. The focus of the present paper is examining algorithms for the setting of repeated games (Cesa-Bianchi & Lugosi, 1999, Chapter 7). If we do not restrict ourselves to this setting, there are numerous centralized algorithms (Lipton et al., 2003; Berg & Sandholm, 2017) and heuristics (Gemp et al., 2021) that solve the problem of computing Nash equilibria in general multi-player games.
# 2 PRELIMINARIES

Our setting. A two-team game in normal form is defined as a tuple $\Gamma(\mathcal{N},\mathcal{M},\mathcal{A},\mathcal{B},\{U_A,U_B\})$. The tuple is defined by

(i) a finite set of $n = |\mathcal{N}|$ players belonging to team $A$, as well as a finite set of $m = |\mathcal{M}|$ players belonging to team $B$;

(ii) a finite set of actions (or pure strategies) $\mathcal{A}_i = \{\alpha_1, \ldots, \alpha_{n_i}\}$ per player $i \in \mathcal{N}$, where $\mathcal{A} := \prod_i \mathcal{A}_i$ denotes the ensemble of all possible action profiles of team $A$; and, respectively, a finite set of actions (or pure strategies) $\mathcal{B}_i = \{\beta_1, \ldots, \beta_{n_i}\}$ per player $i \in \mathcal{M}$, where $\mathcal{B} := \prod_i \mathcal{B}_i$;

(iii) a utility function for team $A$, $U_A: \mathcal{A} \times \mathcal{B} \to \mathbb{R}$ (resp. $U_B: \mathcal{A} \times \mathcal{B} \to \mathbb{R}$ for team $B$).

We also use $\alpha = (\alpha_{1},\ldots ,\alpha_{n})$ to denote the strategy profile of team $A$ players and $\beta = (\beta_{1},\dots,\beta_{m})$ the strategy profile of team $B$ players.

Finally, each team's payoff function is denoted by $U_A, U_B: \mathcal{A} \times \mathcal{B} \to \mathbb{R}$, where the individual utility of a player is identical to that of her teammates, i.e., $U_i = U_A$ for all $i \in \mathcal{N}$ and $U_j = U_B$ for all $j \in \mathcal{M}$. In this general context, players could also submit mixed strategies, i.e., probability distributions over actions. Correspondingly, we define the product distributions $\boldsymbol{x} = (\boldsymbol{x}_1, \dots, \boldsymbol{x}_n)$, $\boldsymbol{y} = (\boldsymbol{y}_1, \dots, \boldsymbol{y}_m)$ as team $A$'s and team $B$'s strategies respectively, in which $\boldsymbol{x}_i \in \Delta(\mathcal{A}_i)$ and $\boldsymbol{y}_j \in \Delta(\mathcal{B}_j)$. Conclusively, we will write $\mathcal{X} := \prod_{i \in \mathcal{N}} \mathcal{X}_i = \prod_{i \in \mathcal{N}} \Delta(\mathcal{A}_i)$, $\mathcal{Y} := \prod_{i \in \mathcal{M}} \mathcal{Y}_i = \prod_{i \in \mathcal{M}} \Delta(\mathcal{B}_i)$ for the spaces of mixed strategy profiles of teams $A, B$. A two-team game is called two-team zero-sum if $U_B = -U_A = U$, which is the main focus of this paper. Moreover, we assume that the game is succinctly representable and satisfies the polynomial expectation property (Daskalakis et al., 2006). This means that given a mixed strategy profile, the utility of each player can be computed in polynomial time in the number of agents, the sum of the number of strategies of each player, and the number of bits required to represent the mixed strategy profile.

A Nash equilibrium (NE) is a strategy profile $(\pmb{x}^{*},\pmb{y}^{*})\in \mathcal{X}\times \mathcal{Y}$ such that

$$
\left\{ \begin{array}{l} U(\boldsymbol{x}^{*}, \boldsymbol{y}^{*}) \leq U\left(\boldsymbol{x}_{i}, \boldsymbol{x}_{-i}^{*}, \boldsymbol{y}^{*}\right), \quad \forall \boldsymbol{x}_{i} \in \mathcal{X}_{i} \\ U\left(\boldsymbol{x}^{*}, \boldsymbol{y}^{*}\right) \geq U\left(\boldsymbol{x}^{*}, \boldsymbol{y}_{j}, \boldsymbol{y}_{-j}^{*}\right), \quad \forall \boldsymbol{y}_{j} \in \mathcal{Y}_{j} \end{array} \right. \tag{NE}
$$

A first approach to computing NE in two-team zero-sum games. Due to the multilinearity of the utility and the existence of a duality gap, the linear programming method used in two-player zero-sum games cannot be used to compute a Nash equilibrium. Toward the goal of computing Nash equilibria in two-team zero-sum games, we have experimented with a selection of online learning, first-order methods that have been utilized with varying success in the two-player zero-sum setting. Namely, we analyze the following methods: (i) gradient descent-ascent (GDA), (ii) optimistic gradient descent-ascent (OGDA), (iii) the extra gradient method (EG), and (iv) the optimistic multiplicative weights update method (OMWU). For their precise definitions, we refer to Appendix B; a rough sketch of the first two is given below.
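For concreteness, here is a minimal sketch of the first two updates over a product of simplices. It is not the paper's exact formulation (that is given in Appendix B): the projection is a standard sort-based Euclidean projection, and all function names are illustrative.

```python
import numpy as np

def project_simplex(v):
    # Euclidean projection onto the probability simplex (sort-based).
    u = np.sort(v)[::-1]
    css = np.cumsum(u) - 1.0
    idx = np.arange(1, len(v) + 1)
    rho = np.nonzero(u - css / idx > 0)[0][-1]
    return np.maximum(v - css[rho] / (rho + 1.0), 0.0)

def gda_step(x, y, grad_x, grad_y, eta):
    # Projected GDA: Team A (x) descends on U, Team B (y) ascends.
    x_new = [project_simplex(xi - eta * gi) for xi, gi in zip(x, grad_x(x, y))]
    y_new = [project_simplex(yj + eta * gj) for yj, gj in zip(y, grad_y(x, y))]
    return x_new, y_new

def ogda_step(x, y, x_prev, y_prev, grad_x, grad_y, eta):
    # Optimistic GDA: step along (2 * current - previous) gradients.
    gx, gy = grad_x(x, y), grad_y(x, y)
    px, py = grad_x(x_prev, y_prev), grad_y(x_prev, y_prev)
    x_new = [project_simplex(xi - eta * (2 * g - p)) for xi, g, p in zip(x, gx, px)]
    y_new = [project_simplex(yj + eta * (2 * g - p)) for yj, g, p in zip(y, gy, py)]
    return x_new, y_new
```

Here `grad_x` and `grad_y` are assumed to return the per-player gradients of the team utility $U$ with respect to each player's mixed strategy.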
The folklore fact below will play a key role hereafter.

Fact 2.1. Any fixed point of the aforementioned discrete-time dynamics (apart from OMWU) on the utility function necessarily corresponds to a Nash equilibrium of the game.

Hence, an important test for the asymptotic behavior of the GDA, OGDA, EG, and OMWU methods is to examine whether these methods stabilize around their fixed points, which effectively constitute the Nash equilibria of the game. In Section 3.2, we show that in the absence of pure Nash equilibria, all the above methods fail to stabilize on their fixed points even for a simple class of two-team games with $(n = 2, m = 2)$. Consequently, they fail to converge to the mixed Nash equilibrium of the game.

The presence of these results demonstrates the need for a different approach that lies outside the scope of traditional optimization techniques. Inspired by the applications of washout filters to stabilize unknown fixed points and the adaptive-control generalizations of the former, we design a new variant of GDA "vaned" with a feedback loop dictated by a pair of matrices. In contrast to the aforementioned conventional methods, our proposed technique surprisingly accomplishes asymptotic last-iterate convergence to its fixed point, i.e., the mixed Nash equilibria of the team game.

$(\mathbf{K}, \mathbf{P})$-vaned GDA Method. After concatenating the vectors of the minimizing and the maximizing agents, $\boldsymbol{z}^{(k)} = (\boldsymbol{x}^{(k)}, \boldsymbol{y}^{(k)})$, our method for appropriate matrices $\mathbf{K}, \mathbf{P}$ reads:

$$
\left\{ \begin{array}{l} \boldsymbol{z}^{(k+1)} = \Pi_{\mathcal{Z}} \left\{ \boldsymbol{z}^{(k)} + \eta \begin{pmatrix} -\nabla_{\boldsymbol{x}} f(\boldsymbol{z}^{(k)}) \\ \nabla_{\boldsymbol{y}} f(\boldsymbol{z}^{(k)}) \end{pmatrix} + \eta \mathbf{K} \left( \boldsymbol{z}^{(k)} - \boldsymbol{\theta}^{(k)} \right) \right\} \\ \boldsymbol{\theta}^{(k+1)} = \Pi_{\mathcal{Z}} \left\{ \boldsymbol{\theta}^{(k)} + \eta \mathbf{P} \left( \boldsymbol{z}^{(k)} - \boldsymbol{\theta}^{(k)} \right) \right\} \end{array} \right. \tag{KPV-GDA}
$$

Intuitively, the additional variable $\pmb{\theta}^{(k)}$ holds an estimate of the fixed point, and through the feedback law $\eta \mathbf{K}(\pmb{z}^{(k)} - \pmb{\theta}^{(k)})$ the vector $\pmb{z}$ stabilizes around that estimate, which in turn slowly moves towards the true fixed point of the GDA dynamics.
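A minimal sketch of this update follows, assuming the caller supplies the projection $\Pi_{\mathcal{Z}}$ and a routine `grad` returning the concatenated signed field $(-\nabla_{\boldsymbol{x}} f, +\nabla_{\boldsymbol{y}} f)$; the names are illustrative, not the paper's implementation.

```python
import numpy as np

def kpv_gda(grad, z0, K, P, eta=0.05, iters=20_000, project=lambda z: z):
    # (K, P)-vaned GDA, transcribing the (KPV-GDA) update above:
    # z holds the concatenated iterate (x, y); theta is the running
    # fixed-point estimate that the feedback term pulls z towards.
    z, theta = z0.copy(), z0.copy()
    for _ in range(iters):
        z_next = project(z + eta * grad(z) + eta * K @ (z - theta))
        theta = project(theta + eta * P @ (z - theta))
        z = z_next
    return z, theta
```

Setting $\mathbf{K} = \mathbf{P} = \mathbf{0}$ recovers plain GDA, which makes the role of the feedback term explicit.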
# 2.1 TWO ILLUSTRATIVE EXAMPLES

Our first example plays a dual role: first, it demonstrates how two-team min-max competition can capture the formulation of multi-agent GAN architectures; second, it hints at the discrepancy

Figure 1: Parameter training of the configuration under different algorithms

between the results of optimization methods, since, as we will see, GDA will not converge to the Nash equilibrium/ground-truth distribution. Generally, the solution that is sought after is the min-max solution of the objective function (Goodfellow et al., 2014), which is NP-hard to compute in the general case (Borgs et al., 2008); nevertheless, applications of GANs have proven that first-order stationary points of the objective function suffice to produce samples of very good quality.

# 2.1.1 LEARNING A MIXTURE OF GAUSSIANS WITH MULTI-AGENT GANS

Consider the case of $\mathcal{O}$, a Gaussian mixture distribution with two components, $C_1 \sim \mathcal{N}(\pmb{\mu}, \mathbf{I})$ and $C_2 \sim \mathcal{N}(-\pmb{\mu}, \mathbf{I})$, and positive mixture weights $\pi_1, \pi_2$ such that $\pi_1 + \pi_2 = 1$ and $\pi_1, \pi_2 \neq \frac{1}{2}$.

To learn the distribution above, we utilize an instance of a Team-WGAN in which there exists a generating team of agents $G_{p}:\mathbb{R}\to \mathbb{R},G_{\theta}:\mathbb{R}^{n}\to \mathbb{R}^{n}$, and a discriminating team of agents $D_{\boldsymbol{v}}:\mathbb{R}^n\to \mathbb{R},D_{\boldsymbol{w}}:\mathbb{R}^n\to \mathbb{R}$, all described by the following equations:

$$
\begin{array}{l} \text{Generators: } G_{p}(\zeta) = p + \zeta, \quad G_{\theta}(\boldsymbol{\xi}) = \boldsymbol{\xi} + \boldsymbol{\theta} \\ \text{Discriminators: } D_{\boldsymbol{v}}(\boldsymbol{y}) = \left\langle \boldsymbol{v}, \boldsymbol{y} \right\rangle, \quad D_{\boldsymbol{w}}(\boldsymbol{y}) = \sum_{i} w_{i} y_{i}^{2} \end{array} \tag{1}
$$

The generating agent $G_{\theta}$ maps random noise $\boldsymbol{\xi} \sim \mathcal{N}(0, \mathbf{I})$ to samples, while the generating agent $G_{p}(\zeta)$, utilizing an independent source of randomness $\zeta \sim \mathcal{N}(0, 1)$, probabilistically controls the sign of the output of the generator $G_{\theta}$. The probability of ultimately generating a sample $\boldsymbol{y} = \boldsymbol{\xi} + \boldsymbol{\theta}$ is in expectation equal to $p$, while the probability of the sample being $\boldsymbol{y} = -\boldsymbol{\xi} - \boldsymbol{\theta}$ is equal to $1 - p$.

On the other end stands the discriminating team of $D_{\boldsymbol{v}}, D_{\boldsymbol{w}}$. The discriminators $D_{\boldsymbol{v}}(\pmb{y}), D_{\boldsymbol{w}}(\pmb{y})$ map any given sample $\pmb{y}$ to a scalar value accounting for the "realness" or "fakeness" of it (negative meaning fake, positive meaning real). The discriminators are disparate in the way they measure the realness of samples, as seen in their definitions.

We follow the formalism of the Wasserstein GAN to form the optimization objective:

$$
\max_{\boldsymbol{v}, \boldsymbol{w}} \min_{\boldsymbol{\theta}, p} \left\{ \mathbb{E}_{\boldsymbol{y} \sim \mathcal{O}} \left[ D_{\boldsymbol{v}}(\boldsymbol{y}) + D_{\boldsymbol{w}}(\boldsymbol{y}) \right] - \mathbb{E}_{\substack{\boldsymbol{\xi} \sim \mathcal{N}(0, \mathbf{I}), \\ \zeta \sim \mathcal{N}(0, 1)}} \left[ \begin{array}{c} G_{p}(\zeta) \cdot \left( D_{\boldsymbol{v}}\left( G_{\boldsymbol{\theta}}(\boldsymbol{\xi}) \right) + D_{\boldsymbol{w}}\left( G_{\boldsymbol{\theta}}(\boldsymbol{\xi}) \right) \right) \\ + \\ \left( 1 - G_{p}(\zeta) \right) \cdot \left( D_{\boldsymbol{v}}\left( -G_{\boldsymbol{\theta}}(\boldsymbol{\xi}) \right) + D_{\boldsymbol{w}}\left( -G_{\boldsymbol{\theta}}(\boldsymbol{\xi}) \right) \right) \end{array} \right] \right\} \tag{2}
$$

Equation (2) yields the simpler form:

$$
\max_{\boldsymbol{v}, \boldsymbol{w}} \min_{\boldsymbol{\theta}, p} \; (\pi_{1} - \pi_{2}) \boldsymbol{v}^{T} \boldsymbol{\mu} - 2p \boldsymbol{v}^{T} \boldsymbol{\theta} + \boldsymbol{v}^{T} \boldsymbol{\theta} + \sum_{i=1}^{n} w_{i} \left( \mu_{i}^{2} - \theta_{i}^{2} \right) \tag{3}
$$

It is easy to check that Nash equilibria of (2) must satisfy:

$$
\left\{ \begin{array}{l} \boldsymbol{\theta} = \boldsymbol{\mu}, \quad p = 1 - \pi_{2} = \pi_{1}, \\ \boldsymbol{\theta} = -\boldsymbol{\mu}, \quad p = 1 - \pi_{1} = \pi_{2}. \end{array} \right.
$$

Figure 1 demonstrates GDA's failure, as well as OGDA, EG, and our proposed method KPV-GDA succeeding to converge to the above Nash equilibria while simultaneously discovering the ground-truth mixture.
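The closed form (3) is simple enough to experiment with directly. Below is a sketch of its signed gradient field for a one-dimensional instance with assumed parameters ($n = 1$, $\mu = 1$, $\pi_1 = 0.75$); feeding it to the KPV-GDA sketch above, or to plain GDA, lets one compare trajectories against the equilibria just derived.

```python
import numpy as np

mu, pi1, pi2 = 1.0, 0.75, 0.25  # illustrative ground-truth parameters

def signed_grad(z):
    # z = (theta, p, v, w); returns (-grad_{theta,p} f, +grad_{v,w} f)
    # for f as in (3): minimizers descend, maximizers ascend.
    theta, p, v, w = z
    d_theta = -2.0 * p * v + v - 2.0 * w * theta
    d_p = -2.0 * v * theta
    d_v = (pi1 - pi2) * mu - 2.0 * p * theta + theta
    d_w = mu**2 - theta**2
    return np.array([-d_theta, -d_p, d_v, d_w])

# Sanity check: the field vanishes at the equilibrium (theta, p) = (mu, pi1).
assert np.allclose(signed_grad(np.array([1.0, 0.75, 0.0, 0.0])), 0.0)
```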
# 2.1.2 MULTIPLAYER MATCHING PENNIES

Interestingly enough, there are non-trivial instances of two-team competition settings in which even OGDA and EG fail to converge. Such is the case for a team version of the well-known game of matching pennies. The game can be briefly described as follows: "coordinate with your teammates to play a game of matching pennies against the opposing team; fail to coordinate and pay a penalty". (We note that this game is a special case of the game presented in Section 3.3.) As we can see in Figures 2a and 2b, this multiplayer generalized matching pennies game constitutes an excellent benchmark on which all traditional gradient-flow discretizations fail under the perfect competition setting. Interestingly, we are not aware of a similar example in the min-max literature, and it has been our starting point for seeking new optimization techniques inspired by control theory. Indeed, the KPV-GDA variation with $(\mathbf{K},\mathbf{P}) = (-1.1\cdot \mathbf{I},0.3\cdot \mathbf{I})$ manages to converge to the unique mixed Nash equilibrium of the game. In the following sections, we provide theorems that formally explain the behavior of the examined dynamics.

(a) Generalized matching pennies under different algorithms. For the precise definition of the game, we refer to Appendix C.4.

(b) Projected trajectory of Team A under different algorithms. The sketched surface is not part of the feasible team strategy profiles (product of distributions).

# 3 MAIN RESULTS

In this section, we will prove that computing a Nash equilibrium in two-team zero-sum games is computationally hard, and thus getting a polynomial-time algorithm that computes a Nash equilibrium is unlikely. Next, we will demonstrate the shortcomings of an array of commonly used online learning, first-order methods, and then we will provide a novel, decentralized, first-order method that locally converges to NE under some conditions.
# 3.1 ON THE HARDNESS OF COMPUTING NE IN TWO-TEAM ZERO-SUM GAMES

As promised, our first statement characterizes the hardness of NE computation in two-team zero-sum games:

Theorem 3.1 (CLS-hard). Computing a Nash equilibrium in a succinctly represented two-team zero-sum game is CLS-hard.

The main idea of the proof of Theorem 3.1 relies on a reduction from the problem of approximating Nash equilibria in congestion games, which has been shown to be complete for the complexity class CLS (continuous local search), the class capturing local optimization problems over continuous domains. We defer the proof of the above theorem to the paper's supplement.

# 3.2 FAILURE OF COMMON ONLINE, FIRST-ORDER METHODS

The negative computational complexity result we proved for two-team zero-sum games (Theorem 3.1) does not preclude the prospect of attaining algorithms (learning first-order methods) that converge to Nash equilibria. Unfortunately, we prove that these methods cannot guarantee convergence to Nash equilibria in two-team zero-sum games in general.

In this subsection, we are going to construct a family of two-team zero-sum games with the property that the dynamics of GDA, OGDA, OMWU, and EG are unstable on Nash equilibria. This result is indicative of the challenges that lie in the min-max optimization of two-team zero-sum games and the reason that provable, nonasymptotic convergence guarantees of online learning have not yet been established.

Before defining our benchmark game, we prove an important theorem which states that GDA does not converge to mixed Nash equilibria. This fact is a stepping stone in constructing the family of two-team zero-sum games later. We present the proofs of all of the statements below in detail in the paper's appendix (Appendix B).

Weakly-stable Nash equilibrium. (Kleinberg et al., 2009; Mehta et al., 2015) Consider the set of Nash equilibria with the property that if any single randomizing agent of one team is forced to play any strategy in their current support with probability one, all other agents of the same team must remain indifferent between the strategies in their support. Such Nash equilibria are called weakly-stable. We note that pure Nash equilibria are trivially weakly-stable. It has been shown that mixed Nash equilibria are not weakly-stable in generic games<sup>2</sup>.

We can show that Nash equilibria that are not weakly-stable are in fact unstable for GDA and, moreover, through standard dynamical-systems machinery, that the set of initial conditions that converge to Nash equilibria that are not weakly-stable must be of Lebesgue measure zero. Formally, we prove that:

Theorem 3.2 (Non weakly-stable Nash are unstable). Consider a two-team zero-sum game with the utility function of Team $B$ ($y$ vector) being $U(\boldsymbol{x}, \boldsymbol{y})$ and of Team $A$ ($x$ vector) being $-U(\boldsymbol{x}, \boldsymbol{y})$. Moreover, assume that $(\boldsymbol{x}^*, \boldsymbol{y}^*)$ is a Nash equilibrium of full support that is not weakly-stable. It follows that the set of initial conditions from which GDA converges to $(\boldsymbol{x}^*, \boldsymbol{y}^*)$ is of measure zero for step size $\eta < \frac{1}{L}$, where $L$ is the Lipschitz constant of $\nabla U$.
# 3.3 GENERALIZED MATCHING PENNIES (GMP)

Inspired by Theorem 3.2, in this section we construct a family of two-team zero-sum games for which the GDA, OGDA, OMWU, and EG methods fail to converge (if the initialization is a random point in the simplex, the probability of convergence of the aforementioned methods is zero). The intuition is to construct a family of games, each of which has only mixed Nash equilibria (that are not weakly-stable); i.e., the constructed games should lack pure Nash equilibria. Using Theorem 3.2, this would immediately imply our claim for GDA. It turns out that OGDA, OMWU, and EG also fail to converge for the same family.

Definition of GMP. Consider a setting with two teams (Team $A$, Team $B$), each of which has $n = 2$ players. Inspired by the standard matching pennies game and the game defined in (Schulman & Vazirani, 2019a), we allow each agent $i$ to have two strategies/actions, that is, $S = \{H, T\}$ for both teams, with $2^4$ possible strategy profiles. In case all the members of a team choose the same strategy, say $H$ or $T$, the team "agrees" to play $H$ or $T$ (otherwise the team "does not agree"). The payoff bimatrix over the resulting team profiles (rows for Team $A$, columns for Team $B$) reads:

<table><tr><td></td><td>HH</td><td>HT/TH</td><td>TT</td></tr><tr><td>HH</td><td>1,-1</td><td>ω,-ω</td><td>-1,1</td></tr><tr><td>HT/TH</td><td>-ω,ω</td><td>0,0</td><td>-ω,ω</td></tr><tr><td>TT</td><td>-1,1</td><td>ω,-ω</td><td>1,-1</td></tr></table>

Thus, in the case that both teams "agree", the payoff of each team is actually the payoff of two-player matching pennies. If one team "agrees" and the other does not, the team that "agrees" enjoys the payoff $\omega \in (0,1)$ and the other team suffers a penalty $\omega$. If both teams fail to "agree", both teams get payoff zero. Let $x_{i}$ with $i \in \{1,2\}$ be the probability that agent $i$ of Team $A$ chooses $H$ and $1 - x_{i}$ the probability that she chooses $T$. We also denote by $\mathbf{x}$ the vector of probabilities for Team $A$. Similarly, we denote by $y_{i}$ for $i \in \{1,2\}$ the probability that agent $i$ of Team $B$ chooses $H$, by $1 - y_{i}$ the probability that she chooses $T$, and by $\mathbf{y}$ the corresponding probability vector.

Properties of GMP. An important remark on the properties of our presented game is due. Existing literature tackles settings with (i) (weak) monotonicity (Mertikopoulos et al., 2019; Diakonikolas et al., 2021), (ii) cocoercivity (Zhu & Marcotte, 1996), (iii) zero duality gap (Von Neumann, 1928), (iv) unconstrained solution space (Golowich et al., 2020). Our game is carefully crafted and, although it has a distinct structure and is nonconvex-nonconcave only due to multilinearity, satisfies none of the latter properties. This makes the (local) convergence of our proposed method even more surprising. (See also Appendix B.6.)

The first fact about the game that we defined is that for $\omega \in (0,1)$, there is only one Nash equilibrium $(\pmb{x}^{*},\pmb{y}^{*})$, namely the uniform strategy, i.e., $x_{1}^{*} = x_{2}^{*} = y_{1}^{*} = y_{2}^{*} = \frac{1}{2}$.

Lemma 3.1 (GMP has a unique Nash). The Generalized Matching Pennies game exhibits a unique Nash equilibrium, which is $(\pmb{x}^{*},\pmb{y}^{*}) = ((\frac{1}{2},\frac{1}{2}),(\frac{1}{2},\frac{1}{2}))$.
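A short numerical check of Lemma 3.1 follows, assuming the payoff convention of the table above ($H$ encoded as 0, $\omega \in (0,1)$): at the uniform profile, every unilateral deviation leaves a player's expected payoff at exactly zero, so no player can profit by deviating.

```python
import itertools
import numpy as np

OMEGA = 0.5  # any value in (0, 1)

def payoff_A(a1, a2, b1, b2):
    # Team A's payoff in GMP (0 = H, 1 = T); Team B receives the negative.
    if a1 == a2 and b1 == b2:      # both teams agree: matching pennies
        return 1.0 if a1 == b1 else -1.0
    if a1 == a2:                   # only Team A agrees
        return OMEGA
    if b1 == b2:                   # only Team B agrees
        return -OMEGA
    return 0.0                     # neither team agrees

def expected_payoff(x1, x2, y1, y2):
    # Expected Team A payoff under independent (product) play;
    # each argument is that player's probability of playing H.
    pr = lambda p, s: p if s == 0 else 1.0 - p
    return sum(pr(x1, a1) * pr(x2, a2) * pr(y1, b1) * pr(y2, b2)
               * payoff_A(a1, a2, b1, b2)
               for a1, a2, b1, b2 in itertools.product((0, 1), repeat=4))

# Unilateral deviations from the uniform profile change nothing.
for dev in np.linspace(0.0, 1.0, 11):
    assert abs(expected_payoff(dev, 0.5, 0.5, 0.5)) < 1e-12
    assert abs(expected_payoff(0.5, 0.5, dev, 0.5)) < 1e-12
```

This only certifies the equilibrium conditions at the uniform profile; uniqueness is the substance of the lemma and is proven in the appendix.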
Remark 1. The fact that the game we defined has a unique Nash equilibrium that lies in the interior of $[0,1]^4$ is crucial for our negative convergence results later in the section, as we will show that it is not a weakly-stable Nash equilibrium; the negative result about GDA will then be a corollary of Theorem 3.2. We also note that if $\omega = 1$ then there are more Nash equilibria, in particular $(0,0), (1,0), (0,1), (1,1)$, which are pure.

The following theorem is the main (negative) result of this section.

Theorem 3.3 (GDA, OGDA, EG, and OMWU fail). Consider the GMP game with $\omega \in (0,1)$. Assume that $\eta_{GDA} < \frac{1}{4}$, $\eta_{OGDA} < \min(\omega, \frac{1}{8})$, $\eta_{EG} < \frac{\omega}{2}$, and $\eta_{OMWU} < \min\left(\frac{1}{4}, \frac{\omega}{2}\right)$ (bounds on the step sizes of the respective methods). It holds that the set of initial conditions for which GDA, OGDA, OMWU, and EG converge (stabilize to any point) is of measure zero.

Remark 2. Theorem 3.3 formally demonstrates that the behavior of the algorithms mentioned in Section 2.1.2 is not a result of "bad parametrization"; in fact, the probability that any of them converges to the NE is equal to the probability that the initialization of the variables coincides with the NE (Lebesgue measure zero).

Remark 3 (Average iterate also fails). One might ask what happens when we consider average iterates instead of the last iterate. It is a well-known fact (Syrgkanis et al., 2015) that the average iterate of no-regret algorithms converges to coarse correlated equilibria (CCE), so we expect the average iterate to stabilize. Nevertheless, CCE might not be Nash equilibria. Indeed, we can construct examples in which the average iterates of GDA, OGDA, OMWU, and EG experimentally fail to stabilize to Nash equilibria. In particular, we consider a slight modification of GMP: players and strategies are the same, but the payoff matrix has changed.

Figure 3: GDA, OGDA, OMWU, & EG fail to converge to a Nash equilibrium even on average

Figure 3 shows that the average iterates of GDA, OGDA, OMWU, and EG stabilize to points that are not Nash equilibria. We note that since our method (see the next subsection) converges locally, its average iterate also converges locally to a Nash equilibrium.
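Reusing `expected_payoff` from the sketch after Lemma 3.1, one can replicate this kind of average-iterate experiment for plain GDA on GMP itself; the finite-difference gradient, initialization, and step size below are illustrative choices, not the paper's experimental setup.

```python
import numpy as np

def num_grad(f, z, eps=1e-6):
    # Central finite-difference gradient; adequate for this 4-variable toy.
    g = np.zeros_like(z)
    for i in range(len(z)):
        zp, zm = z.copy(), z.copy()
        zp[i] += eps
        zm[i] -= eps
        g[i] = (f(zp) - f(zm)) / (2.0 * eps)
    return g

U = lambda z: expected_payoff(*z)   # Team A's expected payoff; B gets -U
z = np.array([0.9, 0.2, 0.3, 0.8])  # arbitrary interior initialization
avg, eta = np.zeros(4), 0.05
for t in range(1, 50_001):
    g = num_grad(U, z)
    # Team A (first two coordinates) ascends on U, Team B descends; with
    # two actions per player, clipping to [0, 1] is the simplex projection.
    z = np.clip(z + eta * np.concatenate([g[:2], -g[2:]]), 0.0, 1.0)
    avg += (z - avg) / t            # running average of the iterates
print(avg)  # compare against the uniform NE (0.5, 0.5, 0.5, 0.5)
```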
# 3.4 OUR PROPOSED METHOD
|
| 207 |
+
|
| 208 |
+
The aforementioned results prove that the challenging goal of computing two-team zero-sum games calls for an expansion of existing optimization techniques. The mainstay of this effort and our positive result is the KPV-GDA method defined in (KPV-GDA) which is inspired by techniques of adaptive control literature. The first statement we make is that KPV-GDA stabilizes around any Nash equilibrium for appropriate choices of matrices $\mathbf{K},\mathbf{P}$ :
|
| 209 |
+
|
| 210 |
+
Theorem 3.4 (KPV-GDA stabilizes). Consider a team zero-sum game so that the utility of Team $B$ is $U(\pmb{x},\pmb{y})$ and hence the utility of Team $A$ is $-U(\pmb{x},\pmb{y})$ and a Nash equilibrium $(\pmb{x}^*,\pmb{y}^*)$ of the game. Moreover, we assume
|
| 211 |
+
|
| 212 |
+
$$
|
| 213 |
+
\left( \begin{array}{l l} - \nabla_ {\pmb {x x}} ^ {2} U (\pmb {x} ^ {*}, \pmb {y} ^ {*}) & - \nabla_ {\pmb {x y}} ^ {2} U (\pmb {x} ^ {*}, \pmb {y} ^ {*}) \\ \nabla_ {\pmb {y x}} ^ {2} U (\pmb {x} ^ {*}, \pmb {y} ^ {*}) & \nabla_ {\pmb {y y}} ^ {2} U (\pmb {x} ^ {*}, \pmb {y} ^ {*}) \end{array} \right) i s i n v e r t i b l e.
|
| 214 |
+
$$
|
| 215 |
+
|
| 216 |
+
For any fixed step size $\eta > 0$ , we can always find matrices $K, P$ so that KPV-GDA method defined in (KPV-GDA) converges locally to $(\pmb{x}^{*}, \pmb{y}^{*})$ .
|
| 217 |
+
|
| 218 |
+
This is an existential theorem and cannot be generally useful in practice. Further, this dynamic would not be necessarily uncoupled and the design of matrices $\mathbf{K}$ and $\mathbf{P}$ could necessitate knowledge of the NE we are trying to compute. Instead, our next statement provides sufficient conditions under which a simple parametrization of matrices $\mathbf{K},\mathbf{P}$ results in an uncoupled, converging dynamic:
Theorem 3.5. Consider a two-team zero-sum game in which the utility of Team $B$ is $U(\pmb{x},\pmb{y})$ and the utility of Team $A$ is $-U(\pmb{x},\pmb{y})$, and a Nash equilibrium $(\pmb{x}^*,\pmb{y}^*)$. Moreover, let

$$
\mathbf{H} := \left( \begin{array}{cc} -\nabla_{\boldsymbol{xx}}^{2} U(\boldsymbol{x}^{*}, \boldsymbol{y}^{*}) & -\nabla_{\boldsymbol{xy}}^{2} U(\boldsymbol{x}^{*}, \boldsymbol{y}^{*}) \\ \nabla_{\boldsymbol{yx}}^{2} U(\boldsymbol{x}^{*}, \boldsymbol{y}^{*}) & \nabla_{\boldsymbol{yy}}^{2} U(\boldsymbol{x}^{*}, \boldsymbol{y}^{*}) \end{array} \right)
$$

and let $E$ be the set of eigenvalues $\rho$ of $\mathbf{H}$ with positive real part, that is, $E = \{\rho \text{ eigenvalue of } \mathbf{H} : Re(\rho) > 0\}$. We assume that $\mathbf{H}$ is invertible and moreover that

$$
\beta = \min_{\rho \in E} \frac{Re(\rho)^{2} + Im(\rho)^{2}}{Re(\rho)} > \max_{\rho \in E} Re(\rho) = \alpha. \tag{4}
$$

We set $\mathbf{K} = k\cdot \mathbf{I}$ and $\mathbf{P} = p\cdot \mathbf{I}$. Then, for any $k\in (-\beta , -\alpha)$, there exist a small enough step size $\eta > 0$ and a scalar $p > 0$ so that (KPV-GDA) with the chosen $\mathbf{K},\mathbf{P}$ converges locally to $(\boldsymbol{x}^{*},\boldsymbol{y}^{*})$.
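Condition (4) and the admissible interval for $k$ are easy to check numerically once $\mathbf{H}$ is known. The sketch below is a hypothetical helper (not from the paper's code) that computes $\alpha$ and $\beta$ from the eigenvalues of a given matrix and returns one valid $k \in (-\beta, -\alpha)$; the scalar $p$ and the step size $\eta$ would still have to be chosen small enough, as the theorem requires.

```python
import numpy as np

def choose_k(H):
    """Return (k, alpha, beta) as in Theorem 3.5, or None if condition (4) fails."""
    eigs = np.linalg.eigvals(H)
    E = [rho for rho in eigs if rho.real > 0]            # eigenvalues with positive real part
    if not E or abs(np.linalg.det(H)) < 1e-12:           # H must be invertible
        return None
    alpha = max(rho.real for rho in E)
    beta = min((rho.real**2 + rho.imag**2) / rho.real for rho in E)
    if beta <= alpha:                                    # condition (4) violated
        return None
    return -(alpha + beta) / 2.0, alpha, beta            # any k in (-beta, -alpha) works

H = np.array([[2.0, -1.0], [1.0, 2.0]])                  # toy example; eigenvalues 2 +/- i
print(choose_k(H))                                       # k = -2.25, alpha = 2.0, beta = 2.5
```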
# 4 EXPERIMENTS
In this section, we perform a series of experiments to further motivate the study of two-team zero-sum games, especially in the context of multi-agent generative adversarial networks (multi-GANs). A multi-agent generative adversarial network (multi-GAN) (Arora et al., 2017; Hoang et al., 2017; Zhang et al., 2018; Tang, 2020; Hardy et al., 2019; Albuquerque et al., 2019) is a generative adversarial network (GAN) that leverages multiple "agents" (generators and/or discriminators) in order to achieve statistical and computational benefits. In particular, Arora et al. (2017) formally proved the expressive superiority of multi-generator adversarial network architectures, a result that we empirically verify in Section 4. In this direction, researchers strive to harness the efficacy of distributed processing by utilizing shallower networks that can collectively learn more diverse datasets<sup>4</sup>.
At first, the superiority of multi-GANs might appear to contradict our theoretical findings; but in reality, this superiority stems from the quality of the solutions that are attainable by multi-agent architectures (expressivity), together with the fact that hardness (complexity) translates to slow rates of convergence but not to non-convergence. Single-agent GANs quickly converge to critical points that are not guaranteed to capture the data distribution very well. In Figure 4 we see the fast convergence of a single-agent GAN to solutions of bad quality versus the convergence of a multi-GAN to an obviously better solution. Due to space constraints, we defer further discussion of the experiments to Section D.1.
Figure 4: From left to right: (i) Each generator of MGAN learns one mode of the mixture of 8 Gaussians, (ii) Mode collapse of single-agent GANs, (iii) A single-agent GAN cannot discriminate between the modes.
# 5 CONCLUSIONS AND OPEN PROBLEMS
In this work we study the wide class of nonconvex-nonconcave games that express two-team competition, inspired broadly by the structure of the complex competition between multi-agent generators and discriminators in GANs. In this setting of two-team zero-sum games, we presented a number of negative results about the problem of computing a Nash equilibrium. Moreover, through a simple family of games that we construct, we prove the inability of commonly used methods for min-max optimization, such as GDA, OGDA, OMWU, and EG, to converge to Nash equilibria both on average and in the last iterate, in stark contrast to recent literature concerned with simpler games. We also presented an optimization method, called KPV-GDA, that manages to stabilize around Nash equilibria.
# ACKNOWLEDGEMENTS
Ioannis Panageas would like to acknowledge support from a start-up grant. Emmanouil V. VG is grateful for financial support by a FODSI Postdoctoral Fellowship. This work was partially completed while IP and EVVG were visiting research fellows at the Simons Institute for the Theory of Computing during the Learning and Games semester.
# REFERENCES
Isabela Albuquerque, João Monteiro, Thang Doan, Breandan Considine, Tiago Falk, and Ioannis Mitliagkas. Multi-objective training of generative adversarial networks with multiple discriminators. In International Conference on Machine Learning, pp. 202-211. PMLR, 2019.

Ioannis Anagnostides, Gabriele Farina, Christian Kroer, Andrea Celli, and Tuomas Sandholm. Faster no-regret learning dynamics for extensive-form correlated and coarse correlated equilibria. arXiv preprint arXiv:2202.05446, 2022a.

Ioannis Anagnostides, Ioannis Panageas, Gabriele Farina, and Tuomas Sandholm. On last-iterate convergence beyond zero-sum games. arXiv preprint arXiv:2203.12056, 2022b.

Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In Proceedings of the 34th International Conference on Machine Learning - Volume 70, pp. 214-223, 2017.

Sanjeev Arora, Elad Hazan, and Satyen Kale. The multiplicative weights update method: a meta-algorithm and applications. Theory of Computing, 8(1):121-164, 2012.

Sanjeev Arora, Rong Ge, Yingyu Liang, Tengyu Ma, and Yi Zhang. Generalization and equilibrium in generative adversarial nets (GANs). In International Conference on Machine Learning, pp. 224-232. PMLR, 2017.

Moshe Babaioff, Robert Kleinberg, and Christos H. Papadimitriou. Congestion games with malicious players. Games and Economic Behavior, 67(1):22-35, 2009.

Yakov Babichenko and Aviad Rubinstein. Settling the complexity of Nash equilibrium in congestion games. In Samir Khuller and Virginia Vassilevska Williams (eds.), STOC '21: 53rd Annual ACM SIGACT Symposium on Theory of Computing, Virtual Event, Italy, June 21-25, 2021, pp. 1426-1437. ACM, 2021.

Michael Bacharach. Interactive team reasoning: A contribution to the theory of co-operation. Research in Economics, 53(2):117-147, 1999. ISSN 1090-9443. doi: 10.1006/reec.1999.0188. URL https://www.sciencedirect.com/science/article/pii/S1090944399901886.

Nicola Basilico, Andrea Celli, Giuseppe De Nittis, and Nicola Gatti. Computing the team-maxmin equilibrium in single-team single-adversary team games. Intelligenza Artificiale, 11(1):67-79, 2017a.

Nicola Basilico, Andrea Celli, Giuseppe De Nittis, and Nicola Gatti. Team-maxmin equilibrium: efficiency bounds and algorithms. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 31, 2017b.

A. S. Bazanella, P. V. Kokotovic, and A. S. e Silva. On the control of dynamic systems with unknown operating point. In 1997 European Control Conference (ECC), pp. 3434-3439, 1997. doi: 10.23919/ECC.1997.7082644.

Alexandre S. Bazanella, Petar V. Kokotovic, and Aguinaldo S. e Silva. On the control of dynamic systems with unknown operating point. International Journal of Control, 73(7):600-605, 2000. doi: 10.1080/002071700219443. URL https://doi.org/10.1080/002071700219443.

Kimmo Berg and Tuomas Sandholm. Exclusion method for finding Nash equilibrium in multiplayer games. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 31, 2017.

David Blackwell. An analog of the minimax theorem for vector payoffs. Pacific Journal of Mathematics, 6(1):1-8, 1956.

Christian Borgs, Jennifer Chayes, Nicole Immorlica, Adam Tauman Kalai, Vahab Mirrokni, and Christos Papadimitriou. The myth of the folk theorem. In Proceedings of the fortieth annual ACM symposium on Theory of computing, pp. 365-372, 2008.

Michael Bowling, Neil Burch, Michael Johanson, and Oskari Tammelin. Heads-up limit hold'em poker is solved. Science, 347(6218):145-149, 2015.

Noam Brown and Tuomas Sandholm. Superhuman AI for heads-up no-limit poker: Libratus beats top professionals. Science, 359(6374):418-424, 2018.

Federico Cacciamani, Andrea Celli, Marco Ciccone, and Nicola Gatti. Multi-agent coordination in adversarial environments through signal mediated strategies. In Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems, pp. 269-278, 2021.

Yongcan Cao, Wenwu Yu, Wei Ren, and Guanrong Chen. An overview of recent progress in the study of distributed multi-agent coordination. IEEE Transactions on Industrial Informatics, 9(1):427-438, 2012.

Andrea Celli and Nicola Gatti. Computational results for extensive-form adversarial team games. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.

N. Cesa-Bianchi and G. Lugosi. On prediction of individual sequences. Annals of Statistics, pp. 1865-1895, 1999.

Chao-Kai Chiang, Tianbao Yang, Chia-Jung Lee, Mehrdad Mahdavi, Chi-Jen Lu, Rong Jin, and Shenghuo Zhu. Online optimization with gradual variations. In Conference on Learning Theory, pp. 6-1. JMLR Workshop and Conference Proceedings, 2012.

Constantinos Daskalakis and Ioannis Panageas. The limit points of (optimistic) gradient descent in min-max optimization. Advances in Neural Information Processing Systems, 31, 2018.

Constantinos Daskalakis and Ioannis Panageas. Last-iterate convergence: Zero-sum games and constrained min-max optimization. Innovations in Theoretical Computer Science, 2019.

Constantinos Daskalakis and Christos Papadimitriou. Continuous local search. In Proceedings of the twenty-second annual ACM-SIAM symposium on Discrete Algorithms, pp. 790-804. SIAM, 2011.

Constantinos Daskalakis, Alex Fabrikant, and Christos H. Papadimitriou. The game world is flat: The complexity of Nash equilibria in succinct games. In International Colloquium on Automata, Languages, and Programming, pp. 513-524. Springer, 2006.

Constantinos Daskalakis, Paul W. Goldberg, and Christos H. Papadimitriou. The complexity of computing a Nash equilibrium. SIAM Journal on Computing, 39(1):195-259, 2009.

Constantinos Daskalakis, Alan Deckelbaum, and Anthony Kim. Near-optimal no-regret algorithms for zero-sum games. In Proceedings of the twenty-second annual ACM-SIAM symposium on Discrete Algorithms, pp. 235-254. SIAM, 2011.

Constantinos Daskalakis, Stratis Skoulakis, and Manolis Zampetakis. The complexity of constrained min-max optimization. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing, pp. 1466-1478, 2021.

Jelena Diakonikolas, Constantinos Daskalakis, and Michael I. Jordan. Efficient methods for structured nonconvex-nonconcave min-max optimization. In International Conference on Artificial Intelligence and Statistics, pp. 2746-2754. PMLR, 2021.

Ishan Durugkar, Ian Gemp, and Sridhar Mahadevan. Generative multi-adversarial networks. arXiv preprint arXiv:1611.01673, 2016.

Alex Fabrikant, Christos Papadimitriou, and Kunal Talwar. The complexity of pure Nash equilibria. In Proceedings of the thirty-sixth annual ACM symposium on Theory of computing, pp. 604-612, 2004.

Gabriele Farina, Andrea Celli, Nicola Gatti, and Tuomas Sandholm. Ex ante coordination and collusion in zero-sum multi-player extensive-form games. Advances in Neural Information Processing Systems, 31, 2018.

John Fearnley, Paul W. Goldberg, Alexandros Hollender, and Rahul Savani. The complexity of gradient descent: CLS = PPAD∩PLS. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing, pp. 46-59, 2021.

Lampros Flokas, Emmanouil-Vasileios Vlatakis-Gkaragkounis, and Georgios Piliouras. Solving min-max optimization with hidden structure via gradient descent ascent. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 2021, virtual, 2021.

Drew Fudenberg and Jean Tirole. Game Theory. MIT Press, 1991.

Ian Gemp, Rahul Savani, Marc Lanctot, Yoram Bachrach, Thomas Anthony, Richard Everett, Andrea Tacchetti, Tom Eccles, and János Kramár. Sample-based approximation of Nash in large many-player games via gradient descent. arXiv preprint arXiv:2106.01285, 2021.

Natalie Gold. Introduction: Teamwork in theory and in practice. In Teamwork, pp. 1-21. Springer, 2005.

Noah Golowich, Sarath Pattathil, and Constantinos Daskalakis. Tight last-iterate convergence rates for no-regret learning in multi-player games. Advances in Neural Information Processing Systems, 33:20766-20778, 2020.

Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems 27: Annual Conference on Neural Information Processing Systems 2014, December 8-13 2014, Montreal, Quebec, Canada, pp. 2672-2680, 2014.

Hans W. Gottinger. J. Marschak and Roy Radner, "Economic Theory of Teams" (book review). Theory and Decision, 5(3):349, 1974.

James Hannan. Approximation to Bayes risk in repeated play. In Contributions to the Theory of Games (AM-39), Volume III, pp. 97-140. Princeton University Press, 2016.

Corentin Hardy, Erwan Le Merrer, and Bruno Sericola. MD-GAN: Multi-discriminator generative adversarial networks for distributed datasets. In 2019 IEEE International Parallel and Distributed Processing Symposium (IPDPS), pp. 866-877. IEEE, 2019.

Munther A. Hassouneh, Hsien-Chiarn Lee, and Eyad H. Abed. Washout filters in feedback control: Benefits, limitations and extensions. In Proceedings of the 2004 American Control Conference, volume 5, pp. 3950-3955. IEEE, 2004.

Quan Hoang, Tu Dinh Nguyen, Trung Le, and Dinh Phung. Multi-generator generative adversarial nets. arXiv preprint arXiv:1708.02556, 2017.

Quan Hoang, Tu Dinh Nguyen, Trung Le, and Dinh Phung. MGAN: Training generative adversarial nets with multiple generators. In International Conference on Learning Representations, 2018.

Max Jaderberg, Wojciech M. Czarnecki, Iain Dunning, Luke Marris, Guy Lever, Antonio Garcia Castaneda, Charles Beattie, Neil C. Rabinowitz, Ari S. Morcos, Avraham Ruderman, et al. Human-level performance in 3D multiplayer games with population-based reinforcement learning. Science, 364(6443):859-865, 2019.

Jeongbin Kim, Thomas R. Palfrey, and Jeffrey R. Zeidel. A theory of games played by teams of players. 2019.

R. Kleinberg, G. Piliouras, and E. Tardos. Multiplicative updates outperform generic no-regret learning in congestion games. In STOC, 2009.

G. M. Korpelevich. The extragradient method for finding saddle points and other problems. Matecon, 12:747-756, 1976.

Jason D. Lee, Ioannis Panageas, Georgios Piliouras, Max Simchowitz, Michael I. Jordan, and Benjamin Recht. First-order methods almost always avoid strict saddle points. Math. Program., 176(1-2):311-337, 2019. doi: 10.1007/s10107-019-01374-3. URL https://doi.org/10.1007/s10107-019-01374-3.

Joel Z. Leibo, Vinicius Zambaldi, Marc Lanctot, Janusz Marecki, and Thore Graepel. Multi-agent reinforcement learning in sequential social dilemmas. arXiv preprint arXiv:1702.03037, 2017.

Dan Li, Dacheng Chen, Baihong Jin, Lei Shi, Jonathan Goh, and See-Kiong Ng. MAD-GAN: Multivariate anomaly detection for time series data with generative adversarial networks. In International Conference on Artificial Neural Networks, pp. 703-716. Springer, 2019.

Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax problems. In International Conference on Machine Learning, pp. 6083-6093. PMLR, 2020.

Richard J. Lipton, Evangelos Markakis, and Aranyak Mehta. Playing large games using simple strategies. In Proceedings of the 4th ACM Conference on Electronic Commerce, pp. 36-41, 2003.

Jakob Marschak. Elements for a theory of teams. Management Science, 1(2):127-137, 1955.

H. Brendan McMahan, Geoffrey J. Gordon, and Avrim Blum. Planning in the presence of cost functions controlled by an adversary. In Proceedings of the 20th International Conference on Machine Learning (ICML-03), pp. 536-543, 2003.

R. Mehta, I. Panageas, and G. Piliouras. Natural selection as an inhibitor of genetic diversity: Multiplicative weights updates algorithm and a conjecture of haploid genetics. In ITCS, 2015.

Panayotis Mertikopoulos, Christos Papadimitriou, and Georgios Piliouras. Cycles in adversarial regularized learning. In Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 2703-2717. SIAM, 2018.

Panayotis Mertikopoulos, Houssam Zenati, Bruno Lecouat, Chuan-Sheng Foo, Vijay Chandrasekhar, and Georgios Piliouras. Optimistic mirror descent in saddle-point problems: Going the extra (gradient) mile. In ICLR'19 - International Conference on Learning Representations, pp. 1-23, 2019.

Dov Monderer and Lloyd S. Shapley. Potential games. Games and Economic Behavior, 14(1):124-143, 1996.

Matej Moravcik, Martin Schmid, Neil Burch, Viliam Lisy, Dustin Morrill, Nolan Bard, Trevor Davis, Kevin Waugh, Michael Johanson, and Michael Bowling. DeepStack: Expert-level artificial intelligence in heads-up no-limit poker. Science, 356(6337):508-513, 2017.

Mohammad Sal Moslehian. Ky Fan inequalities. CoRR, abs/1108.1467, 2011.

Thomas Nagylaki. The evolution of multilocus systems under weak selection. Genetics, 134(2):627-647, 1993.

Arkadj Semenovič Nemirovskij and David Borisovich Yudin. Problem complexity and method efficiency in optimization. 1983.

Martin A. Nowak, Akira Sasaki, Christine Taylor, and Drew Fudenberg. Emergence of cooperation and evolutionary stability in finite populations. Nature, 428(6983):646-650, 2004.

Christos H. Papadimitriou. The complexity of the Lin-Kernighan heuristic for the traveling salesman problem. SIAM Journal on Computing, 21(3):450-465, 1992.

Leonid Denisovich Popov. A modification of the Arrow-Hurwicz method for search of saddle points. Mathematical Notes of the Academy of Sciences of the USSR, 28(5):845-848, 1980.

Julia Robinson. An iterative method of solving a game. Annals of Mathematics, pp. 296-301, 1951.

R. W. Rosenthal. A class of games possessing pure-strategy Nash equilibria. International Journal of Game Theory, 2(1):65-67, 1973.

Tim Roughgarden. Intrinsic robustness of the price of anarchy. In Proceedings of the forty-first annual ACM symposium on Theory of computing, pp. 513-522, 2009.

Leonard J. Schulman and Umesh V. Vazirani. The duality gap for two-team zero-sum games. Games Econ. Behav., 115:336-345, 2019a. doi: 10.1016/j.geb.2019.03.011. URL https://doi.org/10.1016/j.geb.2019.03.011.

Leonard J. Schulman and Umesh V. Vazirani. The duality gap for two-team zero-sum games. Games and Economic Behavior, 115:336-345, 2019b.

Hassam Ullah Sheikh, Mina Razghandi, and Ladislau Boloni. Learning distributed cooperative policies for security games via deep reinforcement learning. In 2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC), volume 1, pp. 489-494. IEEE, 2019.

Karthik Sridharan. Learning from an optimization viewpoint. arXiv preprint arXiv:1204.4145, 2012.

Karthik Sridharan and Ambuj Tewari. Convex games in Banach spaces. In COLT 2010, pp. 1, 2010.

Vasilis Syrgkanis, Alekh Agarwal, Haipeng Luo, and Robert E. Schapire. Fast convergence of regularized learning in games. In Advances in Neural Information Processing Systems, pp. 2989-2997, 2015.

Shichang Tang. Lessons learned from the training of GANs on artificial datasets. IEEE Access, 8:165044-165055, 2020.

Liang Tong, Sixie Yu, Scott Alfeld, et al. Adversarial regression with multiple learners. In International Conference on Machine Learning, pp. 4946-4954. PMLR, 2018.

Oriol Vinyals, Igor Babuschkin, Wojciech M. Czarnecki, Michael Mathieu, Andrew Dudzik, Junyoung Chung, David H. Choi, Richard Powell, Timo Ewalds, Petko Georgiev, et al. Grandmaster level in StarCraft II using multi-agent reinforcement learning. Nature, 575(7782):350-354, 2019.

Yannick Viossat and Andriy Zapechelyuk. No-regret dynamics and fictitious play. Journal of Economic Theory, 148(2):825-842, 2013.

Emmanouil-Vasileios Vlatakis-Gkaragkounis, Lampros Flokas, and Georgios Piliouras. Poincaré recurrence, cycles and spurious equilibria in gradient-descent-ascent for non-convex nonconcave zero-sum games. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 10450-10461, 2019. URL https://proceedings.neurips.cc/paper/2019/hash/6c7cd904122e623ce625613d6af337c4-Abstract.html.

John von Neumann. Zur Theorie der Gesellschaftsspiele. Mathematische Annalen, 100:295-320, 1928.

Bernhard von Stengel and Daphne Koller. Team-maxmin equilibria. Games and Economic Behavior, 21(1-2):309-321, 1997.

Hongyang Zhang, Susu Xu, Jiantao Jiao, Pengtao Xie, Ruslan Salakhutdinov, and Eric P. Xing. Stackelberg GAN: Towards provable minimax equilibrium via multi-generator architectures. arXiv preprint arXiv:1811.08010, 2018.

Youzhi Zhang and Bo An. Converging to team-maxmin equilibria in zero-sum multiplayer games. In International Conference on Machine Learning, pp. 11033-11043. PMLR, 2020.

Youzhi Zhang, Bo An, and Jakub Černý. Computing ex ante coordinated team-maxmin equilibria in zero-sum multiplayer extensive-form games. arXiv preprint arXiv:2009.12629, 2020.

Dao Li Zhu and Patrice Marcotte. Co-coercivity and its role in the convergence of iterative schemes for solving variational inequalities. SIAM Journal on Optimization, 6(3):714-726, 1996.
2023/Towards convergence to Nash equilibria in two-team zero-sum games/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b59f337cc2b5f9e229a66d502a13dadc7c5ddd783e48537bc0ec2cb260f80540
+size 161188

2023/Towards convergence to Nash equilibria in two-team zero-sum games/layout.json ADDED
The diff for this file is too large to render. See raw diff

2023/Towards the Generalization of Contrastive Self-Supervised Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_content_list.json ADDED
The diff for this file is too large to render. See raw diff

2023/Towards the Generalization of Contrastive Self-Supervised Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_model.json ADDED
The diff for this file is too large to render. See raw diff

2023/Towards the Generalization of Contrastive Self-Supervised Learning/ba91a1ec-18bd-462a-93e5-dee94f15572a_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f687162cb7011ed64b853568392553e3c828b6df6a55372ba92dc80b7dacbcb5
+size 1770596

2023/Towards the Generalization of Contrastive Self-Supervised Learning/full.md ADDED
The diff for this file is too large to render. See raw diff

2023/Towards the Generalization of Contrastive Self-Supervised Learning/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f9aaee848d512edebdf50fefdec085a510f09862293ad102dd8579ed6c894ed
+size 2228776

2023/Towards the Generalization of Contrastive Self-Supervised Learning/layout.json ADDED
The diff for this file is too large to render. See raw diff

2023/Trading Information between Latents in Hierarchical Variational Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_content_list.json ADDED
The diff for this file is too large to render. See raw diff

2023/Trading Information between Latents in Hierarchical Variational Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_model.json ADDED
The diff for this file is too large to render. See raw diff

2023/Trading Information between Latents in Hierarchical Variational Autoencoders/1aa93880-e4b4-43df-a6da-767b4c6119cd_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbe3ce9fa3506574e54c153204e3f1ec395efe733154241f8e493dddf03433e3
+size 6369736

2023/Trading Information between Latents in Hierarchical Variational Autoencoders/full.md ADDED
@@ -0,0 +1,465 @@
# TRADING INFORMATION BETWEEN LATENTS IN HIERARCHICAL VARIATIONAL AUTOENCODERS

Tim Z. Xiao
University of Tübingen & IMPRS-IS
zhenzhong.xiao@uni-tuebingen.de

Robert Bamler
University of Tübingen
robert.bamler@uni-tuebingen.de

# ABSTRACT
Variational Autoencoders (VAEs) were originally motivated (Kingma & Welling, 2014) as probabilistic generative models in which one performs approximate Bayesian inference. The proposal of $\beta$-VAEs (Higgins et al., 2017) breaks this interpretation and generalizes VAEs to application domains beyond generative modeling (e.g., representation learning, clustering, or lossy data compression) by introducing an objective function that allows practitioners to trade off between the information content ("bit rate") of the latent representation and the distortion of reconstructed data (Alemi et al., 2018). In this paper, we reconsider this rate/distortion trade-off in the context of hierarchical VAEs, i.e., VAEs with more than one layer of latent variables. We identify a general class of inference models for which one can split the rate into contributions from each layer, which can then be tuned independently. We derive theoretical bounds on the performance of downstream tasks as functions of the individual layers' rates and verify our theoretical findings in large-scale experiments. Our results provide guidance for practitioners on which region in rate-space to target for a given application.

# 1 INTRODUCTION

Variational autoencoders (VAEs) (Kingma & Welling, 2014; Rezende et al., 2014) are a class of deep generative models that are used, e.g., for density modeling (Takahashi et al., 2018), clustering (Jiang et al., 2017), nonlinear dimensionality reduction of scientific measurements (Laloy et al., 2017), data compression (Ballé et al., 2017), anomaly detection (Xu et al., 2018), and image generation (Razavi et al., 2019). VAEs (more precisely, $\beta$-VAEs (Higgins et al., 2017)) span such a diverse set of application domains in part because they can be tuned to a specific task without changing the network architecture, in a way that is well understood from information theory (Alemi et al., 2018).
The original proposal of VAEs (Kingma & Welling, 2014) motivates them from the perspective of generative probabilistic modeling and approximate Bayesian inference. However, the generalization to $\beta$-VAEs breaks this interpretation as they are no longer trained by maximizing a lower bound on the marginal data likelihood. These models are better described as neural networks that are trained to learn the identity function, i.e., to make their output resemble the input as closely as possible. This task is made nontrivial by introducing a so-called (variational) information bottleneck (Alemi et al., 2017; Tishby & Zaslavsky, 2015) at one or more layers, which restricts the information content that passes through these layers. The network activations at the information bottleneck are called latent representations (or simply "latents"), and they split the network into an encoder part (from input to latents) and a decoder part (from latents to output). This separation of the model into an encoder and a decoder allows us to categorize the wide variety of applications of VAEs into three domains:

1. data reconstruction tasks, i.e., applications that involve both the encoder and the decoder; these include various nonlinear inter- and extrapolations (e.g., image upscaling, denoising, or inpainting), and VAE-based methods for lossy data compression;
2. representation learning tasks, i.e., applications that involve only the encoder; they serve a downstream task that operates on the (typically lower dimensional) latent representation, e.g., classification, regression, visualization, clustering, or anomaly detection; and
3. generative modeling tasks, i.e., applications that involve only the decoder; these are less common but include generating new samples that resemble training data.
Figure 1: Left: trade-off between performance in the three application domains of VAEs, using a GHVAE trained on the SVHN data set (details: Section 5); higher is better for all three metrics; gray dots on walls show 2d-projections. Right: color code, corresponding layer-wise rates (Eq. 7), and individual performance landscapes (size of dots $\propto$ performance). The hyperparameters $\beta_{2}$ and $\beta_{1}$ allow us to tune the HVAE for best data reconstruction ($\triangle$), best representation learning ($\diamond$), or best generative modeling ($\odot$). Note that the performance landscapes differ strongly across the three applications, and neither a standard VAE ($\beta_{2} = \beta_{1} = 1$; marked "$\bullet$" in right panels) nor a conventional $\beta$-VAE ($\beta_{2} = \beta_{1}$; dashed red lines) results in an optimal model for any of the three applications.
The information bottleneck incentivizes the VAE to encode information into the latents efficiently by removing any redundancies from the input. How aggressively this is done can be controlled by tuning the strength $\beta$ of the information bottleneck (Alemi et al., 2018). Unfortunately, information theory distinguishes relevant from redundant information only in a quantitative way that is agnostic to the qualitative features that each piece of information represents about some data point. In practice, many VAE architectures (Deng et al., 2017; Yingzhen & Mandt, 2018; Ballé et al., 2018) try to separate qualitatively different features into different parts of the latent representation by making the model architecture reflect some prior assumptions about the semantic structure of the data. This allows downstream applications from the three domains discussed above to more precisely target specific qualitative aspects of the data by using or manipulating only the corresponding part of the latent representation. However, in this approach, the degree of detail to which each qualitative aspect is encoded in the latents can be controlled at most indirectly by tuning network layer sizes.

In this paper, we argue both theoretically and empirically that the three different application domains of VAEs identified above require different trade-offs in the amount of information that is encoded in each part of the latent representation. We propose a method to independently control the information content (or "rate") of each layer of latent representations, generalizing the rate/distortion theory of $\beta$-VAEs (Alemi et al., 2018) for VAEs with more than one layer of latents ("hierarchical VAEs" or HVAEs for short). We identify the most general model architecture that is compatible with our proposal and analyze how both theoretical performance bounds and empirically measured performances in each of the above three application domains depend on how rate is distributed across layers.

Our approach is summarized in Figure 1. The 3d-plot shows empirically measured performance metrics (discussed in detail in Section 5.2) for the three application domains identified above. Each point on the colored surface corresponds to different layer-wise rates in an HVAE with two layers of latents. Crucially, the rates that lead to optimal performance are different for each of the three application domains (see markers $\triangle$, $\diamond$, and $\odot$ in Figure 1), and none of these three optimal models coincides with a conventional $\beta$-VAE (dashed red lines in right panels). Thus, being able to control each layer's individual rate allows practitioners to train VAEs that target a specific application.

The paper is structured as follows. Section 2 summarizes related work. Section 3 introduces the proposed information-trading method. We then analyze how controlling individual layers' rates can be used to tune HVAEs for specific tasks, i.e., how performance in each of the three application domains identified above depends on the allocation of rates across layers. This analysis is done theoretically in Section 4 and empirically in Section 5. Section 6 provides concluding remarks.
(a) bottom-up    (b) implicit top-down (e.g., LVAE)    (c) generalized (explicit) top-down

Figure 2: Inference (dashed arrows) and generative (solid arrows) models for hierarchical VAEs (HVAEs) with two layers of latent variables. White/gray circles denote latent/observed random variables, respectively; the diamond $\mathbf{d}_1$ in (b) is the result of a deterministic transformation of $\mathbf{x}$.
# 2 RELATED WORK
We group related work into work on model architectures for hierarchical VAEs, and on $\beta$-VAEs.

Model Design for Hierarchical VAEs. The original VAE design (Kingma & Welling, 2014; Rezende et al., 2014) has a single layer of latent variables, but recent works (Vahdat & Kautz, 2020; Child, 2021) found that increasing the number of stochastic layers in hierarchical VAEs (HVAEs) improves performance. HVAEs have various designs for their inference models. Sønderby et al. (2016) introduced the Ladder VAE (LVAE) with a top-down inference path rather than the naive bottom-up inference (see Section 3), whereas the Bidirectional-Inference VAE (BIVA) (Maaløe et al., 2019) uses a combination of top-down and bottom-up. Our proposed framework applies to a large class of inference models (see Section 3) that includes the popular LVAE (Sønderby et al., 2016).

$\beta$-VAEs And Their Information-Theoretical Interpretations. Higgins et al. (2017) introduced an extra hyperparameter $\beta$ in the objective of VAEs that tunes the strength of the information bottleneck, and they observed that large $\beta$ leads to a disentangled latent representation. An information-theoretical interpretation of $\beta$-VAEs was provided in (Alemi et al., 2018) by applying the concept of a (variational) bottleneck (Tishby & Zaslavsky, 2015; Alemi et al., 2017) to autoencoders. Due to this information-theoretical interpretation, $\beta$-VAEs are popular models for data compression (Ballé et al., 2017; Minnen et al., 2018; Yang et al., 2020), where tuning $\beta$ allows trading off between the bit rate of compressed data and data distortion. In the present work, we generalize $\beta$-VAEs when applied to HVAEs, and we introduce a framework for tuning the rate of each latent layer individually.
# 3 A HIERARCHICAL INFORMATION TRADING FRAMEWORK
We propose a refinement of the rate/distortion theory of $\beta$-VAEs (Alemi et al., 2018) that admits controlling individual layers' rates in VAEs with more than one layer of latents (hierarchical VAEs).

# 3.1 CONVENTIONAL $\beta$-VAE WITH HIERARCHICAL LATENT REPRESENTATIONS

We consider a hierarchical VAE (HVAE) for data $\pmb{x}$ with $L$ layers of latent representations $\{z_{\ell}\}_{\ell=1}^{L}$. Figure 2, discussed further in Section 3.2 below, illustrates various model architectures for the example of $L = 2$. Solid arrows depict the generative model $p_{\theta}(\{z_{\ell}\}, \pmb{x})$, where $\theta$ are model parameters (neural network weights). We assume that the implementation factorizes $p_{\theta}(\{z_{\ell}\}, \pmb{x})$ as follows,

$$
p_{\theta}(\{\boldsymbol{z}_{\ell}\}, \boldsymbol{x}) = p_{\theta}(\boldsymbol{z}_{L})\, p_{\theta}(\boldsymbol{z}_{L-1} \mid \boldsymbol{z}_{L})\, p_{\theta}(\boldsymbol{z}_{L-2} \mid \boldsymbol{z}_{L-1}, \boldsymbol{z}_{L}) \cdots p_{\theta}(\boldsymbol{z}_{1} \mid \boldsymbol{z}_{\geq 2})\, p_{\theta}(\boldsymbol{x} \mid \boldsymbol{z}_{\geq 1}), \tag{1}
$$

where the notation $z_{\geq n}$ for any $n$ is short for the collection of latents $\{\pmb{z}_{\ell}\}_{\ell = n}^{L}$ (thus, $\pmb{z}_{\geq 1}$ and $\{\pmb{z}_{\ell}\}$ are synonymous), and the numbering of latents from $L$ down to 1 follows the common convention in the literature (Sønderby et al., 2016; Gulrajani et al., 2017; Child, 2021). The loss function of a normal $\beta$-VAE (Higgins et al., 2017) with this generic architecture would be
$$
\mathcal{L}_{\beta}(\theta, \phi) = \mathbb{E}_{\boldsymbol{x} \sim \mathbb{X}_{\text{train}}} \Bigg[ \underbrace{\mathbb{E}_{q_{\phi}(\{\boldsymbol{z}_{\ell}\} \mid \boldsymbol{x})}\big[-\log p_{\theta}(\boldsymbol{x} \mid \{\boldsymbol{z}_{\ell}\})\big]}_{=\ \text{"distortion" } D} + \beta \underbrace{D_{\mathrm{KL}}\big[q_{\phi}(\{\boldsymbol{z}_{\ell}\} \mid \boldsymbol{x}) \,\|\, p_{\theta}(\{\boldsymbol{z}_{\ell}\})\big]}_{=\ \text{"rate" } R} \Bigg]. \tag{2}
$$

Here, $q_{\phi}(\{\pmb{z}_{\ell}\} \mid \pmb{x})$ is the inference (or "encoder") model with parameters $\phi$, $\mathbb{X}_{\mathrm{train}}$ is the training set, $D_{\mathrm{KL}}[\cdot \|\cdot]$ denotes Kullback-Leibler divergence, and the Lagrange parameter $\beta > 0$ trades off between a (total) rate $R$ and a distortion $D$ (Alemi et al., 2018). Setting $\beta = 1$ turns Eq. 2 into the negative ELBO objective of a regular VAE (Kingma & Welling, 2014). The rate $R$ obtains its name as it measures the (total) information content that $q_{\phi}$ encodes into the latent representations $\{\pmb{z}_{\ell}\}$, which would manifest itself in the expected bit rate when one optimally encodes a random draw $\{\pmb{z}_{\ell}\} \sim q_{\phi}(\{\pmb{z}_{\ell}\} \mid \pmb{x})$ using $p_{\theta}(\{\pmb{z}_{\ell}\})$ as an entropy model (Agustsson & Theis, 2020; Bennett et al., 2002). An important observation pointed out in (Alemi et al., 2017) is that, regardless of how the rate $R$ is traded off against the distortion $D$ by tuning $\beta$, their sum $R + D$ is, in expectation under any data distribution $p_{\mathrm{data}}(\pmb{x})$, always lower bounded by the entropy $H[p_{\mathrm{data}}(\pmb{x})] := \mathbb{E}_{p_{\mathrm{data}}(\pmb{x})}[-\log p_{\mathrm{data}}(\pmb{x})]$,

$$
\mathbb{E}_{p_{\text{data}}(\boldsymbol{x})}[R + D] \geq H[p_{\text{data}}(\boldsymbol{x})] \quad \forall p_{\text{data}}. \tag{3}
$$

Limitations. The rate $R$ in Eq. 2 is a property of the collection $\{z_{\ell}\}$ of all latents, which can limit its interpretability for some inference models. For example, the common convention of enumerating layers $z_{\ell}$ from $\ell = L$ down to 1 in Eq. 1 is reminiscent of a naive architecture for the inference model that factorizes in reverse order compared to Eq. 1 ("bottom up", see dashed arrows in Figure 2(a)), i.e., $q_{\phi}(\{z_{\ell}\} | \pmb{x}) = q_{\phi}(z_1|\pmb{x})q_{\phi}(z_2|z_1)\dots q_{\phi}(z_L|z_{L - 1})$. Using a HVAE with such a "bottom-up" inference model to reconstruct some given data point $\pmb{x}$ would map $\pmb{x}$ to $z_{1}$ using $q_{\phi}(z_1|\pmb{x})$ and then map $z_{1}$ back to the data space using $p_{\theta}(\pmb{x}|z_1)$, thus ignoring all latents $z_{\ell}$ with $\ell > 1$. Yet, the rate term in Eq. 2 still depends on all latents, including the ones not needed to reconstruct any data (practical VAE-based compression methods using bits-back coding (Frey & Hinton, 1997) would, however, indeed use $z_{\ell}$ with $\ell > 1$ as auxiliary variables for computational efficiency).
# 3.2 TRADING INFORMATION BETWEEN LATENTS
Many HVAEs used in the literature allow us to resolve the limitations identified in Section 3.1. For example, the popular LVAE architecture (Sønderby et al., 2016) (Figure 2(b)) uses an inference model (dashed arrows) that traverses the latents $\{z_{\ell}\}$ in the same order as the generative model (solid arrows). We consider the following generalization of this architecture (see Figure 2(c)),

$$
q_{\phi}(\{\boldsymbol{z}_{\ell}\} \mid \boldsymbol{x}) = q_{\phi}(\boldsymbol{z}_{L} \mid \boldsymbol{x})\, q_{\phi}(\boldsymbol{z}_{L-1} \mid \boldsymbol{z}_{L}, \boldsymbol{x})\, q_{\phi}(\boldsymbol{z}_{L-2} \mid \boldsymbol{z}_{L-1}, \boldsymbol{z}_{L}, \boldsymbol{x}) \cdots q_{\phi}(\boldsymbol{z}_{1} \mid \boldsymbol{z}_{\geq 2}, \boldsymbol{x}). \tag{4}
$$

Formally, Eq. 4 is just the product rule of probability theory and therefore holds for arbitrary inference models $q_{\phi}(\{z_{\ell}\} \mid x)$. More practically, however, we make the assumption that the actual implementation of $q_{\phi}(\{z_{\ell}\} \mid x)$ follows the structure in Eq. 4. This means that, using the trained model, the most efficient way to map a given data point $x$ to its reconstruction $\hat{x}$ now involves all latents $z_{\ell}$ (either drawing a sample or taking the mode at each step):

$$
\boldsymbol{x} \xrightarrow{q_{\phi}(\boldsymbol{z}_{L} | \boldsymbol{x})} \boldsymbol{z}_{L} \xrightarrow{q_{\phi}(\boldsymbol{z}_{L-1} | \boldsymbol{z}_{L}, \boldsymbol{x})} \boldsymbol{z}_{L-1} \longrightarrow \dots \longrightarrow \boldsymbol{z}_{2} \xrightarrow{q_{\phi}(\boldsymbol{z}_{1} | \boldsymbol{z}_{\geq 2}, \boldsymbol{x})} \boldsymbol{z}_{1} \xrightarrow{p_{\theta}(\boldsymbol{x} | \{\boldsymbol{z}_{\ell}\})} \hat{\boldsymbol{x}}. \tag{5}
$$
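As a concrete illustration of the reconstruction path in Eq. 5, the following PyTorch-style sketch performs the top-down pass for $L = 2$; the modules `q2`, `q1`, and `decoder` are hypothetical stand-ins for the networks of Section 5.1, not the paper's released code.

```python
import torch

def reconstruct(x, q2, q1, decoder, sample=False):
    """Top-down pass of Eq. 5 for L = 2: x -> z2 -> z1 -> x_hat."""
    mu2, sigma2 = q2(x)                                   # q_phi(z2 | x)
    z2 = mu2 + sigma2 * torch.randn_like(mu2) if sample else mu2
    mu1, sigma1 = q1(x, z2)                               # q_phi(z1 | z2, x): sees z2 AND x
    z1 = mu1 + sigma1 * torch.randn_like(mu1) if sample else mu1
    return decoder(z1, z2)                                # mean of p_theta(x | z1, z2)
```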
Layer-wise Rates. We can interpret Eq. 5 as first mapping $\pmb{x}$ to a "crude" representation $\pmb{z}_L$, which gets iteratively refined to $\pmb{z}_1$, and finally to a reconstruction $\hat{\pmb{x}}$. Note that each factor $q_{\phi}(z_{\ell} | z_{\geq \ell + 1}, \pmb{x})$ of the inference model in Eq. 4 is conditioned not only on the previous layers $z_{\geq \ell + 1}$ but also on the original data $\pmb{x}$. This allows the inference model to target each refinement step in Eq. 5 such that the reconstruction $\hat{\pmb{x}}$ becomes close to $\pmb{x}$. More formally, we chose the inference architecture in Eq. 4 such that it factorizes over $\{z_{\ell}\}$ in the same order as the generative model (Eq. 1). This allows us to split the total rate $R$ into a sum of layer-wise rates as follows,

$$
\begin{aligned}
R &= \mathbb{E}_{q_{\phi}(\{\boldsymbol{z}_{\ell}\} | \boldsymbol{x})} \left[ \log \frac{q_{\phi}(\boldsymbol{z}_{L} \mid \boldsymbol{x})}{p_{\theta}(\boldsymbol{z}_{L})} + \log \frac{q_{\phi}(\boldsymbol{z}_{L-1} \mid \boldsymbol{z}_{L}, \boldsymbol{x})}{p_{\theta}(\boldsymbol{z}_{L-1} \mid \boldsymbol{z}_{L})} + \dots + \log \frac{q_{\phi}(\boldsymbol{z}_{1} \mid \boldsymbol{z}_{\geq 2}, \boldsymbol{x})}{p_{\theta}(\boldsymbol{z}_{1} \mid \boldsymbol{z}_{\geq 2})} \right] \\
&= R(\boldsymbol{z}_{L}) + R(\boldsymbol{z}_{L-1} \mid \boldsymbol{z}_{L}) + R(\boldsymbol{z}_{L-2} \mid \boldsymbol{z}_{L-1}, \boldsymbol{z}_{L}) + \dots + R(\boldsymbol{z}_{1} \mid \boldsymbol{z}_{\geq 2}).
\end{aligned} \tag{6}
$$

Here,

$$
R(\boldsymbol{z}_{L}) = D_{\mathrm{KL}}\big[q_{\phi}(\boldsymbol{z}_{L} \mid \boldsymbol{x}) \,\|\, p_{\theta}(\boldsymbol{z}_{L})\big] \quad \text{and} \quad
R(\boldsymbol{z}_{\ell} \mid \boldsymbol{z}_{\geq \ell+1}) = \mathbb{E}_{q(\boldsymbol{z}_{\geq \ell+1} \mid \boldsymbol{x})}\Big[ D_{\mathrm{KL}}\big[q_{\phi}(\boldsymbol{z}_{\ell} \mid \boldsymbol{z}_{\geq \ell+1}, \boldsymbol{x}) \,\|\, p_{\theta}(\boldsymbol{z}_{\ell} \mid \boldsymbol{z}_{\geq \ell+1})\big] \Big] \tag{7}
$$

quantify the information content of the highest-order latent representation $z_{L}$ and the (expected) increase in information content in each refinement step $z_{\ell +1}\rightarrow z_{\ell}$ in Eq. 5, respectively.
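For the diagonal Gaussian models used in Section 5.1, both terms in Eq. 7 reduce to closed-form Gaussian KL divergences (the outer expectation can be estimated with a single posterior sample). The sketch below illustrates this for $L = 2$; all module names are assumptions, not the paper's code.

```python
import torch

def gaussian_kl(mu_q, sigma_q, mu_p, sigma_p):
    """KL[N(mu_q, sigma_q^2) || N(mu_p, sigma_p^2)] for diagonal Gaussians, summed over dims."""
    return (torch.log(sigma_p / sigma_q)
            + (sigma_q**2 + (mu_q - mu_p)**2) / (2 * sigma_p**2) - 0.5).sum(-1)

def layer_rates(x, q2, q1, p1):
    """One-sample estimates of R(z2) and R(z1 | z2) from Eq. 7 for L = 2."""
    mu2, sigma2 = q2(x)
    R2 = gaussian_kl(mu2, sigma2, torch.zeros_like(mu2), torch.ones_like(sigma2))  # prior N(0, I)
    z2 = mu2 + sigma2 * torch.randn_like(mu2)             # sample for the outer expectation
    mu1q, sigma1q = q1(x, z2)
    mu1p, sigma1p = p1(z2)                                # p_theta(z1 | z2)
    return R2, gaussian_kl(mu1q, sigma1q, mu1p, sigma1p)
```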
Controlling Each Layer's Rate. Using Eqs. 6-7, we generalize the rate/distortion trade-off from Section 3.1 by introducing $L$ individual Lagrange multipliers $\beta_{L}, \beta_{L - 1}, \ldots, \beta_{1}$, collectively denoted as boldface $\boldsymbol{\beta}$. This leads to a new loss function that generalizes Eq. 2 as follows,

$$
\mathcal{L}_{\boldsymbol{\beta}}(\theta, \phi) = \mathbb{E}_{\boldsymbol{x} \sim \mathbb{X}_{\text{train}}}\big[ D + \beta_{L} R(\boldsymbol{z}_{L}) + \beta_{L-1} R(\boldsymbol{z}_{L-1} | \boldsymbol{z}_{L}) + \dots + \beta_{1} R(\boldsymbol{z}_{1} | \boldsymbol{z}_{\geq 2}) \big]. \tag{8}
$$

Setting all $\beta$s to the same value recovers the conventional $\beta$-VAE (Eq. 2), which trades off distortion against total information content in $\{z_{\ell}\}$. Tuning each $\beta$-hyperparameter individually allows trading off information content across latents. (In a very deep HVAE (i.e., large $L$) it may be more practical to group layers into only a few bins and to use the same $\beta$-value for all layers within a bin.) We analyze how to tune the $\beta$s for various applications theoretically in Section 4 and empirically in Section 5.
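A minimal sketch of the resulting training objective for $L = 2$, reusing the hypothetical `layer_rates` helper above; `model.reconstruct` and `model.distortion` are assumed interfaces, not part of the paper.

```python
def trading_loss(x, model, beta2, beta1):
    """Information-trading objective of Eq. 8 for L = 2; beta2 = beta1 recovers Eq. 2."""
    R2, R1 = layer_rates(x, model.q2, model.q1, model.p1)
    D = model.distortion(x, model.reconstruct(x))         # e.g., Gaussian or Bernoulli NLL
    return (D + beta2 * R2 + beta1 * R1).mean()
```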
# 4 INFORMATION-THEORETICAL PERFORMANCE BOUNDS FOR HVAES
In this section, we analyze theoretically how various performance metrics for HVAEs are restricted by the individual layers' rates $R(\boldsymbol{z}_{L})$ and $R(\boldsymbol{z}_{\ell} \mid \boldsymbol{z}_{\geq \ell + 1})$ identified in Eq. 7 for a HVAE with "top-down" inference model. Our analysis motivates the use of the information-trading loss function in Eq. 8 for training HVAEs, following the argument from the introduction that VAEs are commonly used for a vast variety of tasks. As we show, different tasks require different trade-offs that can be targeted by tuning the Lagrange multipliers $\boldsymbol{\beta}$ in Eq. 8. We group tasks into the application domains of (i) data reconstruction and manipulation, (ii) representation learning, and (iii) data generation.

Data Reconstruction and Manipulation. The most obvious class of application domains of VAEs includes tasks that combine encoder and decoder to map some data point $\pmb{x}$ to representations $\{z_{\ell}\}$ and then back to the data space. The simplest performance metric for such data reconstruction tasks is the expected distortion $\mathbb{E}_{p_{\mathrm{data}}(\pmb{x})}[D]$, which we can bound by combining Eq. 3 with Eqs. 6-7,

$$
\mathbb{E}_{p_{\text{data}}(\boldsymbol{x})}[D] \geq H[p_{\text{data}}(\boldsymbol{x})] - \mathbb{E}_{p_{\text{data}}(\boldsymbol{x})}\big[ R(\boldsymbol{z}_{L}) + R(\boldsymbol{z}_{L-1} \mid \boldsymbol{z}_{L}) + \dots + R(\boldsymbol{z}_{1} \mid \boldsymbol{z}_{\geq 2}) \big]. \tag{9}
$$
Eq. 9 would suggest that higher rates (i.e., lower $\beta$'s) are always better for data reconstruction tasks. However, in many practical tasks (e.g., image upscaling, denoising, or inpainting) the goal is not solely to reconstruct the original data but also to manipulate the latent representations $\{z_{\ell}\}$ in a meaningful way. Here, lower rates can lead to more semantically meaningful representation spaces (see, e.g., Section 5.6 below). Controlling how rate is distributed across layers via Eq. 8 may allow practitioners to have a semantically meaningful high-level representation $z_{L}$ with low rate $R(z_{L})$ while still retaining a high total rate $R$, thus allowing for low distortion $D$ without violating Eq. 9.

Representation Learning. In many practical applications, VAEs are used as nonlinear dimensionality reduction methods to prepare some complicated high-dimensional data $\pmb{x}$ for downstream tasks such as classification, regression, visualization, clustering, or anomaly detection. We consider a classifier $p_{\mathrm{cls}}(y|\boldsymbol{z}_{\ell})$ operating on the latents $\boldsymbol{z}_{\ell}$ at some level $\ell$. We assume that the (unknown) true data generative process $p_{\mathrm{data}}(y,\pmb{x}) = p_{\mathrm{data}}(y)p_{\mathrm{data}}(\pmb{x}|y)$ generates data $\pmb{x}$ conditioned on some true label $y$, thus defining a Markov chain $y\xrightarrow{p_{\mathrm{data}}}\boldsymbol{x}\xrightarrow{q_{\phi}}\boldsymbol{z}_{\ell}\xrightarrow{p_{\mathrm{cls}}}\hat{y}$ where $\hat{y} := \arg \max_y p_{\mathrm{cls}}(y|\boldsymbol{z}_\ell)$. Classification accuracy is bounded (Meyen, 2016) by a function of the mutual information $I_q(y;\boldsymbol{z}_\ell)$,

$$
\begin{aligned}
I_{q}(y; \boldsymbol{z}_{\ell}) &\leq I_{q}(\boldsymbol{x}; \boldsymbol{z}_{\ell}) \equiv \mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}\!\left[ \mathbb{E}_{q_{\phi}(\boldsymbol{z}_{\ell} | \boldsymbol{x})}\!\left[ \log \frac{q_{\phi}(\boldsymbol{z}_{\ell} | \boldsymbol{x})}{q_{\phi}(\boldsymbol{z}_{\ell})} \right] \right] \\
&= \mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}\!\left[ \mathbb{E}_{q_{\phi}(\boldsymbol{z}_{\ell} \mid \boldsymbol{x})}\!\left[ \log \frac{q_{\phi}(\boldsymbol{z}_{\ell} \mid \boldsymbol{x})}{p_{\theta}(\boldsymbol{z}_{\ell})} \right] \right] - D_{\mathrm{KL}}\big[ q_{\phi}(\boldsymbol{z}_{\ell}) \,\|\, p_{\theta}(\boldsymbol{z}_{\ell}) \big] \\
&\leq \mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}\!\left[ \mathbb{E}_{q_{\phi}(\boldsymbol{z}_{\geq \ell} | \boldsymbol{x})}\!\left[ \log \frac{q_{\phi}(\boldsymbol{z}_{\geq \ell} | \boldsymbol{x})}{p_{\theta}(\boldsymbol{z}_{\geq \ell})} \right] - \mathbb{E}_{q_{\phi}(\boldsymbol{z}_{\ell} | \boldsymbol{x})}\Big[ D_{\mathrm{KL}}\big[ q_{\phi}(\boldsymbol{z}_{\geq \ell+1} \mid \boldsymbol{x}, \boldsymbol{z}_{\ell}) \,\|\, p_{\theta}(\boldsymbol{z}_{\geq \ell+1} \mid \boldsymbol{z}_{\ell}) \big] \Big] \right] \\
&\leq \mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}\big[ \underbrace{R(\boldsymbol{z}_{L}) + R(\boldsymbol{z}_{L-1} | \boldsymbol{z}_{L}) + \ldots + R(\boldsymbol{z}_{\ell} | \boldsymbol{z}_{\geq \ell+1})}_{=:\, R(\boldsymbol{z}_{\geq \ell})\ (\leq R)} \big].
\end{aligned} \tag{10}
$$

Here, $q_{\phi}(\boldsymbol{z}_{\ell}) := \mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}[q_{\phi}(\boldsymbol{z}_{\ell}|\boldsymbol{x})]$, and we identify $R(\boldsymbol{z}_{\geq \ell})$ as the rate accumulated in all layers from $\boldsymbol{z}_L$ to $\boldsymbol{z}_{\ell}$. The first inequality in Eq. 10 comes from the data processing inequality (MacKay, 2003), and the other two inequalities result from discarding the (nonnegative) KL-terms. The classification accuracy is thus bounded by (Meyen, 2016) (see also the proof in Appendix B)
$$
\text{class.} \leq f^{-1}\big(I_{q}(y; \boldsymbol{z}_{\ell})\big) \leq f^{-1}\big(\mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}[R(\boldsymbol{z}_{\geq \ell})]\big) \quad \Big(\leq f^{-1}\big(\mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}[R]\big)\Big), \tag{11}
$$

where $f^{-1}$ is the inverse of the monotonic function $f(\alpha) = H[p_{\mathrm{data}}(y)] + \alpha \log \alpha + (1 - \alpha) \log \frac{1 - \alpha}{M - 1}$ with $M$ being the number of classes and $H[p_{\mathrm{data}}(y)] \leq \log M$ the marginal label entropy. Eq. 11 suggests that the accuracy of an optimal classifier on $z_{\ell}$ would increase as the rate $R(z_{\geq \ell})$ accumulated from $z_{L}$ to $z_{\ell}$ grows (i.e., as $\beta_{\geq \ell} \to 0$), and that the rate added in downstream layers $z_{< \ell}$ would be irrelevant. Practical classifiers, however, have a limited expressiveness, which a very high rate $R(z_{\geq \ell})$ might exceed by encoding too many details into $z_{\ell}$ that are not necessary for classification. We observe in Section 5.6 that, in such cases, increasing the rates of downstream layers $z_{< \ell}$ improves classification accuracy as it allows keeping $z_{\ell}$ simpler by deferring details to $z_{< \ell}$.
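Since $f$ is monotonic on $[1/M, 1]$, its inverse can be evaluated numerically, e.g., by root finding. The sketch below is one way to compute the bound in Eq. 11, under the assumptions (not stated by the paper's code) that rates are measured in nats and labels are uniform, so $H[p_{\mathrm{data}}(y)] = \log M$.

```python
import numpy as np
from scipy.optimize import brentq

def accuracy_bound(rate_nats, M=10):
    """Numerically invert f from Eq. 11 to bound accuracy by the accumulated rate."""
    H_y = np.log(M)                                  # assumes uniform labels
    def f(a):
        eps = 1e-12                                  # guards log(0) at the endpoints
        return H_y + a * np.log(a + eps) + (1 - a) * np.log((1 - a + eps) / (M - 1))
    target = min(rate_nats, H_y)                     # I_q(y; z) can never exceed H[p(y)]
    hi = 1.0 - 1e-9
    if target >= f(hi):
        return 1.0
    return brentq(lambda a: f(a) - target, 1.0 / M, hi)

print(accuracy_bound(1.5, M=10))                     # bound for 1.5 nats of accumulated rate
```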

Data Generation. The original proposal of VAEs (Kingma & Welling, 2014) motivated them from a generative modeling perspective, using the fact that, for $\beta = 1$, the negative of the loss function in Eq. 2 is a lower bound on the log marginal data likelihood. This suggests setting all $\beta$-hyperparameters in Eq. 8 to values close to 1 if a HVAE is used primarily for its generative model $p_{\theta}$.

In summary, our theoretical analysis suggests that optimally tuned layer-wise rates depend on whether a HVAE is used for data reconstruction, representation learning, or data generation. The next section tests our theoretical predictions empirically for the same three application domains.

# 5 EXPERIMENTS

To demonstrate the features of our hierarchical information trading framework, we run large-scale grid searches over a two-dimensional rate space using two different implementations of HVAEs and three different data sets. Although the proposed framework is applicable to HVAEs with $L \geq 2$, we only use HVAEs with $L = 2$ in our experiments for simplicity and visualization purposes.

# 5.1 EXPERIMENTAL SETUP

Data sets. We used the SVHN (Netzer et al., 2011) and CIFAR-10 (Krizhevsky, 2009) data sets (both $32 \times 32$ pixel color images), and MNIST (LeCun et al., 1998) ($28 \times 28$ binary pixel images). SVHN consists of photographed house numbers from 0 to 9, which are geometrically simpler than the 10 classes of objects in CIFAR-10 but more complex than MNIST digits. Most results shown in the main paper use SVHN; comprehensive results for CIFAR-10 and MNIST are shown in Appendix A.2 and tell a similar story except where explicitly discussed.

Model Architectures. For the generative model (Eq. 1), we assume a (fixed) standard Gaussian prior $p(\boldsymbol{z}_2) = \mathcal{N}(\mathbf{0},\mathbf{I})$, and we use diagonal Gaussian models for $p_{\theta}(\boldsymbol{z}_1|\boldsymbol{z}_2) = \mathcal{N}(g_{\mu}(\boldsymbol{z}_2),g_{\sigma}(\boldsymbol{z}_2)^2)$ and (for SVHN and CIFAR-10) $p_{\theta}(\boldsymbol{x}|\boldsymbol{z}_1) = \mathcal{N}(g_{\mu'}(\boldsymbol{z}_1),\sigma_x^2\mathbf{I})$ (this is similar to, e.g., (Minnen et al., 2018)). Here, $g_{\mu}$, $g_{\sigma}$, and $g_{\mu'}$ denote neural networks (see details below). Since MNIST has binary pixel values, we model it with a Bernoulli distribution for $p_{\theta}(\boldsymbol{x}|\boldsymbol{z}_1) = \mathrm{Bern}(g_{\mu'}(\boldsymbol{z}_1))$. For the inference model, we also use diagonal Gaussian models for $q_{\phi}(\boldsymbol{z}_2|\boldsymbol{x}) = \mathcal{N}(f_{\mu}(\boldsymbol{x}),f_{\sigma}(\boldsymbol{x})^2)$ and for $q_{\phi}(\boldsymbol{z}_1|\boldsymbol{x},\boldsymbol{z}_2) = \mathcal{N}(f_{\mu'}(\boldsymbol{x},\boldsymbol{z}_2),f_{\sigma'}(\boldsymbol{x},\boldsymbol{z}_2)^2)$, where $f_{\mu}$, $f_{\sigma}$, $f_{\mu'}$, and $f_{\sigma'}$ are again neural networks.

We examine both LVAE (Figure 2(b)) and our generalized top-down HVAEs (GHVAEs; see Figure 2(c)), using simple network architectures with only 2 to 3 convolutional and 1 fully connected layers (see Appendix A.1 for details) so that we can scan a large rate space efficiently. Note that we are not trying to find new state-of-the-art HVAEs. Results for LVAE are in Appendix A.2.2.

We trained 441 different HVAEs for each data set/model combination, scanning the rate-hyperparameters $(\beta_{2},\beta_{1})$ over a $21\times 21$ grid ranging from 0.1 to 10 on a log scale in both directions (see Figure 1 on page 2, right panels). Each model took about 2 hours to train on an RTX-2080Ti GPU ($\sim 27$ hours in total for each data set/model combination using 32 GPUs in parallel).
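
As a concrete illustration, the hyperparameter grid just described can be generated in a few lines (a minimal NumPy sketch; the variable names are ours):

```python
import itertools
import numpy as np

# 21 log-spaced values from 0.1 to 10 for each Lagrange multiplier
betas = np.logspace(-1, 1, num=21)
grid = list(itertools.product(betas, betas))  # all (beta_2, beta_1) pairs
assert len(grid) == 441
```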

Baselines. Our proposed framework (Eq. 8) generalizes over both VAEs and $\beta$-VAEs (Eq. 2), which we obtain in the cases $\beta_{2} = \beta_{1} = 1$ and $\beta_{2} = \beta_{1}$, respectively. These baselines are indicated as black and red circles, respectively, in Figures 3, 5, 6, and 7, discussed below.

(a) Rate/rate/distortion surface for SVHN. (b) PSNR-rates comparison in 2d.

Figure 3: PSNR-rate trade-off for GHVAEs trained on SVHN and CIFAR-10. Figure (a) visualizes the same data as the left panel of (b) in 3d. Black circles mark standard VAEs $(\beta_{2} = \beta_{1} = 1)$, red circles mark $\beta$-VAEs $(\beta_{2} = \beta_{1})$, and purple circles mark optimal models along constant total rate (dashed diagonal lines) as defined in Section 5.3. Crosses point to columns in Figure 4.

Metrics. Performance metrics for the three application domains of VAEs mentioned in the introduction are introduced at the beginnings of the corresponding Sections 5.4-5.6. In addition, we evaluate the individual rates $R(\boldsymbol{z}_2)$ and $R(\boldsymbol{z}_1|\boldsymbol{z}_2)$ (Eq. 7), which we report in nats (i.e., to base $e$).
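
For a two-layer model with the diagonal Gaussian families from Section 5.1, both rates have closed forms; below is a minimal PyTorch sketch (ours) of the per-sample KL terms, reported in nats:

```python
import torch

def kl_diag_gaussians(mu_q, sig_q, mu_p, sig_p):
    """KL[N(mu_q, diag(sig_q^2)) || N(mu_p, diag(sig_p^2))] per sample, in nats."""
    var_ratio = (sig_q / sig_p) ** 2
    return 0.5 * (var_ratio + ((mu_q - mu_p) / sig_p) ** 2
                  - 1.0 - torch.log(var_ratio)).sum(dim=-1)

# R(z2) uses the fixed prior N(0, I); R(z1|z2) uses the learned prior p(z1|z2).
# mu2, sig2, mu1, sig1 come from the inference networks, prior_mu and prior_sig
# from g_mu(z2) and g_sigma(z2) (names as in Section 5.1):
# R_z2 = kl_diag_gaussians(mu2, sig2, torch.zeros_like(mu2), torch.ones_like(sig2))
# R_z1_given_z2 = kl_diag_gaussians(mu1, sig1, prior_mu, prior_sig)
```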

# 5.2 THERE IS NO "ONE HVAE FITS ALL"

Figure 1 on page 2 summarizes our results. The $21 \times 21$ GHVAEs trained with the grid of hyperparameters $\beta_{2}$ and $\beta_{1}$ map out a surface in a 3d-space spanned by suitable metrics for the three application domains (metrics defined in Sections 5.4-5.6 below). The two upper right panels map colors on this surface to the $\beta$-values used for training and to the resulting layer-wise rates, respectively. The lower right panels show performance landscapes and identify the optimal models for the three application domains of data reconstruction $(\triangle)$, representation learning $(\diamond)$, and generative modeling $(\bigcirc)$.

The figure shows that moving away from a conventional $\beta$-VAE ($\beta_{2} = \beta_{1}$; dashed red lines in Figure 1) allows us to find better models for a given application domain, as the three application domains favor vastly different regions in $\beta$-space. Thus, there is no single HVAE that is optimal for all tasks, and a HVAE that has been optimized for one task can perform poorly on a different task.

# 5.3 DEFINITION OF THE OPTIMAL MODEL FOR A GIVEN TOTAL RATE

One of the questions we study in Sections 5.4-5.6 below is: "Which allocation of rates across layers results in the best model performance if we keep the total rate $R$ fixed?" Unfortunately, it is difficult to keep $R$ fixed at training time since we control rates only indirectly via their Lagrange multipliers $\beta_{2}$ and $\beta_{1}$. We instead use the following definition, illustrated in Figure 6 for a performance metric introduced in Section 5.6 below. The figure plots the performance metric over $R$ for all $21 \times 21$ $\beta$-settings and highlights with purple circles all points on the upper convex hull. These highlighted models are optimal for a small interval of total rates in the following sense: if we use the total rates $R$ of all highlighted points to partition the horizontal axis into intervals then, by definition of the convex hull, each highlighted point represents the model with highest performance in either the interval to its left or the one to its right.
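
This selection can be implemented with a standard monotone-chain upper convex hull over (total rate, metric) points; a minimal sketch (ours, assuming NumPy arrays of equal length):

```python
import numpy as np

def upper_hull_indices(rates, metric):
    """Indices of models on the upper convex hull of (total rate, metric) points."""
    order = np.argsort(rates)
    hull = []
    for i in order:
        # pop the last hull point while it lies on or below the segment
        # from the second-to-last hull point to the new point i
        while len(hull) >= 2:
            j, k = hull[-2], hull[-1]
            cross = ((rates[k] - rates[j]) * (metric[i] - metric[j])
                     - (metric[k] - metric[j]) * (rates[i] - rates[j]))
            if cross >= 0:
                hull.pop()
            else:
                break
        hull.append(i)
    return hull
```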

# 5.4 PERFORMANCE ON DATA RECONSTRUCTION

Reconstruction is a popular task for VAEs, e.g., in the area of lossy compression (Ballé et al., 2017). We measure reconstruction quality using the common peak signal-to-noise ratio (PSNR), which is equal to $\mathbb{E}_{\boldsymbol{x}\sim \mathbb{X}_{\mathrm{test}}}[-\log D]$ up to rescaling and shifting. Higher PSNR means better reconstruction.
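
For reference, a minimal sketch (ours) of the standard PSNR definition for images scaled to $[0, 1]$:

```python
import numpy as np

def psnr(x, x_hat, max_val=1.0):
    """Peak signal-to-noise ratio in dB; x and x_hat are arrays in [0, max_val]."""
    mse = np.mean((x - x_hat) ** 2)
    return 10.0 * np.log10(max_val ** 2 / mse)
```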

Figure 3(a) shows a 3d-plot of PSNR as a function of both $R(\boldsymbol{z}_{1}|\boldsymbol{z}_{2})$ and $R(\boldsymbol{z}_{2})$ for SVHN, thus generalizing the rate/distortion curve of a conventional $\beta$-VAE to a rate/rate/distortion surface. Figure 3(b) introduces a more compact 2d-representation of the same data that we use for all remaining metrics in the rest of this section and in Appendix A.2, and it also shows results for CIFAR-10.

Figure 4: Samples (top) and reconstructions (bottom) from 3 different models (blue column labels "1", "2", and "3" from left to right correspond to crosses "1", "2", and "3" in Figures 3(b) & 5). Consistent with PSNR and IS metrics, model "1" produces the poorest samples but the best reconstructions.

Figure 5: Sample generation performance, measured in Inception Score (IS, see Eq. 12) and its factorization into diversity and sharpness, as a function of layer-wise rates for GHVAEs trained using SVHN data. Crosses in the left panel correspond to samples shown in Figure 4. Black, red, and purple circle markers same as in Figure 3.

Figure 6: RBF-SVM classification accuracies on $\boldsymbol{\mu}_{2}$. Dashed line shows the theoretical bound (Eq. 11). Other markers as in Figure 3.

Unsurprisingly, and consistent with Eq. 9, reconstruction performance improves as the total rate grows. However, minimizing distortion without any constraints is not useful in practice, as we can simply use the original data, which has no distortion. To simulate a practical constraint in, e.g., a data-compression application, we consider models with optimal PSNR for a given total rate $R$ (as defined in Section 5.3), which are marked as purple circles in Figure 3(b). We see for both SVHN and CIFAR-10 that conventional $\beta$-VAEs ($\beta_{2} = \beta_{1}$; red circles) perform somewhat suboptimally for a given total rate and can be improved by trading some rate in $\boldsymbol{z}_{2}$ for some rate in $\boldsymbol{z}_{1}$. Reconstruction examples for the three models marked with crosses in Figure 3(b) are shown in Figure 4 (bottom). Visual reconstruction quality improves from "3" to "2" to "1", consistent with the reported PSNRs.

# 5.5 PERFORMANCE ON SAMPLE GENERATION

We next evaluate how tuning layer-wise rates affects the quality of samples from the generative model. We measure sample quality by the widely used Inception Score (IS) (Salimans et al., 2016),

$$
\mathrm{IS} = \exp \left\{ \mathbb{E}_{p_{\theta}(\boldsymbol{x})} \left[ D_{\mathrm{KL}}\left[ p_{\mathrm{cls.}}(y | \boldsymbol{x}) \,\|\, p_{\mathrm{cls.}}(y) \right] \right] \right\} = e^{H\left[p_{\mathrm{cls.}}(y)\right]} \times e^{-\mathbb{E}_{p_{\theta}(\boldsymbol{x})}\left[H\left[p_{\mathrm{cls.}}(y | \boldsymbol{x})\right]\right]} \tag{12}
$$

Here, $p_{\theta}$ is the trained generative model (Eq. 1), $p_{\mathrm{cls.}}(y|\boldsymbol{x})$ is the predictive distribution of a classifier trained on the same training set, and $p_{\mathrm{cls.}}(y) \coloneqq \mathbb{E}_{p_{\theta}(\boldsymbol{x})}[p_{\mathrm{cls.}}(y|\boldsymbol{x})]$. The second equality in Eq. 12 follows Barratt & Sharma (2018) to split IS into a product of a diversity score and a sharpness score. Higher is better for all scores. The classifier is a ResNet-18 (He et al., 2016) for SVHN (test accuracy $95.02\%$) and a DenseNet-121 (Huang et al., 2017) for CIFAR-10 (test accuracy $94.34\%$).
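
The factorized score can be computed directly from an array of classifier probabilities; a minimal NumPy sketch (ours, assuming strictly positive probabilities) of Eq. 12:

```python
import numpy as np

def inception_score_factorized(p_y_given_x):
    """IS (Eq. 12) and its diversity/sharpness factors.
    p_y_given_x: (N, M) classifier probabilities for N generated samples."""
    p_y = p_y_given_x.mean(axis=0)                              # p_cls(y)
    diversity = np.exp(-np.sum(p_y * np.log(p_y)))              # e^{H[p_cls(y)]}
    cond_ent = -np.mean(np.sum(p_y_given_x * np.log(p_y_given_x), axis=1))
    sharpness = np.exp(-cond_ent)                               # e^{-E[H[p_cls(y|x)]]}
    return diversity * sharpness, diversity, sharpness
```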

Figure 5 (left) shows IS for GHVAEs trained on SVHN. Unlike the results for PSNR, here, a higher rate does not always lead to better sample quality: for very high $R(\boldsymbol{z}_2)$ and low $R(\boldsymbol{z}_1|\boldsymbol{z}_2)$, IS eventually drops. The region of high IS is in the area where $\beta_2 < \beta_1$, i.e., where $R(\boldsymbol{z}_2)$ is higher than in a comparable conventional $\beta$-VAE. The center and right panels of Figure 5 show diversity and sharpness, indicating that IS is mainly driven here by sharpness, which depends mostly on $R(\boldsymbol{z}_2)$, possibly because $\boldsymbol{z}_2$ captures higher-level concepts than $\boldsymbol{z}_1$, which may be more important to the classifier in Eq. 12. Samples from the three models marked with crosses in Figure 5 are shown in Figure 4 (top). Visual sample quality improves from "1" to "3" to "2", consistent with the reported IS.

Figure 7: Mutual information (MI) $I_{q}(y;\boldsymbol{z}_{2})$ and classification accuracies of four classifiers (see column labels) as a function of layer-wise rates $R(\boldsymbol{z}_{2})$ and $R(\boldsymbol{z}_{1}|\boldsymbol{z}_{2})$. Classifiers are conditioned on $\boldsymbol{\mu}_{2} \coloneqq \arg \max_{\boldsymbol{z}_{2}} q(\boldsymbol{z}_{2}|\boldsymbol{x})$ learned from GHVAEs trained with SVHN (top) and CIFAR-10 (bottom). Black, red, and purple circle markers same as in Figure 3.

# 5.6 PERFORMANCE ON REPRESENTATION LEARNING FOR DOWNSTREAM CLASSIFICATION

VAEs are very popular for representation learning, as they map complicated high-dimensional data $\boldsymbol{x}$ to typically lower-dimensional representations $\{\boldsymbol{z}_{\ell}\}$. To measure the quality of learned representations, we train two sets of classifiers on a labeled test set for each trained HVAE, each set consisting of: logistic regression, a Support Vector Machine (SVM) (Boser et al., 1992) with linear kernel, an SVM with RBF kernel, and $k$-nearest neighbors (kNN) with $k = 5$. One set of classifiers is conditioned on the mode $\boldsymbol{\mu}_{2}$ of $q_{\phi}(\boldsymbol{z}_2|\boldsymbol{x})$ and the other one on the mode $\boldsymbol{\mu}_{1}$ of $q_{\phi}(\boldsymbol{z}_1|\boldsymbol{z}_2,\boldsymbol{x})$, where $\boldsymbol{z}_{2} \sim q_{\phi}(\boldsymbol{z}_{2}|\boldsymbol{x})$. We use the implementations from scikit-learn (Pedregosa et al., 2011) for all classifiers.
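
A minimal sketch (ours) of this classifier battery with scikit-learn; the exact solver settings used in the paper are not specified, so the hyperparameters below are illustrative defaults:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC

# mu2: (N, d) posterior modes f_mu(x); y: class labels (names as in Section 5.1)
classifiers = {
    "log. reg.": LogisticRegression(max_iter=1000),
    "lin. SVM": LinearSVC(),
    "RBF SVM": SVC(kernel="rbf"),
    "kNN": KNeighborsClassifier(n_neighbors=5),
}
# accuracies = {name: clf.fit(mu2_fit, y_fit).score(mu2_eval, y_eval)
#               for name, clf in classifiers.items()}
```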

Figure 7 shows the classification accuracies (columns 2-5) for all classifiers trained on $\boldsymbol{\mu}_{2}$. The first column shows the mutual information $I_{q}(y;\boldsymbol{z}_{2})$, which depends mainly on $R(\boldsymbol{z}_{2})$, as expected from Eq. 10. As long as the classifier is expressive enough (e.g., RBF-SVM or kNN) and the data set is simple (SVHN; top row), higher mutual information ($\approx$ higher $R(\boldsymbol{z}_{2})$) corresponds to higher classification accuracy, consistent with Eq. 11. But for less expressive (e.g., linear) classifiers or more complex data (CIFAR-10; bottom row), increasing $R(\boldsymbol{z}_{1} | \boldsymbol{z}_{2})$ improves classification accuracy (see purple circles in the corresponding panels), consistent with the discussion below Eq. 11. We see a similar effect (Table 1) for most classifier/data set combinations when replacing $\boldsymbol{\mu}_{2}$ by $\boldsymbol{\mu}_{1}$, which has more information about $\boldsymbol{x}$ but is also higher dimensional.

Table 1: Optimal classification accuracies (across all $(\beta_{2},\beta_{1})$-settings) using either $\boldsymbol{\mu}_{2}$ or $\boldsymbol{\mu}_{1}$.

<table><tr><td>Data Set</td><td>log. reg.</td><td>lin. SVM</td><td>RBF SVM</td><td>kNN</td></tr><tr><td>SVHN (μ2)</td><td>28.43 %</td><td>27.87 %</td><td>77.60 %</td><td>64.25 %</td></tr><tr><td>SVHN (μ1)</td><td>45.77 %</td><td>49.81 %</td><td>59.28 %</td><td>56.49 %</td></tr><tr><td>CIFAR-10 (μ2)</td><td>47.36 %</td><td>46.95 %</td><td>53.15 %</td><td>44.20 %</td></tr><tr><td>CIFAR-10 (μ1)</td><td>43.27 %</td><td>42.55 %</td><td>45.60 %</td><td>39.25 %</td></tr></table>

# 6 CONCLUSIONS

We classified the various tasks that can be performed with Variational Autoencoders (VAEs) into three application domains and argued that each domain has different trade-offs, such that a good VAE for one domain is not necessarily good for another. This observation motivated us to propose a refinement of the rate/distortion theory of VAEs that allows trading off rates across individual layers of latents in hierarchical VAEs. We showed both theoretically and empirically that the proposal indeed provides practitioners with better control for tuning VAEs for the three application domains. In the future, it would be interesting to explore adaptive schedules for the Lagrange parameters $\beta$ that would make it possible to target a specific given rate for each layer in a single training run, for example by using the method proposed by Rezende & Viola (2018).

# ACKNOWLEDGMENTS

The authors would like to thank Johannes Zenn, Zicong Fan, and Zhen Liu for their helpful discussions. Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy - EXC number 2064/1 - Project number 390727645. This work was supported by the German Federal Ministry of Education and Research (BMBF): Tübingen AI Center, FKZ: 01IS18039A. The authors thank the International Max Planck Research School for Intelligent Systems (IMPRS-IS) for supporting Tim Z. Xiao.

Reproducibility Statement. All code necessary to reproduce the results in this paper is available at https://github.com/timxzz/HIT/.

# REFERENCES

Eirikur Agustsson and Lucas Theis. Universally quantized neural compression. Advances in Neural Information Processing Systems, 33:12367-12376, 2020.

Alexander Alemi, Ben Poole, Ian Fischer, Joshua Dillon, Rif A Saurous, and Kevin Murphy. Fixing a broken ELBO. In International Conference on Machine Learning, pp. 159-168. PMLR, 2018.

Alexander A Alemi, Ian Fischer, Joshua V Dillon, and Kevin Murphy. Deep variational information bottleneck. In International Conference on Learning Representations, 2017.

Johannes Ballé, Valero Laparra, and Eero P Simoncelli. End-to-end optimized image compression. In International Conference on Learning Representations, 2017.

Johannes Ballé, David Minnen, Saurabh Singh, Sung Jin Hwang, and Nick Johnston. Variational image compression with a scale hyperprior. In International Conference on Learning Representations, 2018.

Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018.

Charles H Bennett, Peter W Shor, John A Smolin, and Ashish V Thapliyal. Entanglement-assisted capacity of a quantum channel and the reverse Shannon theorem. IEEE Transactions on Information Theory, 48(10):2637-2655, 2002.

Bernhard E Boser, Isabelle M Guyon, and Vladimir N Vapnik. A training algorithm for optimal margin classifiers. In Proceedings of the Fifth Annual Workshop on Computational Learning Theory, pp. 144-152, 1992.

Rewon Child. Very deep VAEs generalize autoregressive models and can outperform them on images. In International Conference on Learning Representations, 2021.

Zhiwei Deng, Rajitha Navarathna, Peter Carr, Stephan Mandt, Yisong Yue, Iain Matthews, and Greg Mori. Factorized variational autoencoders for modeling audience reactions to movies. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2577-2586, 2017.

Brendan J Frey and GE Hinton. Efficient stochastic source coding and an application to a Bayesian network source model. Computer Journal, 1997.

Ishaan Gulrajani, Kundan Kumar, Faruk Ahmed, Adrien Ali Taiga, Francesco Visin, David Vazquez, and Aaron Courville. PixelVAE: A latent variable model for natural images. In International Conference on Learning Representations, 2017.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770-778, 2016.

Irina Higgins, Loic Matthey, Arka Pal, Christopher Burgess, Xavier Glorot, Matthew Botvinick, Shakir Mohamed, and Alexander Lerchner. beta-VAE: Learning basic visual concepts with a constrained variational framework. In International Conference on Learning Representations, 2017.

Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700-4708, 2017.

Zhuxi Jiang, Yin Zheng, Huachun Tan, Bangsheng Tang, and Hanning Zhou. Variational deep embedding: An unsupervised and generative approach to clustering. In International Joint Conference on Artificial Intelligence, 2017.

Diederik P Kingma and Max Welling. Auto-encoding variational Bayes. In International Conference on Learning Representations, 2014.

Alex Krizhevsky. Learning multiple layers of features from tiny images. 2009.

Eric Laloy, Romain Hérault, John Lee, Diederik Jacques, and Niklas Linde. Inversion using a new low-dimensional representation of complex binary geological media based on a deep neural network. Advances in Water Resources, 110:387-405, 2017.

Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324, 1998.

Lars Maaløe, Marco Fraccaro, Valentin Liévin, and Ole Winther. BIVA: A very deep hierarchy of latent variables for generative modeling. Advances in Neural Information Processing Systems, 32, 2019.

David JC MacKay. Information Theory, Inference and Learning Algorithms. Cambridge University Press, 2003.

Sascha Meyen. Relation between classification accuracy and mutual information in equally weighted classification tasks. Master's thesis, Universität Hamburg, 2016.

David Minnen, Johannes Ballé, and George D Toderici. Joint autoregressive and hierarchical priors for learned image compression. Advances in Neural Information Processing Systems, 31, 2018.

Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading digits in natural images with unsupervised feature learning. In NIPS Workshop on Deep Learning and Unsupervised Feature Learning, 2011.

F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011.

Ali Razavi, Aaron Van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with VQ-VAE-2. Advances in Neural Information Processing Systems, 32, 2019.

Danilo Jimenez Rezende and Fabio Viola. Taming VAEs. arXiv preprint arXiv:1810.00597, 2018.

Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. Stochastic backpropagation and approximate inference in deep generative models. In International Conference on Machine Learning, pp. 1278-1286. PMLR, 2014.

Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training GANs. Advances in Neural Information Processing Systems, 29, 2016.

Casper Kaae Sønderby, Tapani Raiko, Lars Maaløe, Søren Kaae Sønderby, and Ole Winther. Ladder variational autoencoders. Advances in Neural Information Processing Systems, 29, 2016.

Hiroshi Takahashi, Tomoharu Iwata, Yuki Yamanaka, Masanori Yamada, and Satoshi Yagi. Student-t variational autoencoder for robust density estimation. In International Joint Conference on Artificial Intelligence, 2018.

Naftali Tishby and Noga Zaslavsky. Deep learning and the information bottleneck principle. In 2015 IEEE Information Theory Workshop (ITW), pp. 1-5. IEEE, 2015.

Arash Vahdat and Jan Kautz. NVAE: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020.

Haowen Xu, Wenxiao Chen, Nengwen Zhao, Zeyan Li, Jiahao Bu, Zhihan Li, Ying Liu, Youjian Zhao, Dan Pei, Yang Feng, et al. Unsupervised anomaly detection via variational auto-encoder for seasonal KPIs in web applications. In Proceedings of the 2018 World Wide Web Conference, pp. 187-196, 2018.

Yibo Yang, Robert Bamler, and Stephan Mandt. Improving inference for neural image compression. Advances in Neural Information Processing Systems, 33:573-584, 2020.

Li Yingzhen and Stephan Mandt. Disentangled sequential autoencoder. In International Conference on Machine Learning, 2018.

# A EXPERIMENT SUPPLEMENTARIES

# A.1 IMPLEMENTATION DETAILS

Table 2: Model architecture details for generalized top-down HVAEs (GHVAEs) used in Section 5. Conv and ConvTransp denote convolutional and transposed convolutional layers, whose arguments are (input channels, output channels, kernel size, stride, padding). FC denotes a fully connected layer.

<table>
<tr><td>Data set</td><td>q(z2|x)</td><td>q(z1|z2,x)</td><td>p(z1|z2)</td><td>p(x|z1)</td></tr>
<tr><td rowspan="5">SVHN/CIFAR-10</td><td>Share:<br>Conv(3, 32, 4, 2, 1),<br>Conv(32, 32, 4, 2, 1),<br>Conv(32, 32, 4, 2, 1)</td><td>For x:<br>Conv(3, 32, 4, 2, 1),<br>Conv(32, 32, 4, 2, 1)</td><td>Share:<br>FC(In=32, Out=256)</td><td rowspan="4">ConvTransp(32, 32, 4, 2, 1),<br>ConvTransp(32, 32, 4, 2, 1),<br>ConvTransp(32, 3, 4, 2, 1)</td></tr>
<tr><td>For mean:<br>FC(In=512, Out=32)</td><td>Share:<br>ConvTransp(64, 32, 4, 2, 1)</td><td>For mean:<br>FC(In=256, Out=512)</td></tr>
<tr><td>For variance:<br>FC(In=512, Out=32)</td><td>For mean:<br>Conv(32, 32, 3, 1, 1)</td><td>For variance:<br>FC(In=256, Out=512)</td></tr>
<tr><td></td><td>For variance:<br>Conv(32, 32, 3, 1, 1)</td><td></td></tr>
<tr><td>z1 dims: 512</td><td>z2 dims: 32</td><td>σx=0.71</td><td>Total params: 475811</td></tr>
<tr><td rowspan="5">MNIST (Binary)</td><td>Share:<br>Conv(1, 16, 4, 2, 1),<br>Conv(16, 16, 4, 2, 1),<br>Conv(16, 16, 4, 1, 0)</td><td>For x:<br>Conv(1, 16, 4, 2, 1),<br>Conv(16, 16, 4, 1, 0)</td><td>Share:<br>FC(In=20, Out=128)</td><td rowspan="4">ConvTransp(16, 16, 4, 1, 0),<br>ConvTransp(16, 16, 4, 2, 1),<br>ConvTransp(16, 1, 4, 2, 1)</td></tr>
<tr><td>For mean:<br>FC(In=256, Out=20)</td><td>For mean:<br>ConvTransp(32, 16, 4, 1, 0)</td><td>For mean:<br>FC(In=128, Out=256)</td></tr>
<tr><td>For variance:<br>FC(In=256, Out=20)</td><td>For variance:<br>Conv(16, 16, 3, 1, 1)</td><td>For variance:<br>FC(In=128, Out=256)</td></tr>
<tr><td></td><td>For variance:<br>Conv(16, 16, 3, 1, 1)</td><td></td></tr>
<tr><td>z1 dims: 256</td><td>z2 dims: 20</td><td>σx: N/A</td><td>Total params: 122713</td></tr>
</table>

Table 3: Model architecture details for LVAEs used in Section 5. Conv and ConvTransp denote convolutional and transposed convolutional layers, whose arguments are (input channels, output channels, kernel size, stride, padding). FC denotes a fully connected layer.

<table>
<tr><td>Data set</td><td>q(z2|x)</td><td>q(z1|z2,x)</td><td>p(z1|z2)</td><td>p(x|z1)</td></tr>
<tr><td rowspan="5">SVHN/CIFAR-10</td><td rowspan="2">Share:<br>Conv(3, 32, 4, 2, 1),<br>Conv(32, 32, 4, 2, 1),<br>Conv(32, 32, 4, 2, 1)</td><td>Involved:<br>Conv(32, 32, 4, 2, 1)</td><td>Share:<br>FC(In=32, Out=256)</td><td rowspan="3">ConvTransp(32, 32, 4, 2, 1),<br>ConvTransp(32, 32, 4, 2, 1),<br>ConvTransp(32, 3, 4, 2, 1)</td></tr>
<tr><td>For mean:<br>Conv(32, 32, 3, 1, 1)</td><td>For mean:<br>FC(In=256, Out=512)</td></tr>
<tr><td>For mean:<br>FC(In=512, Out=32)</td><td>For variance:<br>Conv(32, 32, 3, 1, 1)</td><td>For variance:<br>FC(In=256, Out=512)</td></tr>
<tr><td>For variance:<br>FC(In=512, Out=32)</td><td></td><td></td><td></td></tr>
<tr><td>z1 dims: 512</td><td>z2 dims: 32</td><td>σx=0.71</td><td>Total params: 408131</td></tr>
</table>

# A.2 ADDITIONAL RESULTS

Here we attach the results for MNIST, as well as the full results for LVAE on SVHN and generalized top-down HVAEs on CIFAR-10.

# A.2.1 RESULTS FOR GENERALIZED TOP-DOWN HVAES ON MNIST

We also evaluate our proposed framework using generalized top-down HVAEs trained on binary MNIST data (i.e., black and white images rather than grayscale).

We note that the inception score (IS) behaves differently in our MNIST models compared to SVHN (see Figure 5) in that optimal IS in MNIST occurs for high $R(\boldsymbol{z}_{1}|\boldsymbol{z}_{2})$ rather than high $R(\boldsymbol{z}_{2})$. This indicates that semantically low-level properties of MNIST (handwriting style) might have more variation than high-level properties (the digit), whereas SVHN images show variation in additional high-level properties such as the background color.

Figure 8: Trade-offs between rates and all metrics we used in Section 5 for the generalized top-down HVAEs trained with MNIST. The results from the standard VAE (i.e., $\beta_{2} = \beta_{1} = 1$) and the $\beta$-VAE (i.e., $\beta_{2} = \beta_{1}$) are marked with black and red circles, respectively. Purple circles highlight the optimal models selected using the convex hull (see Figure 6 for details). The diagonal grid lines are references for equivalent total rates, i.e., points on the same line have the same total rate.

# A.2.2 RESULTS FOR LVAE ON SVHN

Figure 9: Trade-offs between rates and all metrics we used in Section 5 for LVAE trained with SVHN. The results from the standard VAE (i.e., $\beta_{2} = \beta_{1} = 1$) and the $\beta$-VAE (i.e., $\beta_{2} = \beta_{1}$) are marked with black and red circles, respectively. Purple circles highlight the optimal models selected using the convex hull (see Figure 6 for details). The diagonal grid lines are references for equivalent total rates, i.e., points on the same line have the same total rate.

# A.2.3 RESULTS FOR GENERALIZED TOP-DOWN HVAES ON CIFAR-10

Figure 10: Trade-offs between rates and all metrics we used in Section 5 for the generalized top-down HVAEs trained with CIFAR-10. The results from the standard VAE (i.e., $\beta_{2} = \beta_{1} = 1$) and the $\beta$-VAE (i.e., $\beta_{2} = \beta_{1}$) are marked with black and red circles, respectively. Purple circles highlight the optimal models selected using the convex hull (see Figure 6 for details). The diagonal grid lines are references for equivalent total rates, i.e., points on the same line have the same total rate.

# B PROOF OF THE BOUND ON CLASSIFICATION ACCURACY

This section provides a proof of Eq. 11 by reformulating the proof of Proposition 5 in the thesis by Meyen (2016) into the notation used in the present paper. We stress that this section contains no original contribution and is provided only as a convenience to the reader, motivated by reviewer feedback. All credits for this section belong to Meyen (2016).

We consider an (unknown) true data generative distribution $p_{\mathrm{data}}(y, \boldsymbol{x})$ for data $\boldsymbol{x}$ with (unobserved) true labels $y$, and a hierarchical VAE with an inference model $q_{\phi}(\{\boldsymbol{z}_{\ell}\} \mid \boldsymbol{x})$ of the form of Eq. 4. Focusing on a single layer $\ell$ of latents, we denote the joint probability over $y$, $\boldsymbol{x}$, and $\boldsymbol{z}_{\ell}$ as

$$
q(y, \boldsymbol{x}, \boldsymbol{z}_{\ell}) := p_{\mathrm{data}}(y, \boldsymbol{x}) \, q_{\phi}(\boldsymbol{z}_{\ell} | \boldsymbol{x}) \tag{13}
$$

where the marginal $q_{\phi}(\boldsymbol{z}_{\ell}|\boldsymbol{x})$ of $q_{\phi}(\{\boldsymbol{z}_{\ell}\}|\boldsymbol{x})$ is defined as usual. We further consider a classifier $p_{\mathrm{cls.}}(y|\boldsymbol{z}_{\ell})$ that operates on $\boldsymbol{z}_{\ell}$. Denoting its top prediction as $\hat{y} \coloneqq \arg \max_y p_{\mathrm{cls.}}(y|\boldsymbol{z}_{\ell})$, the classification accuracy is $\alpha \coloneqq \mathbb{E}_q[\delta_{y,\hat{y}}]$, where $\delta$ is the Kronecker delta.

Theorem 1. The mutual information $I_{q}(y;\boldsymbol{z}_{\ell})$ between the latent representation $\boldsymbol{z}_{\ell}$ and the true label $y$ under the distribution $q$ defined in Eq. 13 is lower bounded as follows,

$$
I_{q}(y; \boldsymbol{z}_{\ell}) \geq f(\alpha) \quad \text{with} \quad f(\alpha) = H_{p_{\mathrm{data}}}[y] - H_{2}(\alpha) - (1 - \alpha) \log (M - 1) \tag{14}
$$

where $H_{2}(\alpha) = -\alpha \log \alpha - (1 - \alpha)\log(1 - \alpha)$ is the entropy of a Bernoulli distribution, $H_{p_{\mathrm{data}}}[y] \leq \log M$ is the marginal entropy of the true labels, and $M$ denotes the number of classes.

Before we prove Theorem 1, we note that the function $f$ is strictly monotonically increasing on the relevant interval $[\max_y p_{\mathrm{data}}(y), 1]$. Thus, $f$ is invertible and we obtain the following corollary:

Corollary 1. The classification accuracy $\alpha$ is upper bounded as in Eq. 11 of the main text, i.e.,

$$
\alpha \leq f^{-1}\left(I_{q}(y; \boldsymbol{z}_{\ell})\right) \leq f^{-1}\left(\mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}[R(\boldsymbol{z}_{\geq \ell})]\right). \tag{15}
$$

The second inequality in Eq. 15 results from the bound $I_{q}(y;\boldsymbol{z}_{\ell}) \leq \mathbb{E}_{p_{\mathrm{data}}(\boldsymbol{x})}[R(\boldsymbol{z}_{\geq \ell})]$ derived in Eq. 10, using the fact that $f^{-1}$ is monotonically increasing (since $f$ is).

Proof of Theorem 1. We split the mutual information into two contributions,

$$
I_{q}(y; \boldsymbol{z}_{\ell}) = H_{p_{\mathrm{data}}}[y] - H_{q}[y | \boldsymbol{z}_{\ell}] = H_{p_{\mathrm{data}}}[y] - \mathbb{E}_{\boldsymbol{z}_{\ell} \sim q(\boldsymbol{z}_{\ell})}\left[\mathbb{E}_{y \sim q(y | \boldsymbol{z}_{\ell})}[-\log q(y | \boldsymbol{z}_{\ell})]\right] \tag{16}
$$

where, as clarified in the second equality, $H_{q}[y|\boldsymbol{z}_{\ell}]$ is the expectation over $\boldsymbol{z}_{\ell}$ of the conditional entropy of $y$ given $\boldsymbol{z}_{\ell}$, and $q(\boldsymbol{z}_{\ell})$ and $q(y|\boldsymbol{z}_{\ell})$ are marginals and conditionals of $q$ (Eq. 13) as usual.

Since $H_{p_{\mathrm{data}}}[y]$ is fixed by the problem at hand, finding a lower bound on $I_q(y;\boldsymbol{z}_\ell)$ for a given classification accuracy $\alpha$ is equivalent to finding an upper bound on the second term on the right-hand side of Eq. 16, $H_{q}[y|\boldsymbol{z}_{\ell}] = \mathbb{E}_{\boldsymbol{z}_{\ell}\sim q(\boldsymbol{z}_{\ell})}[\mathbb{E}_{y\sim q(y|\boldsymbol{z}_{\ell})}[-\log q(y|\boldsymbol{z}_{\ell})]]$, under the constraint $\mathbb{E}_q[\delta_{y,\hat{y}}] = \alpha$. We do this by upper bounding the conditional entropy $\mathbb{E}_{y\sim q(y|\boldsymbol{z}_{\ell})}[-\log q(y|\boldsymbol{z}_{\ell})]$ of $y$ given $\boldsymbol{z}_{\ell}$ for all $\boldsymbol{z}_{\ell}$ independently, and then taking the expectation over $\boldsymbol{z}_{\ell}\sim q(\boldsymbol{z}_{\ell})$.

For a fixed latent representation $\boldsymbol{z}_{\ell}$, we first split off the contribution to $\mathbb{E}_{y \sim q(y|\boldsymbol{z}_{\ell})}[-\log q(y|\boldsymbol{z}_{\ell})]$ from $y = \hat{y}$, where $\hat{y} = \arg \max_{y} p_{\mathrm{cls.}}(y|\boldsymbol{z}_{\ell})$ is the label that our classifier would predict for $\boldsymbol{z}_{\ell}$,

$$
\mathbb{E}_{y \sim q(y | \boldsymbol{z}_{\ell})}[-\log q(y | \boldsymbol{z}_{\ell})] = -q(y = \hat{y} | \boldsymbol{z}_{\ell}) \log q(y = \hat{y} | \boldsymbol{z}_{\ell}) - \sum_{y \neq \hat{y}} q(y | \boldsymbol{z}_{\ell}) \log q(y | \boldsymbol{z}_{\ell}). \tag{17}
$$

Here, the second term on the right-hand side resembles the entropy of a distribution over the remaining $(M - 1)$ labels $(y\neq \hat{y})$, except that the probabilities sum to $(1 - q(y = \hat{y} |\boldsymbol{z}_{\ell}))$ rather than one. Thus, regardless of the value of $q(y = \hat{y} |\boldsymbol{z}_{\ell})$, this term is maximized if $q(y|\boldsymbol{z}_{\ell})$ distributes the remaining probability mass $(1 - q(y = \hat{y} |\boldsymbol{z}_{\ell}))$ uniformly over the remaining $(M - 1)$ labels, i.e.,

$$
\begin{array}{l}
\mathbb{E}_{y \sim q(y | \boldsymbol{z}_{\ell})}[-\log q(y | \boldsymbol{z}_{\ell})] \leq -q(y = \hat{y} | \boldsymbol{z}_{\ell}) \log q(y = \hat{y} | \boldsymbol{z}_{\ell}) - (1 - q(y = \hat{y} | \boldsymbol{z}_{\ell})) \log \frac{1 - q(y = \hat{y} | \boldsymbol{z}_{\ell})}{M - 1} \\
= H_{2}\left(q(y = \hat{y} \mid \boldsymbol{z}_{\ell})\right) + \left(1 - q(y = \hat{y} \mid \boldsymbol{z}_{\ell})\right) \log (M - 1). \tag{18}
\end{array}
$$

Plugging Eq. 18 back into Eq. 16, we obtain the bound

$$
I_{q}(y; \boldsymbol{z}_{\ell}) \geq H_{p_{\mathrm{data}}}[y] - \mathbb{E}_{\boldsymbol{z}_{\ell} \sim q(\boldsymbol{z}_{\ell})}\left[H_{2}\left(q(y = \hat{y} \mid \boldsymbol{z}_{\ell})\right)\right] - \mathbb{E}_{\boldsymbol{z}_{\ell} \sim q(\boldsymbol{z}_{\ell})}\left[1 - q(y = \hat{y} \mid \boldsymbol{z}_{\ell})\right] \log (M - 1). \tag{19}
$$

We arrive at the proposition (Eq. 14) by pulling the concave function $H_{2}$ out of the expectation using Jensen's inequality, and by then identifying $\mathbb{E}_{\boldsymbol{z}_{\ell}\sim q(\boldsymbol{z}_{\ell})}[q(y = \hat{y} |\boldsymbol{z}_{\ell})] = q(y = \hat{y}) = \alpha$.
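
As a quick numerical sanity check of Theorem 1 (ours, not part of the original proof), one can draw a random joint distribution over a discrete latent and the labels, compute $I_q(y;\boldsymbol{z})$ and the accuracy $\alpha$ of the arg-max classifier, and verify $I_q(y;\boldsymbol{z}) \geq f(\alpha)$:

```python
import numpy as np

rng = np.random.default_rng(0)
M, Z = 10, 50                                          # classes, latent states
q_zy = rng.dirichlet(np.ones(M * Z)).reshape(Z, M)     # random joint q(z, y)
q_z, q_y = q_zy.sum(axis=1), q_zy.sum(axis=0)

mi = np.sum(q_zy * np.log(q_zy / np.outer(q_z, q_y)))  # I_q(y; z)
alpha = np.sum(q_zy.max(axis=1))                       # accuracy of the arg-max classifier
H_y = -np.sum(q_y * np.log(q_y))
H2 = -alpha * np.log(alpha) - (1 - alpha) * np.log(1 - alpha)
f_alpha = H_y - H2 - (1 - alpha) * np.log(M - 1)       # Eq. 14
assert mi >= f_alpha - 1e-9                            # Theorem 1 holds
```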
2023/Trading Information between Latents in Hierarchical Variational Autoencoders/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b07765dc0f7e856fec59171d95ee505df0182cc4782b3aff5165d3b462fd45dc
size 1277193
2023/Trading Information between Latents in Hierarchical Variational Autoencoders/layout.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Trainability Preserving Neural Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_content_list.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Trainability Preserving Neural Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_model.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Trainability Preserving Neural Pruning/e056486d-504c-467f-8ebf-fb592f5e5aa1_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1269c038fc8977300c1c5986ff4d2d621a9f2de049f74284888d3be0c3820221
size 1097904
2023/Trainability Preserving Neural Pruning/full.md
ADDED
@@ -0,0 +1,501 @@

# TRAINABILITY PRESERVING NEURAL PRUNING

Huan Wang<sup>1</sup> Yun Fu<sup>1,2</sup>

$^{1}$ Northeastern University, Boston, USA $^{2}$ AInnovation Labs, Inc.

wang.huan@northeastern.edu yunfu@ece.neu.edu

# ABSTRACT

Many recent works have shown trainability plays a central role in neural network pruning: unattended broken trainability can lead to severe under-performance and unintentionally amplify the effect of the retraining learning rate, resulting in biased (or even misinterpreted) benchmark results. This paper introduces trainability preserving pruning (TPP), a scalable method to preserve network trainability against pruning, aiming for improved pruning performance and greater robustness to retraining hyper-parameters (e.g., learning rate). Specifically, we propose to penalize the gram matrix of convolutional filters to decorrelate the pruned filters from the retained filters. In addition to the convolutional layers, per the spirit of preserving the trainability of the whole network, we also propose to regularize the batch normalization parameters (scale and bias). Empirical studies on linear MLP networks show that TPP can perform on par with the oracle trainability recovery scheme. On nonlinear ConvNets (ResNet56/VGG19) on CIFAR10/100, TPP outperforms the other counterpart approaches by an obvious margin. Moreover, results on ImageNet-1K with ResNets suggest that TPP consistently performs more favorably against other top-performing structured pruning approaches. Code: https://github.com/MingSun-Tse/TPP.

# 1 INTRODUCTION

Neural pruning aims to remove redundant parameters without seriously compromising the performance. It normally consists of three steps (Reed, 1993; Han et al., 2015; 2016b; Li et al., 2017; Liu et al., 2019b; Wang et al., 2021b; Gale et al., 2019; Hoefler et al., 2021; Wang et al., 2023): pretrain a dense model; prune the unnecessary connections to obtain a sparse model; retrain the sparse model to regain performance. Pruning is usually categorized into two classes, unstructured pruning (a.k.a. element-wise pruning or fine-grained pruning) and structured pruning (a.k.a. filter pruning or coarse-grained pruning). Unstructured pruning chooses a single weight as the basic pruning element, while structured pruning chooses a group of weights (e.g., a 3d filter or a 2d channel) as the basic pruning element. Structured pruning is better suited for acceleration because of its regular sparsity. Unstructured pruning, in contrast, results in irregular sparsity, which is hard to exploit for acceleration unless customized hardware and libraries are available (Han et al., 2016a; 2017; Wen et al., 2016).

Recent papers (Renda et al., 2020; Le & Hua, 2021) report an interesting phenomenon: During retraining, a larger learning rate (LR) helps achieve a significantly better final performance, empowering the two baseline methods, random pruning and magnitude pruning, to match or beat many more complex pruning algorithms. The reason behind this is argued (Wang et al., 2021a; 2023) to be related to the trainability of neural networks (Saxe et al., 2014; Lee et al., 2020; Lubana & Dick, 2021). They make two major observations to explain the LR effect mystery (Wang et al., 2023). (1) The weight removal operation immediately breaks the network trainability or dynamical isometry (Saxe et al., 2014) (the ideal case of trainability) of the trained network. (2) The broken trainability slows down the optimization in retraining, where a greater LR helps the model converge faster, so a better performance is observed earlier; using a smaller LR can actually do as well, but needs more epochs.

Although these works (Lee et al., 2020; Lubana & Dick, 2021; Wang et al., 2021a; 2023) provide a plausibly sound explanation, a more practical issue is how to recover the broken trainability or maintain it during pruning. In this regard, Wang et al. (2021a) propose to apply weight orthogonalization based on QR decomposition (Trefethen & Bau III, 1997; Mezzadri, 2006) to the pruned model. However, their method is shown to only work for linear MLP networks. On modern deep convolutional neural networks (CNNs), how to maintain trainability during pruning is still elusive.

Figure 1: Illustration of the proposed TPP algorithm on a typical residual block. Weight parameters are classified into two groups as a typical pruning algorithm does: important (white color) and unimportant (orange or blue color), right from the beginning (before any training starts) based on the filter $L_{1}$-norms. Then only the unimportant parameters are enforced with the proposed TPP regularization terms, which is the key to maintaining trainability when the unimportant weights are eventually eliminated from the network. Notably, the critical part of a regularization-based pruning algorithm lies in its specific regularization term, i.e., Eqs. (3) and (5), which we will show perform more favorably than other alternatives (see Tabs. 1 and 10).

We introduce trainability preserving pruning (TPP), a novel filter pruning algorithm (see Fig. 1) that maintains trainability via a regularized training process. By our observation, the primary cause of pruning breaking trainability lies in the dependency among parameters. The key idea of our approach is thus to decorrelate the pruned weights from the kept weights so as to "cut off" this dependency, so that the subsequent sparsifying operation barely hurts the network trainability.

Specifically, we propose to regularize the gram matrix of weights: All the entries representing the correlation between the pruned filters (i.e., unimportant filters) and the kept filters (i.e., important filters) are encouraged to diminish to zero. This is the first technical contribution of our method. The second one lies in how to treat the other entries. Conventional dynamical isometry wisdom suggests orthogonality, namely, 1 self-correlation and 0 cross-correlation, even among the kept filters, while we find that directly translating the orthogonality idea here is unnecessary or even harmful, because too strong a penalty constrains the optimization, leading to a deteriorated local minimum. Rather, we propose not to impose any regularization on the correlation entries of kept filters.

Finally, modern deep models are typically equipped with batch normalization (BN) (Ioffe & Szegedy, 2015). However, previous filter pruning papers rarely explicitly take BN into account (except two (Liu et al., 2017; Ye et al., 2018); the differences of our work from theirs will be discussed in Sec. 3.2) to mitigate the side effect when a BN unit is removed because its associated filter is removed. Since BN parameters are also part of the network's trainable parameters, their unattended removal will likewise lead to severely crippled trainability (especially at large sparsity). Therefore, BN parameters (both the scale and the bias) ought to be explicitly taken into account, too, when we develop the pruning algorithm. Based on this idea, we propose to regularize the two learnable parameters of BN to minimize the influence of their absence later.
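
For concreteness, one way to realize such a BN regularizer is an L2 penalty that drives the scale and bias of to-be-pruned channels toward zero; the PyTorch sketch below (ours) is illustrative only, since the paper's exact term and schedule are given by its Eq. (5), which is not reproduced here:

```python
import torch
import torch.nn as nn

def bn_penalty(bn: nn.BatchNorm2d, pruned_idx: torch.Tensor, coeff: float):
    """L2 penalty on the BN scale (gamma) and bias (beta) of channels that are
    scheduled for pruning, so their later removal barely perturbs the network.
    A sketch of the idea, not the paper's exact Eq. (5)."""
    gamma = bn.weight[pruned_idx]
    beta = bn.bias[pruned_idx]
    return coeff * (gamma.pow(2).sum() + beta.pow(2).sum())
```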

Practically, our TPP is easy to implement and robust to hyper-parameter variations. On ResNet50 ImageNet, TPP delivers encouraging results compared to many recent SOTA filter pruning methods.

Contributions. (1) We present the first filter pruning method (trainability preserving pruning) that effectively maintains trainability during pruning for modern deep networks, via a customized weight gram matrix as the regularization target. (2) Apart from weight regularization, a BN regularizer is introduced to allow for the subsequent absence of BN parameters in pruning; this issue has been overlooked by most previous pruning papers, although it is shown to be pretty important for preserving trainability, especially in the large-sparsity regime. (3) Practically, the proposed method can easily scale to modern deep networks (such as ResNets) and datasets (such as ImageNet-1K (Deng et al., 2009)). It achieves promising pruning performance in comparison to many SOTA filter pruning methods.

# 2 RELATED WORK

Network pruning. Pruning mainly falls into structured pruning (Li et al., 2017; Wen et al., 2016; He et al., 2017; 2018; Wang et al., 2021b) and unstructured pruning (Han et al., 2015; 2016b; LeCun et al., 1990; Hassibi & Stork, 1993; Singh & Alistarh, 2020), according to the sparsity structure. For more comprehensive coverage, we recommend the surveys (Sze et al., 2017; Cheng et al., 2018; Deng et al., 2020; Hoefler et al., 2021; Wang et al., 2022). This paper targets structured pruning (filter pruning, to be specific) because, compared to the early single-branch convolutional networks, it is more imperative to make modern networks (e.g., ResNets (He et al., 2016)) faster rather than smaller.

It is noted that random pruning of a normally-sized (i.e., not severely over-parameterized) network usually leads to a significant performance drop. We need to cleverly choose some unimportant parameters to remove. Such a criterion for choosing is called the pruning criterion. In the area, there have been two major paradigms to address the pruning criterion problem, dating back to the 1990s: regularization-based methods and importance-based (a.k.a. saliency-based) methods (Reed, 1993).

Specifically, the regularization-based approaches choose unimportant parameters via a sparsity-inducing penalty term (e.g., Wen et al. (2016); Yang et al. (2020); Lebedev & Lempitsky (2016); Louizos et al. (2018); Liu et al. (2017); Ye et al. (2018); Zhang et al. (2021a; 2022; 2021b)). This paradigm can be applied to a random or pretrained network. Importance-based methods choose unimportant parameters via an importance formula, derived from the Taylor expansion of the loss function (e.g., LeCun et al. (1990); Hassibi & Stork (1993); Han et al. (2015; 2016b); Li et al. (2017); Molchanov et al. (2017; 2019)). This paradigm is mainly applied to a pretrained network. Despite the differences, it is worth noting that these two paradigms are not firmly unbridgeable. We can develop approaches that take advantage of both ideas, such as Ding et al. (2018); Wang et al. (2019; 2021b); these methods identify unimportant weights per a certain importance criterion and then utilize a penalty term to produce sparsity. Our TPP method in this paper is also in this line.

Trainability, dynamical isometry, and orthogonality. Trainability describes the ease of optimization of a neural network. Dynamical isometry, the perfect case of trainability, was first introduced by Saxe et al. (2014), stating that the singular values of the Jacobian matrix are close to 1. It can be achieved (for linear MLP models) by orthogonality of the weight matrix at initialization. Recent works on this topic mainly focus on how to maintain dynamical isometry during training instead of only at initialization (Xie et al., 2017; Huang et al., 2018; Bansal et al., 2018; Huang et al., 2020; Wang et al., 2020). These methods are developed independently of pruning and are thus not directly related to our proposed approach. However, the insights from these works inspire our method (see Sec. 3.2) and possibly more in the future. Several pruning papers study the network trainability issue in the context of network pruning, such as Lee et al. (2020); Lubana & Dick (2021); Vysogorets & Kempe (2021). These works mainly discuss the trainability issue of a randomly initialized network. In contrast, we focus on pruning a pretrained network.

# 3 METHODOLOGY

# 3.1 PREREQUISITES: DYNAMICAL ISOMETRY AND ORTHOGONALITY

The definition of dynamical isometry is that the Jacobian of a network has as many singular values (JSVs) as possible close to 1 (Saxe et al., 2014). With it, the error signal can preserve its norm during propagation without serious amplification or attenuation, which in turn helps the convergence of (very deep) networks. For a single fully-connected layer $W$, a sufficient and necessary condition to realize dynamical isometry is orthogonality, i.e., $W^{\top}W = I$,

$$
\mathbf{y} = W \mathbf{x},
$$

$$
\|\mathbf{y}\| = \sqrt{\mathbf{y}^{\top}\mathbf{y}} = \sqrt{\mathbf{x}^{\top} W^{\top} W \mathbf{x}} = \|\mathbf{x}\|, \quad \text{iff } W^{\top}W = I, \tag{1}
$$
|
| 61 |
+
|
| 62 |
+
where $I$ represents the identity matrix. Orthogonality of a weight matrix can be easily realized by matrix orthogonalization techniques such as QR decomposition (Trefethen & Bau III, 1997; Mezzadri, 2006). Exact (namely all the Jacobian singular values are exactly 1) dynamical isometry
|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
(a) Kernel orthogonality
|
| 66 |
+
|
| 67 |
+

|
| 68 |
+
(b) Kernel orthogonality for pruning
|
| 69 |
+
|
| 70 |
+

|
| 71 |
+
(c) De-correlate pruned from kept
|
| 72 |
+
Figure 2: Regularization target comparison between the proposed scheme (c) and similar counterparts (a) and (b). Green part stands for zero entries. Index 1 to N denotes the filter indices. In (b, c), filter 2 and N are the unimportant filters to be removed. (a) Regularization target of pure kernel orthogonality (an identity matrix), no pruning considered. (b) Regularization target of directly applying the weight orthogonality to filter pruning. (c) Regularization target of the proposed weight de-correlation solution in TPP: only regularize the filters to be removed, leave the others unconstrained. This scheme maintains trainability while imposing the least constraint on the weights.
|
| 73 |
+
|
| 74 |
+
Exact dynamical isometry (namely, all Jacobian singular values are exactly 1) can be achieved for linear networks, since multiple linear layers essentially reduce to a single 2-d weight matrix. In contrast, the convolutional and non-linear cases are much more complicated. Previous work (Wang et al., 2023) has shown that merely accounting for convolution or ReLU (Nair & Hinton, 2010) renders the weight orthogonalization method much less effective at recovering dynamical isometry after pruning, let alone modern deep networks with BN (Ioffe & Szegedy, 2015) and residuals (He et al., 2016). The primary goal of our paper is to bridge this gap.

Following the seminal work of Saxe et al. (2014), several papers propose to maintain orthogonality during training instead of solely at initialization. There are primarily two groups of orthogonalization methods for CNNs: kernel orthogonality (Xie et al., 2017; Huang et al., 2018; 2020) and orthogonal convolution (Wang et al., 2020),

$$
K K^{\top} = I \Rightarrow \mathcal{L}_{\text{orth}} = K K^{\top} - I, \quad \triangleleft \; \text{kernel orthogonality} \tag{2}
$$

$$
\mathcal{K} \mathcal{K}^{\top} = I \Rightarrow \mathcal{L}_{\text{orth}} = \mathcal{K} \mathcal{K}^{\top} - I. \quad \triangleleft \; \text{orthogonal convolution}
$$

Clearly, the difference lies in the weight matrix $K$ vs. $\mathcal{K}$: (1) $K$ denotes the original weight matrix of a convolutional layer. The weights of a CONV layer make up a 4-d tensor in $\mathbb{R}^{N\times C\times H\times W}$ ($N$ stands for the number of output channels, $C$ for the number of input channels, $H$ and $W$ for the height and width of the CONV kernel). Then, $K$ is a reshaped version of this 4-d tensor: $K\in \mathbb{R}^{N\times CHW}$ (if $N < CHW$; otherwise, $K\in \mathbb{R}^{CHW\times N}$). (2) In contrast, $\mathcal{K}\in \mathbb{R}^{NH_{fo}W_{fo}\times CH_{fi}W_{fi}}$ stands for the doubly block-Toeplitz representation of $K$ ($H_{fo}$ stands for the output feature map height, $H_{fi}$ for the input feature map height; $W_{fo}$ and $W_{fi}$ are defined analogously for width).

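As a small illustration of the reshape behind kernel orthogonality (the shapes below are arbitrary example values, not from the paper):

```python
import torch

N, C, H, W = 64, 32, 3, 3            # example conv dimensions (N < CHW)
w = torch.randn(N, C, H, W)          # 4-d conv weight tensor
K = w.reshape(N, C * H * W)          # the matrix K used in Eq. (2)
gram = K @ K.t()                     # N x N gram matrix, compared against I
print(gram.shape)                    # torch.Size([64, 64])
```
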
Wang et al. (2020) have shown that orthogonal convolution is more effective than kernel orthogonality (Xie et al., 2017) in that the latter is only a necessary but insufficient condition of the former. In this work, we will evaluate both methods to see how effective they are in recovering trainability.
# 3.2 TRAINABILITY PRESERVING PRUNING (TPP)
Our TPP method has two parts. First, we explain how we arrive at the proposed scheme and why it is intuitively better than the straightforward idea of directly applying existing orthogonality regularization methods (Xie et al., 2017; Wang et al., 2020) here. Second, a batch normalization regularizer is introduced, given the prevailing use of BN as a standard component in modern deep CNNs.

(1) Trainability vs. orthogonality. From previous works (Lee et al., 2020; Lubana & Dick, 2021; Wang et al., 2021a; 2023), we know that recovering the trainability (or dynamical isometry) impaired by pruning is very important. Considering that orthogonality regularization can encourage isometry, a straightforward solution is to build upon the existing weight orthogonality regularization schemes. Specifically, kernel orthogonality regularizes the weight gram matrix towards an identity matrix (see Fig. 2(a)). In our case, we aim to remove some filters, so we could naturally regularize the weight gram matrix towards a partial identity matrix, with the diagonal entries at the pruned filters zeroed out (see Fig. 2(b); note the diagonal green zeros).

The above scheme is simple and straightforward. However, by our empirical observation it is suboptimal: it imposes an unnecessarily strong constraint on the remaining weights, which in turn hurts optimization. Therefore, we seek a weaker constraint that demands not perfect trainability (i.e., exact isometry realized by orthogonality) but only a benign status, a state in which gradients can flow effectively through the network without being interrupted. Orthogonality requires the Jacobian singular values to be exactly 1; in contrast, benign trainability only requires them not to be extremely large or small, so that the network can be trained normally. To this end, we propose to de-correlate the kept filters from the pruned ones: in the target gram matrix, all the entries associated with the pruned filters are zero, while all the other entries stay as they are (see Fig. 2(c)). This scheme will be empirically justified (Tab. 3).

Specifically, all the filters in a layer are sorted by their $L_{1}$-norms, and those with the smallest $L_{1}$-norms are considered unimportant (the set $S_{l}$ below); hence, the proposed method also falls into the group of magnitude-based pruning methods. The proposed regularization term is then

$$
\mathcal{L}_{1} = \sum_{l=1}^{L} \left\| W_{l} W_{l}^{\top} \odot \left(\mathbf{1} - \mathbf{m}\mathbf{m}^{\top}\right) \right\|_{F}^{2}, \quad \mathbf{m}_{j} = 0 \text{ if } j \in S_{l}, \text{ else } 1, \tag{3}
$$

where $W_{l}$ refers to the weight matrix of layer $l$; $\mathbf{1}$ represents the all-ones matrix; $\mathbf{m}$ is a 0/1-valued column mask vector; $\odot$ is the Hadamard (element-wise) product; and $\left\| \cdot \right\|_{F}$ denotes the Frobenius norm.

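For concreteness, below is a minimal PyTorch sketch of the $L_{1}$-norm filter selection and the de-correlation penalty of Eq. (3) for a single convolutional layer. The function names and interfaces are our own illustration under these assumptions, not the authors' released code.

```python
import torch

def select_unimportant(weight: torch.Tensor, ratio: float) -> list[int]:
    """Rank filters of a conv weight (N, C, H, W) by L1 norm; return S_l."""
    n = weight.shape[0]
    l1 = weight.abs().reshape(n, -1).sum(dim=1)
    k = int(n * ratio)                      # number of filters slated for removal
    return torch.argsort(l1)[:k].tolist()   # indices with the smallest L1 norms

def tpp_gram_penalty(weight: torch.Tensor, pruned_idx: list[int]) -> torch.Tensor:
    """Eq. (3) for one layer: zero out all gram entries tied to pruned filters."""
    n = weight.shape[0]
    W = weight.reshape(n, -1)               # N x CHW matrix W_l
    gram = W @ W.t()                        # N x N gram matrix W_l W_l^T
    m = torch.ones(n, 1, device=weight.device)
    m[pruned_idx] = 0.0                     # m_j = 0 if j in S_l, else 1
    mask = 1.0 - m @ m.t()                  # 1 wherever a pruned filter is involved
    return (gram * mask).pow(2).sum()       # squared Frobenius norm
```

Note how the mask $\mathbf{1} - \mathbf{m}\mathbf{m}^{\top}$ touches only the rows and columns of the pruned filters, so the kept-kept correlations are left completely unconstrained, matching Fig. 2(c).
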
(2) BN regularization. Given the idea of preserving trainability, BN cannot be ignored, since BN layers are also trainable. Removing filters changes the internal feature distributions; if the learned BN statistics do not change accordingly, the error accumulates and results in deteriorated performance (especially for deep networks). Consider the following BN formulation (Ioffe & Szegedy, 2015),

$$
f = \gamma \frac{W * X - \mu}{\sqrt{\sigma^{2} + \epsilon}} + \beta, \tag{4}
$$

where $*$ stands for convolution; $\mu/\sigma^2$ refers to the running mean/variance; and $\epsilon$, a small number, is used for numerical stability. The two learnable parameters are $\gamma$ and $\beta$. Although the unimportant weights are regularized toward sparsity, their magnitudes rarely reach exactly zero, so the subsequent removal of filters is biased. This skews the feature distributions and renders the BN statistics inaccurate; using these biased BN statistics damages trainability. To mitigate this influence from BN, we propose to regularize both the $\gamma$ and $\beta$ of pruned feature map channels toward zero, which gives the following BN penalty term,

$$
\mathcal{L}_{2} = \sum_{l=1}^{L} \sum_{j \in S_{l}} \left( \gamma_{j}^{2} + \beta_{j}^{2} \right). \tag{5}
$$

The merits of BN regularization will be justified in our experiments (Tab. 4).
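
A corresponding sketch of the BN term in Eq. (5), using the same hypothetical interface as the gram-penalty sketch above; in PyTorch, `bn.weight` holds $\gamma$ and `bn.bias` holds $\beta$.

```python
import torch
import torch.nn as nn

def tpp_bn_penalty(bn: nn.BatchNorm2d, pruned_idx: list[int]) -> torch.Tensor:
    """Eq. (5) for one BN layer: push gamma and beta of pruned channels to zero."""
    idx = torch.tensor(pruned_idx, dtype=torch.long, device=bn.weight.device)
    return bn.weight[idx].pow(2).sum() + bn.bias[idx].pow(2).sum()
```
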
To sum up, with the proposed regularization terms, the total error function is

$$
\mathcal{E} = \mathcal{L}_{cls} + \frac{\lambda}{2} \left(\mathcal{L}_{1} + \mathcal{L}_{2}\right), \tag{6}
$$

where $\mathcal{L}_{cls}$ denotes the original classification loss. The coefficient $\lambda$ grows by a predefined constant $\Delta$ every $K_{u}$ iterations (up to a ceiling $\tau$) during training to ensure the pruned parameters become rather close to zero (inspired by Wang et al. (2019; 2021b)). See Algorithm 1 in the Appendix for more details.

Discussion. Prior works (Liu et al., 2017; Ye et al., 2018) also study regularizing BN for pruning. Our BN regularization method is starkly different from theirs. (1) In terms of motivation, Liu et al. (2017); Ye et al. (2018) regularize $\gamma$ to learn which filters are unimportant; that is, regularizing BN indirectly decides which filters to prune. In contrast, in our method, unimportant filters are decided by their $L_{1}$-norms; we adopt BN regularization for a totally different reason – to mitigate the side effect of broken trainability, which is not mentioned at all in their works. (2) In terms of the specific technique, Liu et al. (2017); Ye et al. (2018) only regularize the multiplier $\gamma$ (because that is enough to decide which filters are unimportant), while we regularize both learnable parameters, since regularizing only one still misses some trainable parameters. Besides, we employ different regularization strengths for different parameters (grouped by important vs. unimportant filters), while Liu et al. (2017); Ye et al. (2018) simply adopt a uniform penalty strength for all parameters – this is another key difference, because regularizing all parameters (including those meant to be retained) damages trainability, which is exactly what we want to avoid. In short, in terms of both general motivation and specific technical details, our proposed BN regularization is distinct from previous works (Liu et al., 2017; Ye et al., 2018).


91.36/90.54/0.0040
(a) $L_{1}$


92.79/92.77/1.0000
(b) $L_{1} + \mathrm{OrthP}$


92.82/92.77/3.4875
(c) TPP (ours)

Figure 3: Mean Jacobian singular value (JSV) and test accuracy during retraining with different setups (network: MLP-7-Linear, dataset: MNIST). Below each plot are, in order, the best accuracy with LR 1e-2, the best accuracy with LR 1e-3, and the mean JSV right after pruning (i.e., without retraining). LR 1e-2 and 1e-3 are short for two retraining LR schedules: {init LR 1e-2, decay at epochs 30/60, 90 epochs in total} and {init LR 1e-3, decay at epoch 45, 90 epochs in total}. The accuracies are averaged over 5 random runs. For reference, the unpruned model has mean JSV 2.4987 and test accuracy 92.77.

# 4 EXPERIMENTS
Networks and datasets. We first present some analyses with the MLP-7-Linear network on MNIST (LeCun et al., 1998). Then we compare our method to other plausible solutions with the ResNet56 (He et al., 2016) and VGG19 (Simonyan & Zisserman, 2015) networks, on the CIFAR10 and CIFAR100 datasets (Krizhevsky, 2009), respectively. Next, we evaluate our algorithm on ImageNet-1K (Deng et al., 2009) with ResNet34 and ResNet50 (He et al., 2016). Finally, we present ablation studies to show the efficacy of the two main technical novelties in our approach. On ImageNet, we use the public torchvision models (Paszke et al., 2019) as the unpruned models for fair comparison with other papers. On the other datasets, we train our own base models, with accuracies comparable to those reported in the original papers. See the Appendix (Tab. 5) for the concrete training settings.

Comparison methods. We compare with Wang et al. (2021a), which proposes a method, OrthP, to recover broken trainability after pruning pretrained models. Furthermore, since weight orthogonality is closely related to network trainability and there are plenty of orthogonality regularization approaches (Xie et al., 2017; Huang et al., 2018; 2020; Wang et al., 2020), a straightforward solution is to combine them with $L_{1}$ pruning (Li et al., 2017) to see whether they can help maintain or recover the broken trainability. Two plausible combination schemes are apparent: 1) apply orthogonality regularization before $L_{1}$ pruning; 2) apply orthogonality regularization after $L_{1}$ pruning, i.e., during retraining. Two representative orthogonality regularization methods are selected because of their proven effectiveness: kernel orthogonality (KernOrth) (Xie et al., 2017) and convolutional orthogonality (OrthConv) (Wang et al., 2020), so in total there are four combinations: $L_{1} + \text{KernOrth}$, $L_{1} + \text{OrthConv}$, $\text{KernOrth} + L_{1}$, $\text{OrthConv} + L_{1}$.

Comparison metrics. (1) We examine the final test accuracy after retraining under a similar FLOPs budget – currently the most prevailing metric for comparing filter pruning methods in classification. Concretely, we compare two settings: a relatively large retraining LR (1e-2) and a small one (1e-3). We introduce these settings because previous works (Renda et al., 2020; Le & Hua, 2021; Wang et al., 2021a; 2023) have shown that the retraining LR has a great impact on the final performance. From this metric, we can see how sensitive different methods are to the retraining LR. (2) We also compare the test accuracy before retraining – from this metric, we can see how robust different methods are in the face of weight removal.

Table 1: Test accuracy (%) comparison among different isometry maintenance or recovery methods with ResNet56 on CIFAR10. Scratch stands for training from scratch. KernOrth means Kernel Orthogonalization (Xie et al., 2017); OrthConv means Convolutional Orthogonalization (Wang et al., 2020). Two retraining LR schedules are evaluated: initial LR $1\mathrm{e}{-2}$ vs. $1\mathrm{e}{-3}$. Acc. diff. refers to the accuracy gap of LR $1\mathrm{e}{-3}$ against LR $1\mathrm{e}{-2}$.

<table><tr><td colspan="6">ResNet56 on CIFAR10: Unpruned acc. 93.78%, Params: 0.85M, FLOPs: 0.25G</td></tr><tr><td>Layerwise PR</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td><td>0.95</td></tr><tr><td>Sparsity/Speedup</td><td>31.14%/1.45×</td><td>49.82%/1.99×</td><td>70.57%/3.59×</td><td>90.39%/11.41×</td><td>95.19%/19.31×</td></tr><tr><td colspan="6">Initial retraining LR 1e-2</td></tr><tr><td>Scratch</td><td>93.16 (0.16)</td><td>92.78 (0.23)</td><td>92.11 (0.12)</td><td>88.36 (0.20)</td><td>84.60 (0.14)</td></tr><tr><td>L1 (Li et al., 2017)</td><td>93.79 (0.06)</td><td>93.51 (0.07)</td><td>92.26 (0.17)</td><td>86.75 (0.31)</td><td>83.03 (0.07)</td></tr><tr><td>L1 + OrthP (Wang et al., 2021a)</td><td>93.69 (0.02)</td><td>93.36 (0.19)</td><td>91.96 (0.06)</td><td>86.01 (0.34)</td><td>82.62 (0.05)</td></tr><tr><td>L1 + KernOrth (Xie et al., 2017)</td><td>93.49 (0.04)</td><td>93.30 (0.19)</td><td>91.71 (0.14)</td><td>84.78 (0.34)</td><td>80.87 (0.47)</td></tr><tr><td>L1 + OrthConv (Wang et al., 2020)</td><td>92.54 (0.09)</td><td>92.41 (0.07)</td><td>91.02 (0.16)</td><td>84.52 (0.27)</td><td>80.23 (1.19)</td></tr><tr><td>KernOrth (Xie et al., 2017) + L1</td><td>93.49 (0.07)</td><td>92.82 (0.10)</td><td>90.54 (0.25)</td><td>85.47 (0.20)</td><td>79.48 (0.81)</td></tr><tr><td>OrthConv (Wang et al., 2020) + L1</td><td>93.63 (0.17)</td><td>93.28 (0.20)</td><td>92.27 (0.13)</td><td>86.70 (0.07)</td><td>83.21 (0.61)</td></tr><tr><td>TPP (ours)</td><td>93.81 (0.11)</td><td>93.46 (0.06)</td><td>92.35 (0.12)</td><td>89.63 (0.10)</td><td>85.86 (0.08)</td></tr><tr><td colspan="6">Initial retraining LR 1e-3</td></tr><tr><td>L1 (Li et al., 2017)</td><td>93.43 (0.06)</td><td>93.12 (0.10)</td><td>91.77 (0.11)</td><td>87.57 (0.09)</td><td>83.10 (0.12)</td></tr><tr><td>TPP (ours)</td><td>93.54 (0.08)</td><td>93.32 (0.11)</td><td>92.00 (0.08)</td><td>89.09 (0.10)</td><td>85.47 (0.22)</td></tr><tr><td>Acc. diff. (L1)</td><td>-0.38</td><td>-0.40</td><td>-0.50</td><td>+0.82</td><td>+0.07</td></tr><tr><td>Acc. diff. (TPP)</td><td>-0.27</td><td>-0.14</td><td>-0.35</td><td>-0.54</td><td>-0.39</td></tr></table>

# 4.1 ANALYSIS: MLP-7-LINEAR ON MNIST AND RESNET56 ON CIFAR10
MLP-7-Linear is a seven-layer linear MLP. It is adopted in Wang et al. (2021a) for analysis because the linear MLP is, so far, the only kind of network for which exact dynamical isometry (all JSVs exactly 1) can be achieved. Their proposed dynamical isometry recovery method, OrthP (Wang et al., 2021a), is shown to achieve exact isometry on linear MLP networks. Since we claim that TPP can also maintain dynamical isometry, our method should conceivably play a similar role to OrthP in pruning. To confirm this, we prune the MLP-7-Linear network with our method.

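For readers who want to reproduce this kind of measurement, below is a minimal sketch of computing the mean JSV of a network at a given input, assuming an MLP that takes a batch of flattened inputs; this is our illustration, not the authors' evaluation script.

```python
import torch

def mean_jsv(model: torch.nn.Module, x: torch.Tensor) -> float:
    """Mean singular value of the end-to-end input-output Jacobian at x."""
    jac = torch.autograd.functional.jacobian(
        lambda v: model(v.unsqueeze(0)).squeeze(0),  # flat input -> flat output
        x.flatten(),
    )
    return torch.linalg.svdvals(jac).mean().item()   # ~1 means dynamical isometry
```
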
TPP can perform as well as OrthP on the linear MLP. In Fig. 3, (b) is the setup equipped with OrthP, which exactly recovers dynamical isometry (note that its mean JSV right after pruning is 1.0000), so it serves as the oracle here. (1) OrthP improves the best accuracy from 91.36/90.54 to 92.79/92.77. Using TPP, we obtain 92.81/92.77; namely, in terms of accuracy, our method is as good as the oracle scheme. (2) Note the mean JSV right after pruning – $L_{1}$ pruning degrades the mean JSV from 2.4987 to 0.0040, and OrthP brings it back to 1.0000. In comparison, TPP achieves 3.4875, on the same order of magnitude as 1.0000, also as good as OrthP. These results demonstrate that, in terms of both the final evaluation metric (test accuracy) and the trainability measure (mean JSV), our TPP performs as well as the oracle method OrthP on the linear MLP.

Loss surface analysis with ResNet56 on CIFAR10. We further analyze the loss surfaces (Li et al., 2018) of the networks pruned (before retraining) by different methods. Our result (deferred to the Appendix due to limited space; see Fig. 4) suggests that the loss surface of our method is flatter than those of the other methods, implying a loss landscape that is easier to optimize.

# 4.2 RESNET56 ON CIFAR10 / VGG19 ON CIFAR100
Here we compare our method to other plausible solutions on the CIFAR datasets (Krizhevsky, 2009) with non-linear convolutional architectures. The results in Tab. 1 (for CIFAR10) and Tab. 10 (for CIFAR100, deferred to the Appendix due to limited space) show that:

(1) OrthP does not work well – $L_{1}$ + OrthP underperforms the original $L_{1}$ under all five pruning ratios for both ResNet56 and VGG19. This further confirms that the weight orthogonalization method proposed for linear networks indeed does not generalize to non-linear CNNs.

(2) For KernOrth vs. OrthConv, the results look mixed – OrthConv is generally better when applied before $L_{1}$ pruning. This is reasonable since OrthConv is shown to be more effective than KernOrth at enforcing isometry (Wang et al., 2020), which in turn can withstand more damage from pruning.

(3) Of particular note, none of the above five methods actually outperforms $L_{1}$ pruning or simple scratch training. This means that neither enforcing more isometry before pruning nor compensating isometry after pruning helps recover trainability. In stark contrast, our TPP method outperforms $L_{1}$ pruning and scratch training consistently across different pruning ratios (the only exception is pruning ratio 0.7 on ResNet56, where our method is still the second best and the gap to the best is only marginal: 93.46 vs. 93.51). Besides, note the accuracy trend – in general, at a larger sparsity ratio, TPP beats $L_{1}$ or Scratch by a more pronounced margin. This is because, at a larger pruning ratio, the trainability is impaired more, so our method can help more, thus harvesting larger performance gains. We will see similar trends repeatedly.

Table 2: Comparison on the ImageNet-1K validation set. *An advanced training recipe (such as a cosine LR schedule) is used; we single these out for fair comparison.

<table><tr><td>Method</td><td>Model</td><td>Unpruned top-1 (%)</td><td>Pruned top-1 (%)</td><td>Top-1 drop (%)</td><td>Speedup</td></tr><tr><td>L1(pruned-B) (Li et al., 2017)</td><td></td><td>73.23</td><td>72.17</td><td>1.06</td><td>1.32×</td></tr><tr><td>L1(pruned-B, reimpl.) (Wang et al., 2023)</td><td></td><td>73.31</td><td>73.67</td><td>-0.36</td><td>1.32×</td></tr><tr><td>Taylor-FO (Molchanov et al., 2019)</td><td>ResNet34</td><td>73.31</td><td>72.83</td><td>0.48</td><td>1.29×</td></tr><tr><td>GReg-2 (Wang et al., 2021b)</td><td></td><td>73.31</td><td>73.61</td><td>-0.30</td><td>1.32×</td></tr><tr><td>TPP (ours)</td><td></td><td>73.31</td><td>73.77</td><td>-0.46</td><td>1.32×</td></tr><tr><td>ProvableFP (Liebenwein et al., 2020)</td><td></td><td>76.13</td><td>75.21</td><td>0.92</td><td>1.43×</td></tr><tr><td>MetaPruning (Liu et al., 2019a)</td><td rowspan="2">ResNet50</td><td>76.6</td><td>76.2</td><td>0.4</td><td>1.37×</td></tr><tr><td>GReg-1 (Wang et al., 2021b)</td><td>76.13</td><td>76.27</td><td>-0.14</td><td>1.49×</td></tr><tr><td>TPP (ours)</td><td></td><td>76.13</td><td>76.44</td><td>-0.31</td><td>1.49×</td></tr><tr><td>IncReg (Wang et al., 2019)</td><td></td><td>75.60</td><td>72.47</td><td>3.13</td><td>2.00×</td></tr><tr><td>SFP (He et al., 2018)</td><td></td><td>76.15</td><td>74.61</td><td>1.54</td><td>1.72×</td></tr><tr><td>HRank (Lin et al., 2020)</td><td></td><td>76.15</td><td>74.98</td><td>1.17</td><td>1.78×</td></tr><tr><td>Taylor-FO (Molchanov et al., 2019)</td><td></td><td>76.18</td><td>74.50</td><td>1.68</td><td>1.82×</td></tr><tr><td>Factorized (Li et al., 2019)</td><td></td><td>76.15</td><td>74.55</td><td>1.60</td><td>2.33×</td></tr><tr><td>DCP (Zhuang et al., 2018)</td><td>ResNet50</td><td>76.01</td><td>74.95</td><td>1.06</td><td>2.25×</td></tr><tr><td>CCP-AC (Peng et al., 2019)</td><td></td><td>76.15</td><td>75.32</td><td>0.83</td><td>2.18×</td></tr><tr><td>GReg-2 (Wang et al., 2021b)</td><td></td><td>76.13</td><td>75.36</td><td>0.77</td><td>2.31×</td></tr><tr><td>CC (Li et al., 2021)</td><td></td><td>76.15</td><td>75.59</td><td>0.56</td><td>2.12×</td></tr><tr><td>MetaPruning (Liu et al., 2019a)</td><td></td><td>76.6</td><td>75.4</td><td>1.2</td><td>2.00×</td></tr><tr><td>TPP (ours)</td><td></td><td>76.13</td><td>75.60</td><td>0.53</td><td>2.31×</td></tr><tr><td>LFPC (He et al., 2020)</td><td></td><td>76.15</td><td>74.46</td><td>1.69</td><td>2.55×</td></tr><tr><td>GReg-2 (Wang et al., 2021b)</td><td>ResNet50</td><td>76.13</td><td>74.93</td><td>1.20</td><td>2.56×</td></tr><tr><td>CC (Li et al., 2021)</td><td></td><td>76.15</td><td>74.54</td><td>1.61</td><td>2.68×</td></tr><tr><td>TPP (ours)</td><td></td><td>76.13</td><td>75.12</td><td>1.01</td><td>2.56×</td></tr><tr><td>IncReg (Wang et al., 2019)</td><td></td><td>75.60</td><td>71.07</td><td>4.53</td><td>3.00×</td></tr><tr><td>Taylor-FO (Molchanov et al., 2019)</td><td>ResNet50</td><td>76.18</td><td>71.69</td><td>4.49</td><td>3.05×</td></tr><tr><td>GReg-2 (Wang et al., 2021b)</td><td></td><td>76.13</td><td>73.90</td><td>2.23</td><td>3.06×</td></tr><tr><td>TPP (ours)</td><td></td><td>76.13</td><td>74.51</td><td>1.62</td><td>3.06×</td></tr><tr><td>Method</td><td>Network</td><td colspan="2">Top-1 (%)</td><td colspan="2">FLOPs (G)</td></tr><tr><td>CHEX* (Hou et al., 2022)</td><td></td><td colspan="2">77.4</td><td colspan="2">2</td></tr><tr><td>CHEX* (Hou et al., 2022)</td><td>ResNet50</td><td colspan="2">76.0</td><td colspan="2">1</td></tr><tr><td>TPP* (ours)</td><td></td><td colspan="2">77.75</td><td colspan="2">2</td></tr><tr><td>TPP* (ours)</td><td></td><td colspan="2">76.52</td><td colspan="2">1</td></tr></table>

(4) In Tabs. 1 and 10, we also present the results when the initial retraining LR is $1\mathrm{e}{-3}$. Wang et al. (2021a) argue that if the broken dynamical isometry is well maintained/recovered, the final performance gap between LR $1\mathrm{e}{-2}$ and $1\mathrm{e}{-3}$ should diminish. Since TPP is claimed to maintain trainability, its performance gap should become smaller, and this is empirically verified in the table: in general, the accuracy gap between LR $1\mathrm{e}{-2}$ and LR $1\mathrm{e}{-3}$ of TPP is smaller than that of $L_{1}$ pruning. Two exceptions are PR 0.9/0.95 on ResNet56, where LR $1\mathrm{e}{-3}$ is unusually better than LR $1\mathrm{e}{-2}$ for $L_{1}$ pruning. Despite these, the general picture is that the accuracy gap between LR $1\mathrm{e}{-3}$ and $1\mathrm{e}{-2}$ becomes smaller with TPP – a sign that trainability is effectively maintained.

# 4.3 IMAGENET BENCHMARK
We further evaluate TPP on ImageNet-1K (Deng et al., 2009) in comparison to many existing filter pruning algorithms. The results in Tab. 2 show that TPP is consistently better than the others across different speedup ratios. Moreover, under larger speedups, the advantage of our method is usually more evident. E.g., TPP outperforms Taylor-FO (Molchanov et al., 2019) by $1.15\%$ in terms of top-1 accuracy drop on the $2.31\times$ speedup track; at $3.06\times$ speedup, TPP leads Taylor-FO (Molchanov et al., 2019) by $2.87\%$. This shows TPP is more robust to aggressive pruning. The reason is easy to see – more aggressive pruning hurts trainability more (Lee et al., 2019; Wang et al., 2023), where our method finds more use, in line with the observations on CIFAR (Tabs. 1 and 10).

Table 3: Test accuracy (without retraining) comparison between the two plausible schemes, diagonal vs. decorrelate, in our TPP method.

<table><tr><td colspan="6">ResNet56 on CIFAR10: Unpruned acc. 93.78%, Params: 0.85M, FLOPs: 0.25G</td></tr><tr><td>Layerwise PR</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td><td>0.95</td></tr><tr><td>TPP (diagonal)</td><td>92.67 (0.29)</td><td>91.97 (0.02)</td><td>90.21 (0.23)</td><td>23.23 (5.19)</td><td>14.23 (1.42)</td></tr><tr><td>TPP (decorrelate)</td><td>92.74 (0.16)</td><td>92.07 (0.05)</td><td>89.95 (0.26)</td><td>30.35 (4.69)</td><td>17.33 (0.50)</td></tr><tr><td colspan="6">VGG19 on CIFAR100: Unpruned acc. 74.02%, Params: 20.08M, FLOPs: 0.80G</td></tr><tr><td>Layerwise PR</td><td>0.1</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td></tr><tr><td>TPP (diagonal)</td><td>68.70 (0.18)</td><td>64.55 (0.14)</td><td>55.66 (0.73)</td><td>13.76 (0.53)</td><td>1.00 (0.00)</td></tr><tr><td>TPP (decorrelate)</td><td>72.43 (0.12)</td><td>69.31 (0.11)</td><td>62.59 (0.14)</td><td>18.97 (1.25)</td><td>1.00 (0.00)</td></tr></table>
Table 4: Test accuracy (without retraining) comparison w.r.t. the proposed weight gram matrix regularization and BN regularization. PR stands for layerwise pruning ratio.
<table><tr><td colspan="7">ResNet56 on CIFAR10: Unpruned acc. 93.78%, Params: 0.85M, FLOPs: 0.25G</td></tr><tr><td>Gram Reg</td><td>BN Reg</td><td>PR = 0.3</td><td>PR = 0.5</td><td>PR = 0.7</td><td>PR = 0.9</td><td>PR = 0.95</td></tr><tr><td>✓</td><td>✓</td><td>92.94 (0.14)</td><td>92.48 (0.19)</td><td>90.48 (0.09)</td><td>70.53 (1.69)</td><td>23.05 (2.61)</td></tr><tr><td>✓</td><td>X</td><td>92.79 (0.03)</td><td>92.23 (0.08)</td><td>90.46 (0.21)</td><td>44.25 (2.46)</td><td>16.52 (0.43)</td></tr><tr><td>X</td><td>✓</td><td>92.40 (0.30)</td><td>91.95 (0.04)</td><td>90.26 (0.23)</td><td>26.79 (2.19)</td><td>10.50 (0.63)</td></tr><tr><td colspan="7">VGG19 on CIFAR100: Unpruned acc. 74.02%, Params: 20.08M, FLOPs: 0.80G</td></tr><tr><td>Gram Reg</td><td>BN Reg</td><td>PR = 0.1</td><td>PR = 0.3</td><td>PR = 0.5</td><td>PR = 0.7</td><td>PR = 0.9</td></tr><tr><td>✓</td><td>✓</td><td>73.44 (0.07)</td><td>71.61 (0.12)</td><td>69.28 (0.25)</td><td>65.15 (0.20)</td><td>2.84 (1.13)</td></tr><tr><td>✓</td><td>X</td><td>73.01 (0.13)</td><td>71.26 (0.19)</td><td>68.67 (0.10)</td><td>61.70 (0.46)</td><td>1.75 (0.38)</td></tr><tr><td>X</td><td>✓</td><td>71.97 (0.23)</td><td>70.26 (0.61)</td><td>68.40 (0.30)</td><td>2.10 (0.27)</td><td>1.02 (0.03)</td></tr></table>
We further compare to stronger pruning methods. Notably, DMCP (Guo et al., 2020), LeGR (Chin et al., 2020), EagleEye (Li et al., 2020), and CafeNet (Su et al., 2021) have been shown to be outperformed by CHEX (Hou et al., 2022) (see their Tab. 1) with ResNet50 on ImageNet; therefore, here we only compare to CHEX. Following CHEX, we employ a more advanced training recipe (e.g., a cosine LR schedule) following TIMM (Wightman et al., 2021). The results in Tab. 2 suggest that our method surpasses CHEX at different FLOPs budgets.

# 4.4 ABLATION STUDY
This section presents ablation studies to demonstrate the merits of TPP's two major innovations: (1) We propose not to over-penalize the kept weights in orthogonalization (i.e., (c) vs. (b) in Fig. 2). (2) We propose to regularize the two learnable parameters in BN.
The results are presented in Tabs. 3 and 4, where we compare the accuracy right after pruning (i.e., without retraining). We have the following major observations. (1) Tab. 3 shows that using decorrelate (Fig. 2(c)) is generally better than using diagonal (Fig. 2(b)). Akin to Tabs. 1 and 10, at a greater sparsity ratio, the advantage of decorrelate is more pronounced, except at extreme sparsity (0.95 for ResNet56, 0.9 for VGG19), where such sparsity breaks trainability beyond repair. (2) For BN regularization, Tab. 4 shows that the performance degrades when it is switched off, with a similar trend: BN regularization is more helpful at larger sparsity.

# 5 CONCLUSION
Trainability preservation is shown to be critical in neural network pruning, yet few works have realized it on modern large-scale non-linear deep networks. Towards this end, we present a novel filter pruning method named trainability preserving pruning (TPP), based on regularization. Specifically, we propose an improved weight gram matrix regularization target, which does not unnecessarily over-penalize the retained important weights. Besides, we propose to regularize the BN parameters to mitigate the damage of pruning to trainability. Empirically, TPP performs as effectively as the ground-truth trainability recovery method and is more effective than counterpart approaches based on weight orthogonality. Furthermore, on the standard ImageNet-1K benchmark, TPP also matches or even beats many recent SOTA filter pruning approaches. To our knowledge, TPP is the first approach that explicitly tackles the trainability preservation problem in structured pruning and easily scales to large-scale datasets and networks.

# REFERENCES

Nitin Bansal, Xiaohan Chen, and Zhangyang Wang. Can we gain more from orthogonality regularizations in training deep networks? In NeurIPS, 2018.

Tianlong Chen, Xuxi Chen, Xiaolong Ma, Yanzhi Wang, and Zhangyang Wang. Coarsening the granularity: Towards structurally sparse lottery tickets. In ICML, 2022.

Yu Cheng, Duo Wang, Pan Zhou, and Tao Zhang. Model compression and acceleration for deep neural networks: The principles, progress, and challenges. IEEE Signal Processing Magazine, 35(1):126-136, 2018.

Ting-Wu Chin, Ruizhou Ding, Cha Zhang, and Diana Marculescu. Towards efficient model compression via learned global ranking. In CVPR, 2020.

Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009.

Lei Deng, Guoqi Li, Song Han, Luping Shi, and Yuan Xie. Model compression and hardware acceleration for neural networks: A comprehensive survey. Proceedings of the IEEE, 108(4):485-532, 2020.

Xiaohan Ding, Guiguang Ding, Jungong Han, and Sheng Tang. Auto-balanced filter pruning for efficient convolutional neural networks. In AAAI, 2018.

Trevor Gale, Erich Elsen, and Sara Hooker. The state of sparsity in deep neural networks. arXiv preprint arXiv:1902.09574, 2019.

Shaopeng Guo, Yujie Wang, Quanquan Li, and Junjie Yan. Dmcp: Differentiable markov channel pruning for neural networks. In CVPR, 2020.

S. Han, X. Liu, and H. Mao. EIE: Efficient inference engine on compressed deep neural network. ACM SIGARCH Computer Architecture News, 44(3):243-254, 2016a.

Song Han, Jeff Pool, John Tran, and William J Dally. Learning both weights and connections for efficient neural network. In NeurIPS, 2015.

Song Han, Huizi Mao, and William J Dally. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. In ICLR, 2016b.

Song Han, Junlong Kang, Huizi Mao, Yiming Hu, Xin Li, Yubin Li, Dongliang Xie, Hong Luo, Song Yao, Yu Wang, et al. ESE: Efficient speech recognition engine with sparse LSTM on FPGA. In ACM/SIGDA International Symposium on Field-Programmable Gate Arrays, 2017.

B. Hassibi and D. G. Stork. Second order derivatives for network pruning: Optimal brain surgeon. In NeurIPS, 1993.

K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.

Yang He, Guoliang Kang, Xuanyi Dong, Yanwei Fu, and Yi Yang. Soft filter pruning for accelerating deep convolutional neural networks. In IJCAI, 2018.

Yang He, Yuhang Ding, Ping Liu, Linchao Zhu, Hanwang Zhang, and Yi Yang. Learning filter pruning criteria for deep convolutional neural networks acceleration. In CVPR, 2020.

Yihui He, Xiangyu Zhang, and Jian Sun. Channel pruning for accelerating very deep neural networks. In ICCV, 2017.

Torsten Hoefler, Dan Alistarh, Tal Ben-Nun, Nikoli Dryden, and Alexandra Peste. Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks. JMLR, 22(241):1-124, 2021.

Zejiang Hou, Minghai Qin, Fei Sun, Xiaolong Ma, Kun Yuan, Yi Xu, Yen-Kuang Chen, Rong Jin, Yuan Xie, and Sun-Yuan Kung. Chex: Channel exploration for CNN model compression. In CVPR, 2022.

Lei Huang, Xianglong Liu, Bo Lang, Adams Yu, Yongliang Wang, and Bo Li. Orthogonal weight normalization: Solution to optimization over multiple dependent Stiefel manifolds in deep neural networks. In AAAI, 2018.

Lei Huang, Li Liu, Fan Zhu, Diwen Wan, Zehuan Yuan, Bo Li, and Ling Shao. Controllable orthogonalization in training DNNs. In CVPR, 2020.

Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In ICML, 2015.

Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.

Duong H Le and Binh-Son Hua. Network pruning that matters: A case study on retraining variants. In ICLR, 2021.

V. Lebedev and V. Lempitsky. Fast convnets using group-wise brain damage. In CVPR, 2016.

Y. LeCun, J. S. Denker, and S. A. Solla. Optimal brain damage. In NeurIPS, 1990.

Yann LeCun, Léon Bottou, Yoshua Bengio, Patrick Haffner, et al. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324, 1998.

Namhoon Lee, Thalaiyasingam Ajanthan, and Philip Torr. Snip: Single-shot network pruning based on connection sensitivity. In ICLR, 2019.

Namhoon Lee, Thalaiyasingam Ajanthan, Stephen Gould, and Philip HS Torr. A signal propagation perspective for pruning neural networks at initialization. In ICLR, 2020.

Bailin Li, Bowen Wu, Jiang Su, and Guangrun Wang. Eagleeye: Fast sub-net evaluation for efficient neural network pruning. In ECCV, 2020.

Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, and Hans Peter Graf. Pruning filters for efficient convnets. In ICLR, 2017.

Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. In NeurIPS, 2018.

Tuanhui Li, Baoyuan Wu, Yujiu Yang, Yanbo Fan, Yong Zhang, and Wei Liu. Compressing convolutional neural networks via factorized convolutional filters. In CVPR, 2019.

Yuchao Li, Shaohui Lin, Jianzhuang Liu, Qixiang Ye, Mengdi Wang, Fei Chao, Fan Yang, Jincheng Ma, Qi Tian, and Rongrong Ji. Towards compact cnns via collaborative compression. In CVPR, 2021.

Lucas Liebenwein, Cenk Baykal, Harry Lang, Dan Feldman, and Daniela Rus. Provable filter pruning for efficient neural networks. In ICLR, 2020.

Mingbao Lin, Rongrong Ji, Yan Wang, Yichen Zhang, Baochang Zhang, Yonghong Tian, and Ling Shao. Hrank: Filter pruning using high-rank feature map. In CVPR, 2020.

Zechun Liu, Haoyuan Mu, Xiangyu Zhang, Zichao Guo, Xin Yang, Kwang-Ting Cheng, and Jian Sun. Metapruning: Meta learning for automatic neural network channel pruning. In ICCV, 2019a.

Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan, and Changshui Zhang. Learning efficient convolutional networks through network slimming. In ICCV, 2017.

Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. Rethinking the value of network pruning. In ICLR, 2019b.

Christos Louizos, Max Welling, and Diederik P Kingma. Learning sparse neural networks through $L_0$ regularization. In ICLR, 2018.

Ekdeep Singh Lubana and Robert P Dick. A gradient flow framework for analyzing network pruning. In ICLR, 2021.

Francesco Mezzadri. How to generate random matrices from the classical compact groups. arXiv preprint math-ph/0609050, 2006.

P. Molchanov, S. Tyree, and T. Karras. Pruning convolutional neural networks for resource efficient inference. In ICLR, 2017.

Pavlo Molchanov, Arun Mallya, Stephen Tyree, Iuri Frosio, and Jan Kautz. Importance estimation for neural network pruning. In CVPR, 2019.

Vinod Nair and Geoffrey E Hinton. Rectified linear units improve restricted boltzmann machines. In ICML, 2010.

Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In NeurIPS, 2019.

Hanyu Peng, Jiaxiang Wu, Shifeng Chen, and Junzhou Huang. Collaborative channel pruning for deep networks. In ICML, 2019.

R. Reed. Pruning algorithms - a survey. IEEE Transactions on Neural Networks, 4(5):740-747, 1993.

Alex Renda, Jonathan Frankle, and Michael Carbin. Comparing rewinding and fine-tuning in neural network pruning. In ICLR, 2020.

Andrew M Saxe, James L McClelland, and Surya Ganguli. Exact solutions to the nonlinear dynamics of learning in deep linear neural networks. In ICLR, 2014.

Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In ICLR, 2015.

Sidak Pal Singh and Dan Alistarh. Woodfisher: Efficient second-order approximations for model compression. In NeurIPS, 2020.

Xiu Su, Shan You, Tao Huang, Fei Wang, Chen Qian, Changshui Zhang, and Chang Xu. Locally free weight sharing for network width search. In ICLR, 2021.

Vivienne Sze, Yu-Hsin Chen, Tien-Ju Yang, and Joel S Emer. Efficient processing of deep neural networks: A tutorial and survey. Proceedings of the IEEE, 105(12):2295-2329, 2017.

Lloyd N Trefethen and David Bau III. Numerical linear algebra, volume 50. SIAM, 1997.

Artem Vysogorets and Julia Kempe. Connectivity matters: Neural network pruning through the lens of effective sparsity. arXiv preprint arXiv:2107.02306, 2021.

Huan Wang, Xinyi Hu, Qiming Zhang, Yuehai Wang, Lu Yu, and Haoji Hu. Structured pruning for efficient convolutional neural networks via incremental regularization. IEEE Journal of Selected Topics in Signal Processing (JSTSP), 14(4):775-788, 2019.

Huan Wang, Can Qin, Yue Bai, and Yun Fu. Dynamical isometry: The missing ingredient for neural network pruning. arXiv preprint arXiv:2105.05916, 2021a.

Huan Wang, Can Qin, Yulun Zhang, and Yun Fu. Neural pruning via growing regularization. In ICLR, 2021b.

Huan Wang, Can Qin, Yulun Zhang, and Yun Fu. Recent advances on neural network pruning at initialization. In IJCAI, 2022.

Huan Wang, Can Qin, Yue Bai, and Yun Fu. Why is the state of neural network pruning so confusing? On the fairness, comparison setup, and trainability in network pruning. arXiv preprint arXiv:2301.05219, 2023.

Jiayun Wang, Yubei Chen, Rudrasis Chakraborty, and Stella X Yu. Orthogonal convolutional neural networks. In CVPR, 2020.

Wei Wen, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li. Learning structured sparsity in deep neural networks. In NeurIPS, 2016.

Ross Wightman, Hugo Touvron, and Hervé Jégou. Resnet strikes back: An improved training procedure in timm. arXiv preprint arXiv:2110.00476, 2021.

Di Xie, Jiang Xiong, and Shiliang Pu. All you need is beyond a good init: Exploring better solution for training extremely deep convolutional neural networks with orthonormality and modulation. In CVPR, 2017.

Huanrui Yang, Wei Wen, and Hai Li. Deephoyer: Learning sparser neural network with differentiable scale-invariant sparsity measures. In ICLR, 2020.

Jianbo Ye, Xin Lu, Zhe Lin, and James Z Wang. Rethinking the smaller-norm-less-informative assumption in channel pruning of convolution layers. In ICLR, 2018.

Yulun Zhang, Huan Wang, Can Qin, and Yun Fu. Aligned structured sparsity learning for efficient image super-resolution. In NeurIPS, 2021a.

Yulun Zhang, Huan Wang, Can Qin, and Yun Fu. Learning efficient image super-resolution networks via structure-regularized pruning. In ICLR, 2022.

Zhenyu Zhang, Xuxi Chen, Tianlong Chen, and Zhangyang Wang. Efficient lottery ticket finding: Less data is more. In ICML, 2021b.

Zhuangwei Zhuang, Mingkui Tan, Bohan Zhuang, Jing Liu, Yong Guo, Qingyao Wu, Junzhou Huang, and Jinhui Zhu. Discrimination-aware channel pruning for deep neural networks. In NeurIPS, 2018.

# A IMPLEMENTATION DETAILS
Code reference. We mainly refer to the following code implementations in this work. They are all open-licensed.
- Official PyTorch ImageNet example<sup>1</sup>;
- GReg-1/GReg-2 (Wang et al., 2021b) $^{2}$ ;
- OrthConv (Wang et al., 2020)<sup>3</sup>;
- Rethinking the value of network pruning (Liu et al., 2019b) $^4$ .
Data split. All the datasets in this paper are public datasets with standard APIs in PyTorch (Paszke et al., 2019). We employ these standard APIs for the train/test data split to keep the comparison with other methods fair.

Training setups and hyper-parameters. Tab. 5 summarizes the detailed training setups. The hyper-parameters introduced by our TPP method – regularization granularity $\Delta$, regularization ceiling $\tau$, and regularization update interval $K_{u}$ – are summarized in Tab. 6. We mainly refer to the official code of GReg-1 (Wang et al., 2021b) when setting these hyper-parameters, since we adopt a similar growing regularization scheme.

For the small datasets (CIFAR and MNIST), each reported result is averaged over at least 3 random runs, with mean and std reported. For ImageNet-1K, we cannot run multiple times due to our limited resource budget; that said, the results on ImageNet are generally quite stable.

Table 5: Summary of training setups. In the parentheses of SGD are the momentum and weight decay. For LR schedule, the first number is initial LR; the second (in brackets) is the epochs when LR is decayed by factor 1/10; and #epochs stands for the total number of epochs.
<table><tr><td>Dataset</td><td>MNIST</td><td>CIFAR10/100</td><td>ImageNet</td></tr><tr><td>Solver</td><td>SGD (0.9, 1e-4)</td><td>SGD (0.9, 5e-4)</td><td>SGD (0.9, 1e-4)</td></tr><tr><td>Batch size</td><td>100</td><td colspan="2">CIFAR10: 128, others: 256</td></tr><tr><td>LR schedule (scratch)</td><td>1e-2, [30,60], #epochs:90</td><td>1e-1, [100,150], #epochs:200</td><td>1e-1, [30,60], #epochs:90</td></tr><tr><td>LR schedule (prune)</td><td colspan="3">Fixed (1e-3)</td></tr><tr><td>LR schedule (retrain)</td><td>1e-2,[30,60], #epochs:90</td><td>1e-2, [60,90], #epochs:120</td><td>1e-2, [30,60,75], #epochs:90</td></tr></table>
Table 6: Hyper-parameters of our method.

<table><tr><td>Dataset</td><td>MNIST</td><td>CIFAR10/100</td><td>ImageNet</td></tr><tr><td>Regularization granularity Δ</td><td colspan="3">1e-4</td></tr><tr><td>Regularization ceiling τ</td><td colspan="3">1</td></tr><tr><td>Regularization update interval Ku</td><td>10 iterations</td><td colspan="2">5 iterations</td></tr></table>
Hardware and running time. We conduct all our experiments on 4 NVIDIA V100 GPUs (16GB memory per GPU). It takes roughly 41 hrs to prune ResNet50 on ImageNet using our TPP method (pruning and 90-epoch retraining both included). Of these, 12 hrs (close to $30\%$) are spent on pruning and 29 hrs on retraining (about 20 mins per epoch).

Layerwise pruning ratios. The layerwise pruning ratios are pre-specified in this paper. For the ImageNet benchmark, we exactly follow GReg (Wang et al., 2021b) for the layerwise pruning ratios to keep fair comparison to it. The specific numbers are summarized in Tab. 7. Each number is the pruning ratio shared by all the layers of the same stage in ResNet34/50. On top of these ratios, some layers are skipped, such as the last CONV layer in a residual block. The best way to examine the detailed layerwise pruning ratio is to check the code at: https://github.com/MingSun-Tse/TPP.
Table 7: A brief summary of the layerwise pruning ratios (PRs) of ImageNet experiments.
<table><tr><td>Model</td><td>Speedup</td><td>Pruned top-1 acc. (%)</td><td>PR</td></tr><tr><td>ResNet34</td><td>1.32×</td><td>73.77</td><td>[0, 0.50, 0.60, 0.40, 0, 0]</td></tr><tr><td>ResNet50</td><td>1.49×</td><td>76.44</td><td>[0, 0.30, 0.30, 0.30, 0.14, 0]</td></tr><tr><td>ResNet50</td><td>2.31×</td><td>75.60</td><td>[0, 0.60, 0.60, 0.60, 0.21, 0]</td></tr><tr><td>ResNet50</td><td>2.56×</td><td>75.12</td><td>[0, 0.74, 0.74, 0.60, 0.21, 0]</td></tr><tr><td>ResNet50</td><td>3.06×</td><td>74.51</td><td>[0, 0.68, 0.68, 0.68, 0.50, 0]</td></tr></table>
# B SENSITIVITY ANALYSIS OF HYPER-PARAMETERS
Among the three hyper-parameters in Tab. 6, the regularization ceiling $\tau$ works as a termination condition. We only require it to be large enough to ensure the pruned weights are compressed to a very small magnitude; it does not have to be 1, and the final performance is less sensitive to it. The pruned performance seems more sensitive to the other two hyper-parameters, so here we conduct a sensitivity analysis to check their robustness.

The results are presented in Tab. 8 and Tab. 9. Pruning ratios 0.7 (for ResNet56) and 0.5 (for VGG19) are chosen here because the resulting sparsity is the most representative (i.e., neither too large nor too small). (1) For $K_{u}$, in general, a larger $K_{u}$ tends to deliver a better result. This is no surprise, since a larger $K_{u}$ allows more iterations for the network to adapt and recover while undergoing the penalty. (2) For $\Delta$, we do not see a clear pattern: either a small or a large $\Delta$ can achieve the best result (for different networks). On the whole, when varying the hyper-parameters within a reasonable range, the performance is quite robust, with no catastrophic failures. Moreover, note that the default setting is actually not the best for either $K_{u}$ or $\Delta$; we did not heavily search for the best hyper-parameters, yet they still achieve encouraging performance compared to the counterpart methods, as shown in the main text.

Table 8: Robustness analysis of $K_u$ on the CIFAR10 and CIFAR100 datasets with our TPP algorithm. By default, $K_u = 10$. Layerwise $\mathrm{PR} = 0.7$ for ResNet56 and 0.5 for VGG19. The best is highlighted in red and the worst in blue.

<table><tr><td>Ku</td><td>1</td><td>5</td><td>10</td><td>15</td><td>20</td></tr><tr><td>Acc. (%, ResNet56)</td><td>92.24±0.03</td><td>92.42±0.14</td><td>92.35±0.12</td><td>92.50±0.10</td><td>92.31±0.16</td></tr><tr><td>Acc. (%, VGG19)</td><td>71.33±0.06</td><td>71.45±0.21</td><td>71.61±0.08</td><td>71.43±0.21</td><td>71.68±0.18</td></tr></table>
Table 9: Robustness analysis of $\Delta$ on the CIFAR10 and CIFAR100 datasets with our TPP algorithm. By default, $\Delta = 1\mathrm{e}{-4}$. Layerwise $\mathrm{PR} = 0.7$ for ResNet56 and 0.5 for VGG19. The best is highlighted in red and the worst in blue.

<table><tr><td>Δ</td><td>1e-5</td><td>5e-5</td><td>1e-4</td><td>5e-4</td><td>1e-3</td></tr><tr><td>Acc. (%, ResNet56)</td><td>92.37±0.12</td><td>92.29±0.10</td><td>92.35±0.12</td><td>92.40±0.15</td><td>92.44±0.13</td></tr><tr><td>Acc. (%, VGG19)</td><td>71.39±0.19</td><td>71.37±0.14</td><td>71.61±0.08</td><td>71.58±0.31</td><td>71.25±0.31</td></tr></table>
# C ALGORITHM DETAILS
The details of our TPP method are summarized in Algorithm 1.

Algorithm 1 Trainability Preserving Pruning (TPP)
1: Input: Pretrained model $\Theta$ , layerwise pruning ratio $r_l$ of $l$ -th layer, for $l \in \{1, 2, \dots, L\}$ .
2: Input: Regularization ceiling $\tau$ , penalty coefficient update interval $K_u$ , penalty granularity $\Delta$ .
3: Init: Iteration $i = 0$ . $\lambda_j = 0$ for all filter $j$ . Set pruned filter indices $S_l$ by $L_1$ -norm sorting.
4: while $\lambda_j \leq \tau$ , for $j \in S_l$ do
5: if $i \% K_u = 0$ then
6: $\lambda_j = \lambda_j + \Delta$ for $j \in S_l$. ▷ Update the regularization coefficient in Eq. (6)

7: end if
8: Network forward, loss (Eq. (6)) backward, parameter update by stochastic gradient descent.
9: Update iteration: $i = i + 1$ .
10: end while
11: Remove filters in $S_l$ to obtain a smaller model $\Theta'$ .
12: Retrain $\Theta'$ to regain accuracy.
13: Output: Retrained model $\Theta'$ .
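
To make Algorithm 1 concrete, here is a hedged PyTorch sketch of the regularized-training stage, reusing the hypothetical `select_unimportant` and `tpp_gram_penalty` helpers sketched in Sec. 3.2 (the BN term of Eq. (5) is omitted for brevity); the interface and defaults are illustrative, not the official implementation.

```python
import torch
import torch.nn as nn

def tpp_train(model: nn.Module, loader, cls_loss_fn,
              prune_ratio: float = 0.5, delta: float = 1e-4,
              tau: float = 1.0, K_u: int = 5, lr: float = 1e-3):
    """Regularized training of Algorithm 1 (before filters are removed)."""
    # Step 3: pick S_l per conv layer by L1-norm sorting.
    pruned = {name: select_unimportant(m.weight, prune_ratio)
              for name, m in model.named_modules() if isinstance(m, nn.Conv2d)}
    opt = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    lam, it = 0.0, 0
    while lam <= tau:                         # Step 4: grow lambda to the ceiling
        for x, y in loader:
            if it % K_u == 0:                 # Steps 5-6: update the coefficient
                lam += delta
            penalty = sum(tpp_gram_penalty(m.weight, pruned[name])
                          for name, m in model.named_modules()
                          if isinstance(m, nn.Conv2d))
            loss = cls_loss_fn(model(x), y) + 0.5 * lam * penalty  # Eq. (6)
            opt.zero_grad(); loss.backward(); opt.step()
            it += 1
            if lam > tau:
                break
    return pruned  # Steps 11-12: remove the filters in S_l, then retrain
```
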
# D RESULTS OMITTED FROM THE MAIN TEXT
Loss surface visualization. The loss surface visualization of ResNet56 on CIFAR10 is presented in Fig. 4.
VGG19 on CIFAR100. The results of VGG19 on CIFAR100 are shown in Tab. 10.

Examination of the early retraining phase. To further understand how pruning hurts trainability and how our TPP method maintains it, in Tab. 11 we list the mean JSVs over the first 10 retraining epochs (at pruning ratio 0.9). Note the obvious mean JSV gap between LR 0.01 and LR 0.001 without OrthP: LR 0.01 reaches mean JSV 0.65 after just 1 epoch of retraining, while LR 0.001 takes over 8 epochs. When OrthP is used, this gap greatly shrinks. We also list the test accuracies of the first 10 retraining epochs in Tab. 12. In particular, note that the test accuracy correlates well with the mean JSV trend under each setting, implying that the damaged trainability is primarily responsible for the under-performance of LR 0.001 after pruning.

Then, when TPP is used in place of OrthP, after 1 epoch of retraining the model achieves a mean JSV above 1 and a test accuracy over $90\%$, closely matching the effect of OrthP. This reiterates that the proposed TPP method works as effectively as the ground-truth trainability recovery method OrthP on this toy setup.


|
| 358 |
+
(a) $L_{1}$ (Li et al., 2017)
|
| 359 |
+
|
| 360 |
+

|
| 361 |
+
Figure 4: Loss surface visualization of pruned models by different methods (w/o retraining). ResNet56 on CIFAR10. Pruning ratio: 0.9 (zoom in to examine the details).
|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
(b) GReg-2 (Wang et al., 2021b)
|
| 365 |
+
(c) TPP (ours)
|
| 366 |
+
|
| 367 |
+
Table 10: Test accuracy (%) comparison among different dynamical isometry maintenance or recovery methods with VGG19 on CIFAR100. Scratch stands for training from scratch. KernOrth means Kernel Orthogonalization (Xie et al., 2017); OrthConv means Convolutional Orthogonalization (Wang et al., 2020). Two retraining LR schedules are evaluated: initial LR $1\mathrm{e}{-2}$ vs. $1\mathrm{e}{-3}$. Acc. diff. refers to the accuracy gap of LR $1\mathrm{e}{-3}$ against LR $1\mathrm{e}{-2}$.

<table><tr><td colspan="6">VGG19 on CIFAR100: Unpruned acc. 74.02%, Params: 20.08M, FLOPs: 0.80G</td></tr><tr><td>Layerwise PR</td><td>0.1</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td></tr><tr><td>Sparsity/Speedup</td><td>19.24%/1.23×</td><td>51.01%/1.97×</td><td>74.87%/3.60×</td><td>90.98%/8.84×</td><td>98.96%/44.22×</td></tr><tr><td colspan="6">Initial retraining LR 1e-2</td></tr><tr><td>Scratch</td><td>72.84 (0.25)</td><td>71.88 (0.14)</td><td>70.79 (0.08)</td><td>66.51 (0.11)</td><td>54.37 (0.40)</td></tr><tr><td>L1 (Li et al., 2017)</td><td>74.01 (0.18)</td><td>73.01 (0.22)</td><td>71.49 (0.14)</td><td>66.05 (0.04)</td><td>51.36 (0.11)</td></tr><tr><td>L1 + OrthP (Wang et al., 2021a)</td><td>74.00 (0.04)</td><td>72.30 (0.49)</td><td>68.09 (0.24)</td><td>62.22 (0.15)</td><td>48.07 (0.31)</td></tr><tr><td>L1 + KernOrth (Xie et al., 2017)</td><td>73.72 (0.26)</td><td>72.53 (0.09)</td><td>71.23 (0.10)</td><td>65.90 (0.14)</td><td>50.75 (0.30)</td></tr><tr><td>L1 + OrthConv (Wang et al., 2020)</td><td>73.18 (0.10)</td><td>72.25 (0.31)</td><td>70.82 (0.11)</td><td>64.51 (0.43)</td><td>48.31 (0.18)</td></tr><tr><td>KernOrth (Xie et al., 2017) + L1</td><td>73.73 (0.23)</td><td>72.41 (0.12)</td><td>70.31 (0.12)</td><td>64.10 (0.19)</td><td>50.72 (0.87)</td></tr><tr><td>OrthConv (Wang et al., 2020) + L1</td><td>73.55 (0.18)</td><td>72.67 (0.09)</td><td>71.24 (0.23)</td><td>65.66 (0.10)</td><td>50.53 (0.46)</td></tr><tr><td>TPP (ours)</td><td>74.02 (0.24)</td><td>73.19 (0.07)</td><td>71.61 (0.08)</td><td>67.78 (0.31)</td><td>57.70 (0.37)</td></tr><tr><td colspan="6">Initial retraining LR 1e-3</td></tr><tr><td>L1 (Li et al., 2017)</td><td>73.67 (0.05)</td><td>72.04 (0.12)</td><td>70.21 (0.02)</td><td>64.72 (0.17)</td><td>48.43 (0.44)</td></tr><tr><td>TPP (ours)</td><td>73.83 (0.02)</td><td>72.29 (0.07)</td><td>71.16 (0.12)</td><td>67.47 (0.17)</td><td>56.73 (0.34)</td></tr><tr><td>Acc. diff. (L1)</td><td>-0.34</td><td>-0.97</td><td>-1.28</td><td>-1.33</td><td>-2.93</td></tr><tr><td>Acc. diff. (TPP)</td><td>-0.19</td><td>-0.90</td><td>-0.45</td><td>-0.31</td><td>-0.97</td></tr></table>

# E MORE ANALYTICAL RESULTS
In this section, we add more analytical results to help readers better understand how TPP works.
# E.1 COMPARISON UNDER SIMILAR TOTAL EPOCHS
In Tab. 1, TPP takes a number of epochs of regularized training before the filters are actually removed, while $L_{1}$ pruning is one-shot and takes no extra epochs. Namely, the total training cost of TPP is larger than that of the one-shot methods. It is of interest to see how the comparison changes if the one-shot methods are given more epochs of training.

First, note that the results of $L_{1} + \mathrm{KernOrth}$, $L_{1} + \mathrm{OrthConv}$, $\mathrm{KernOrth} + L_{1}$, and $\mathrm{OrthConv} + L_{1}$ in Tab. 1 also involve regularized training (the KernOrth/OrthConv stage is essentially regularized training), which takes 50k iterations. We make the following changes:

- Our TPP takes 100k iterations under the default hyper-parameter setup, so to make a fair comparison, we decrease the regularization update interval $K_{u}$ from 10 to 5, making the regularization phase of TPP also take 50k iterations.
- Meanwhile, we add 128 retraining epochs (50k iterations / 391 iterations per epoch ≈ 128 epochs; see the sketch below) to the $L_{1}$ and $L_{1} + \mathrm{OrthP}$ methods (when their retraining epochs are increased, the LR decay epochs are proportionally scaled); together with the original 120 epochs, the total is now 248 epochs.
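For concreteness, the epoch arithmetic above can be reproduced as follows (a minimal sketch; it assumes the standard CIFAR-10 setup with 50,000 training images and batch size 128, which yields the 391 iterations per epoch used here):

```python
# Convert the 50k extra iterations into equivalent retraining epochs.
train_images = 50_000                                 # CIFAR-10 training set size
batch_size = 128
iters_per_epoch = train_images // batch_size + 1      # 391 iterations/epoch
extra_epochs = round(50_000 / iters_per_epoch)        # ~128 epochs
total_epochs = 120 + extra_epochs                     # 248 epochs in total
print(iters_per_epoch, extra_epochs, total_epochs)    # 391 128 248
```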
Table 11: Mean JSV of the first 10 epochs under different retraining settings. Epoch 0 refers to the model just pruned, before any retraining. The pruning ratio is 0.9. Note, with OrthP, the mean JSV at epoch 0 is 1 because OrthP achieves exact isometry.
<table><tr><td>Epoch</td><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td></tr><tr><td>LR=1e-2, w/o OrthP</td><td>0.00</td><td>0.65</td><td>0.89</td><td>1.01</td><td>0.97</td><td>1.10</td><td>1.14</td><td>1.27</td><td>1.33</td><td>1.29</td><td>1.42</td></tr><tr><td>LR=1e-3, w/o OrthP</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.10</td><td>0.25</td><td>0.35</td><td>0.42</td><td>0.51</td><td>0.74</td><td>0.86</td></tr><tr><td>LR=1e-2, w/ OrthP</td><td>1.00</td><td>1.23</td><td>1.42</td><td>1.43</td><td>1.40</td><td>1.47</td><td>1.51</td><td>1.56</td><td>1.60</td><td>1.65</td><td>1.68</td></tr><tr><td>LR=1e-3, w/ OrthP</td><td>1.00</td><td>1.50</td><td>1.64</td><td>1.73</td><td>1.84</td><td>1.87</td><td>1.93</td><td>1.96</td><td>1.99</td><td>2.00</td><td>2.04</td></tr><tr><td>LR=1e-2, w/ TPP (ours)</td><td>2.98</td><td>2.15</td><td>2.01</td><td>1.67</td><td>1.97</td><td>2.10</td><td>1.97</td><td>2.08</td><td>2.05</td><td>2.06</td><td>2.06</td></tr><tr><td>LR=1e-3, w/ TPP (ours)</td><td>2.98</td><td>2.96</td><td>2.95</td><td>2.99</td><td>2.99</td><td>3.01</td><td>3.21</td><td>3.19</td><td>3.05</td><td>3.04</td><td>3.02</td></tr></table>
Table 12: Test accuracy (\%) of the first 10 epochs corresponding to Tab. 11 under different retraining settings. Epoch 0 refers to the model just pruned, before any retraining. Pruning ratio is 0.9.
<table><tr><td>Epoch</td><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td></tr><tr><td>LR=1e-2, w/o OrthP</td><td>9.74</td><td>64.34</td><td>80.01</td><td>80.23</td><td>81.77</td><td>85.80</td><td>85.82</td><td>86.21</td><td>86.35</td><td>86.60</td><td>86.15</td></tr><tr><td>LR=1e-3, w/o OrthP</td><td>9.74</td><td>9.74</td><td>9.74</td><td>11.89</td><td>21.34</td><td>27.75</td><td>32.96</td><td>35.38</td><td>49.66</td><td>64.89</td><td>68.97</td></tr><tr><td>LR=1e-2, w/ OrthP</td><td>9.74</td><td>91.05</td><td>91.39</td><td>91.33</td><td>91.37</td><td>91.74</td><td>91.69</td><td>90.74</td><td>91.39</td><td>91.58</td><td>91.44</td></tr><tr><td>LR=1e-3, w/ OrthP</td><td>9.74</td><td>90.81</td><td>91.59</td><td>91.77</td><td>91.85</td><td>92.04</td><td>92.12</td><td>92.22</td><td>92.12</td><td>92.33</td><td>92.25</td></tr><tr><td>LR=1e-2, w/ TPP (ours)</td><td>89.21</td><td>91.54</td><td>91.01</td><td>91.45</td><td>91.83</td><td>91.56</td><td>90.89</td><td>91.33</td><td>90.68</td><td>91.54</td><td>91.21</td></tr><tr><td>LR=1e-3, w/ TPP (ours)</td><td>89.21</td><td>92.12</td><td>91.82</td><td>92.09</td><td>92.15</td><td>91.95</td><td>92.00</td><td>92.02</td><td>92.09</td><td>92.08</td><td>92.08</td></tr></table>
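Tables 11 and 12 track the mean Jacobian singular value (JSV) during retraining. As a reference for how such numbers can be obtained, below is a minimal sketch of measuring mean JSV for a small network; the tiny MLP and input size are placeholders, not the exact architecture used in the paper, and in practice one would average over a batch of inputs:

```python
import torch

# Toy stand-in network; the paper's models (e.g., MLP-7-Linear) are larger.
net = torch.nn.Sequential(
    torch.nn.Linear(784, 64), torch.nn.ReLU(), torch.nn.Linear(64, 10)
)

def mean_jsv(model, x):
    """Mean singular value of the input-output Jacobian at input x."""
    jac = torch.autograd.functional.jacobian(model, x)  # shape: (10, 784)
    return torch.linalg.svdvals(jac).mean().item()

x = torch.randn(784)
print(mean_jsv(net, x))  # collapses toward 0 when trainability is damaged
```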
Now all the compared methods in Tab. 1 have the same training cost (i.e., the same 248 total epochs). The new results of $L_{1}$, $L_{1} + \mathrm{OrthP}$, and TPP under this stricter comparison setup are presented below:
Table 13: Test accuracy comparison under the same total epochs (ResNet56 on CIFAR10).
<table><tr><td>Layerwise PR</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td><td>0.95</td></tr><tr><td>L1(Li et al., 2017)</td><td>93.65 (0.14)</td><td>93.38 (0.16)</td><td>92.11 (0.16)</td><td>87.17 (0.26)</td><td>83.94 (0.45)</td></tr><tr><td>L1(Li et al., 2017)+OrthP</td><td>93.58 (0.03)</td><td>93.30 (0.10)</td><td>91.69 (0.13)</td><td>85.75 (0.26)</td><td>82.30 (0.20)</td></tr><tr><td>TPP (ours)</td><td>93.76 (0.10)</td><td>93.45 (0.05)</td><td>92.42 (0.14)</td><td>89.54 (0.08)</td><td>85.98 (0.29)</td></tr></table>
We make the following observations:
- For $L_{1}$ pruning, more retraining epochs do not always help. Comparing these results to Tab. 1, the accuracy at small PRs (0.3, 0.5) drops a little, probably due to overfitting: when the PR is small, the pruned model does not need so many epochs to recover, and overly long training triggers overfitting. For larger PRs (like 0.95), more epochs help quite significantly (improving the accuracy by 0.91%).
- $L_{1} + \mathrm{OrthP}$ still underperforms $L_{1}$ , same as in Tab. 1.
- Despite using fewer epochs, TPP is still quite robust: compared with Tab. 1, the performance varies by a very marginal gap ($0.1\%$, within the std range, so not statistically significant). In general, TPP remains the best among all the compared methods, and its advantage is more obvious at larger PRs, implying TPP is more valuable in more aggressive pruning cases.
# E.2 TPP + RANDOM BASE MODELS
Pruning is typically conducted on a pretrained model, and TPP is also proposed in this context. That said, the fundamental idea of TPP, i.e., the proposed regularized training that preserves trainability through the sparsifying action, is actually independent of the base model. Therefore, we may expect TPP to also surpass the $L_{1}$ pruning method (Li et al., 2017) when pruning a random model; this is confirmed in Tab. 14. As expected, the damaged trainability problem exists not only when pruning pretrained models but also when pruning random models, so TPP performs better than $L_{1}$ pruning, especially in the large-sparsity regime.
Table 14: Test accuracy of applying TPP vs. $L_{1}$ pruning (Li et al., 2017) to a random base model.
<table><tr><td colspan="6">ResNet56 on CIFAR10: Unpruned acc. 93.78%, Params: 0.85M, FLOPs: 0.25G</td></tr><tr><td>Layerwise PR</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td><td>0.95</td></tr><tr><td>L1 pruning (Li et al., 2017)</td><td>88.98 (0.13)</td><td>88.79 (0.18)</td><td>87.60 (0.21)</td><td>85.09 (0.09)</td><td>82.68 (0.32)</td></tr><tr><td>TPP (ours)</td><td>91.93 (0.13)</td><td>91.27 (0.16)</td><td>90.36 (0.16)</td><td>87.60 (0.09)</td><td>85.36 (0.18)</td></tr></table>
Moreover, in Fig. 5, we show the mean JSV and test accuracy when pruning a random model with different schemes ($L_{1}$, $L_{1} + \mathrm{OrthP}$, and our TPP). We observe that, in general, the JSV and test accuracy of pruning a random model show patterns similar to pruning a pretrained model (Fig. 3):
- Using $L_{1}$, LR $= 0.01$ achieves "better" JSV and test accuracy (not genuinely better: due to damaged trainability, the performance of LR $= 0.001$ is underestimated; see Wang et al. (2023) for more detailed discussions); note the test accuracy solid line sits above the dashed line by an obvious margin.
- When using $L_{1} + \mathrm{OrthP}$ or our TPP, LR $= 0.001$ can actually match LR $= 0.01$. As in the case of pruning a pretrained model, TPP here behaves similarly to the oracle trainability-recovery method OrthP.
- To summarize, the trainability-preserving effect of TPP also generalizes to the case of pruning random networks.

|
| 421 |
+
|
| 422 |
+

|
| 423 |
+
87.86/82.47/0.0002
|
| 424 |
+
(a) $L_{1}$
|
| 425 |
+
Figure 5: Mean JSV and test accuracy during retraining with different methods (network: MLP-7-Linear, dataset: MNIST) when pruning a random model. Below each plot are, in order, the best accuracy of LR 0.01, the best accuracy of LR 0.001, and the mean JSV right after pruning (i.e., without retraining).
|
| 426 |
+
In Fig. 6, we show the mean JSV and test accuracy over the overall process (including the regularized training and retraining) of TPP applied to a random model.
|
| 427 |
+
|
| 428 |
+

|
| 429 |
+
|
| 430 |
+

|
| 431 |
+
92.78/92.71/1.0000
|
| 432 |
+
(b) $L_{1} + \mathrm{OrthP}$
|
| 433 |
+
|
| 434 |
+

|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
92.82/92.71/2.3617
|
| 438 |
+
(c) TPP (ours)
|
| 439 |
+
|
| 440 |
+
# E.3 TPP + MORE ADVANCED PRUNING CRITERIA
In the main text, we demonstrate the effectiveness of TPP, which employs the magnitude ($L_{1}$-norm) of filters to decide masks. It is of interest whether the effectiveness of TPP carries over to other, more advanced pruning criteria. Here we combine TPP with three such criteria: SSL (Wen et al., 2016), DeepHoyer (Yang et al., 2020), and Taylor-FO (Molchanov et al., 2019). SSL and DeepHoyer are regularization-based pruning methods like ours; differently, their layerwise pruned indices (as well as the pruning ratios) are not pre-specified but "learned" by the regularized training. As such, the layerwise pruning ratios are not uniform (see Fig. 7 for an example). Taylor-FO is a more complex pruning criterion than magnitude, taking into account first-order gradient information.

|
| 445 |
+
|
| 446 |
+

|
| 447 |
+
Figure 6: Mean JSV and test accuracy during regularized training and retraining with TPP (network: MLP-7-Linear, dataset: MNIST) when pruning a random model. The model spends 100 epochs on regularized training (the LR at this stage is fixed at 0.001); it is then pruned by $L_{1}$ pruning (Li et al., 2017) and retrained with different LR schedules (0.01 vs. 0.001). As seen, pruning the TPP-regularized model does not make the mean JSV drop significantly at the pruning point (epoch 100), namely, trainability is preserved. In stark contrast, when applying $L_{1}$ pruning to a normally trained (i.e., not TPP-regularized) model, the mean JSV drops from 2.4987 to 0.0040 (see Fig. 3), namely, trainability is not preserved. Note, when trainability is preserved, retraining LRs 0.01 and 0.001 do not show an obvious test accuracy gap; when trainability is not preserved, the gap is obvious, see Fig. 5(a).
Specifically, we first use these methods to decide the layerwise pruned indices, given a total pruning ratio. Then we inherit these layerwise pruned indices when applying our TPP method; a minimal sketch of this inheritance is given below.
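To make the index-inheritance step concrete, here is a minimal sketch; the scoring function is a placeholder (an $L_{1}$-norm scorer is shown), and any of the criteria above could supply `scores` instead:

```python
import torch

def pruned_filter_indices(weight, pr):
    """Indices of the filters to prune in one conv layer.

    weight: (out_channels, in_channels, kH, kW); pr: layerwise pruning ratio.
    Scores filters by L1 norm; an advanced criterion (SSL, DeepHoyer,
    Taylor-FO) would simply supply different per-filter scores.
    """
    scores = weight.abs().flatten(1).sum(dim=1)   # L1 norm per filter
    n_pruned = int(pr * weight.size(0))
    return torch.argsort(scores)[:n_pruned]       # lowest-scored filters

w = torch.randn(64, 32, 3, 3)
idx = pruned_filter_indices(w, pr=0.5)  # these indices are then reused by TPP
```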
Results are shown in Tab. 15. We observe that at small PRs (0.3-0.7), TPP performs similarly to $L_{1}$, which agrees with Tab. 1. At large PRs (0.9, 0.95), the advantage of TPP becomes more apparent: at PR 0.9/0.95, TPP beats $L_{1}$ by 0.90/1.44 with SSL-learned pruned indices, a statistically significant advantage as indicated by the std (and again, the larger the PR, the more pronounced the advantage of TPP). This table shows that the advantage of TPP indeed carries over to other layerwise pruning ratios derived from more advanced pruning criteria.
# E.4 TRAINING CURVE PLOTS: TPP vs. $L_{1}$ PRUNING
In Fig. 8, we show the training curves of our TPP compared to $L_{1}$ pruning with ResNet56 on CIFAR10.
# E.5 PRUNING RESNET50 ON IMAGENET WITH LARGER SPARSITY RATIOS
It is noted that our method beats some of the compared methods only marginally ($< 0.5\%$ top-1 accuracy) in the low-sparsity regime (around $2\times \sim 3\times$ speedup, see Tab. 2). This is mainly because when the sparsity is low, network trainability is not seriously damaged, so our trainability-preserving method cannot fully show its advantage. Here we showcase a scenario where trainability is intentionally damaged more dramatically.
In Tab. 2, when pruning ResNet50, researchers typically do not prune all the layers: the last CONV layer in a residual block is usually spared (Li et al., 2017; Wang et al., 2021b) for the sake of performance. Here we intentionally prune all the layers (excluding only the first CONV and last
Table 15: Comparison between $L_{1}$ pruning (Li et al., 2017) and our TPP with pruned indices derived from a more advanced pruning criterion (Taylor-FO (Molchanov et al., 2019)) or regularization schemes (SSL (Wen et al., 2016), DeepHoyer (Yang et al., 2020)). Network/Dataset: ResNet56/CIFAR10. Unpruned acc. $93.78\%$, Params: 0.85M, FLOPs: 0.25G. Total PR represents the pruning ratio (PR) of the whole network. Note, due to the non-uniform layerwise PRs, the speedups below, which depend on the feature map spatial sizes, can be quite different from each other even under the same total PR.
<table><tr><td>Total PR</td><td>0.3</td><td>0.5</td><td>0.7</td><td>0.9</td><td>0.95</td></tr><tr><td>Sparsity/Speedup</td><td>22.86%/1.66×</td><td>47.09%/2.25×</td><td>72.28%/3.32×</td><td>92.49%/7.14×</td><td>95.87%/9.77×</td></tr><tr><td>L1(w/ SSL pruned indices)</td><td>93.87±0.02</td><td>93.47±0.04</td><td>92.76±0.15</td><td>89.00±0.13</td><td>84.75±0.21</td></tr><tr><td>TPP (w/ SSL layerwise pruned indices)</td><td>93.86±0.09</td><td>93.50±0.15</td><td>92.81±0.05</td><td>89.90±0.07</td><td>86.19±0.22</td></tr><tr><td>Sparsity/Speedup</td><td>26.87%/1.54×</td><td>52.11%/1.95×</td><td>76.50%/2.74×</td><td>93.21%/6.15×</td><td>96.02%/9.60×</td></tr><tr><td>L1(w/ DeepHoyer pruned indices)</td><td>93.81±0.09</td><td>93.81±0.10</td><td>92.33±0.06</td><td>87.61±0.15</td><td>84.27±0.10</td></tr><tr><td>TPP (w/ DeepHoyer pruned indices)</td><td>93.88±0.13</td><td>93.59±0.04</td><td>92.58±0.20</td><td>88.76±0.17</td><td>85.64±0.18</td></tr><tr><td>Sparsity/Speedup</td><td>31.21%/1.45×</td><td>49.94%/1.99×</td><td>70.75%/3.59×</td><td>90.66%/11.58×</td><td>95.41%/19.85×</td></tr><tr><td>L1(w/ Taylor-FO pruned indices)</td><td>93.91±0.11</td><td>93.50±0.05</td><td>92.24±0.12</td><td>87.28±0.32</td><td>83.31±0.43</td></tr><tr><td>TPP (w/ Taylor-FO pruned indices)</td><td>93.91±0.09</td><td>93.64±0.14</td><td>92.32±0.13</td><td>88.06±0.10</td><td>85.34±0.32</td></tr></table>
Figure 7: Layerwise pruning ratios learned by SSL (Wen et al., 2016) with ResNet56 on CIFAR10, given different total pruning ratios (indicated in the title of each sub-figure).
classifier FC) in ResNet50. For the $L_{1}$ pruning method (Li et al., 2017) (we report a stronger version re-implemented by Wang et al. (2023)), different layers are pruned independently since the layerwise pruning ratio is pre-specified. All the hyper-parameters of the retraining process are kept the same for a fair comparison, per the spirit of Wang et al. (2023).

|
| 481 |
+
|
| 482 |
+

|
| 483 |
+
|
| 484 |
+

|
| 485 |
+
|
| 486 |
+

|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
Figure 8: Training curves during retraining with ResNet56 on CIFAR10 at different pruning ratios (PRs). We can observe that at large PRs (0.9, 0.95), TPP significantly accelerates the optimization compared with $L_{1}$ (Li et al., 2017), because of the better trainability preserved before retraining.
The results in Tab. 16 show that, when trainability is impaired more, our TPP beats $L_{1}$ by 0.77 to 2.17 points of top-1 accuracy on ImageNet, a much more significant margin than in Tab. 2.
Table 16: Top-1 accuracy comparison between TPP and $L_{1}$ pruning with larger pruning ratios (PRs). All layers but the 1st CONV and last FC layer (including the downsample layers and the 3rd CONV in a residual block) are pruned. Uniform layerwise pruning ratio is used.
<table><tr><td>Layerwise PR</td><td>0.5</td><td>0.7</td><td>0.9</td><td>0.95</td></tr><tr><td>Sparsity/Speedup</td><td>72.94%/3.63×</td><td>89.34%/8.45×</td><td>98.25%/25.34×</td><td>99.34%/31.45×</td></tr><tr><td>L1(Li et al., 2017)</td><td>71.25</td><td>66.02</td><td>47.96</td><td>33.21</td></tr><tr><td>TPP(ours)</td><td>73.42 (+2.17)</td><td>68.16 (+2.14)</td><td>49.19 (+1.23)</td><td>33.98 (+0.77)</td></tr></table>
# F MORE DISCUSSIONS
Can TPP be useful for finding lottery ticket subnetworks at the filter level?
To the best of our knowledge, filter-level winning tickets (WTs) are still hard to find even using the original LTH pipeline, and few attempts in this direction have succeeded. E.g., Chen et al. (2022) tried, but they only achieve a rather marginal sparsity ($30\%$) with filter-level WTs (see their Fig. 3, ResNet50 on ImageNet), while weight-level WTs can typically be found at over $90\%$ sparsity. That said, we do think this paper can contribute in that direction, since preserving trainability is a central issue in LTH too.
|
2023/Trainability Preserving Neural Pruning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:391f3c6fd09086574957dd45b380d69a0a2e665ac1a06afdad56864eb42d8205
size 1480729
2023/Trainability Preserving Neural Pruning/layout.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_content_list.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_model.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/76908929-7dc3-4c81-8727-8f6a4180bb3b_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e004401ac6b2075ed77a8b44b4fe8ca21db31b9e07bdebb38ec6862a54830962
size 454276
2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/full.md
ADDED
@@ -0,0 +1,401 @@
# TRAINABLE WEIGHT AVERAGING: EFFICIENT TRAINING BY OPTIMIZING HISTORICAL SOLUTIONS
Tao Li $^{1}$ , Zhehao Huang $^{1}$ , Qinghua Tao $^{2}$ , Yingwen Wu $^{1}$ & Xiaolin Huang $^{*1}$
$^{1}$ Department of Automation, Shanghai Jiao Tong University $^{2}$ ESAT-STADIUS, KU Leuven
# ABSTRACT
Stochastic gradient descent (SGD) and its variants are considered the de facto methods to train deep neural networks (DNNs). While recent improvements to SGD mainly focus on the descent algorithm itself, few works pay attention to utilizing the historical solutions; as an iterative method, SGD goes through substantial exploration before convergence. Recently, an interesting attempt is stochastic weight averaging (SWA), which significantly improves generalization by simply averaging the solutions at the tail stage of training. In this paper, we realize that the averaging coefficients could be determined in a trainable manner, and we propose Trainable Weight Averaging (TWA), a novel optimization method in the reduced subspace spanned by historical solutions. TWA has much greater flexibility and can be applied to the head stage of training to achieve training efficiency while preserving good generalization capability. Further, we propose a distributed training scheme to resolve the memory burden of large-scale training with efficient parallel computation. In extensive numerical experiments, (i) TWA achieves consistent improvements over SWA with less sensitivity to the learning rate; (ii) applying TWA to the head stage of training largely speeds up convergence, resulting in over $40\%$ time saving on CIFAR and $30\%$ on ImageNet with improved generalization compared with regular training. The code is available at https://github.com/nblt/TWA.
# 1 INTRODUCTION
Training deep neural networks (DNNs) usually requires a large amount of time. As the sizes of models and datasets grow larger, more efficient optimization methods together with better performance are increasingly demanded. In the existing works, great efforts have been made to improve the efficiency of stochastic gradient descent (SGD) and its variants, mainly focusing on adaptive learning rates (Duchi et al., 2011; Zeiler, 2012; Kingma & Ba, 2015; Loshchilov & Hutter, 2019; Yao et al., 2021; Heo et al., 2021) or accelerated schemes (Polyak, 1964; Nesterov, 1983; 1988; 2003). As an iterative descent method, SGD generates a series of solutions during optimization. These historical solutions provide dynamic information about the training and have brought many interesting perspectives, e.g., trajectory (Li et al., 2022) and landscape (Garipov et al., 2018), to name a few. In fact, they can also be utilized to improve training performance, resulting in the so-called stochastic weight averaging (SWA) (Izmailov et al., 2018), which shows significantly better generalization by simply averaging the tail-stage explorations of SGD. A similar idea can be found in Szegedy et al. (2016), which designs an exponential moving average (EMA) and argues that a heuristic weighting strategy could be better than equal averaging. The success of SWA and EMA encourages more in-depth investigation of the roles of the historical solutions obtained during training (Athiwaratkun et al., 2018; Nikishin et al., 2018; Yang et al., 2019).
In this paper, our main purpose is to utilize historical solutions by optimizing over them, rather than using fixed averaging (e.g., SWA) or a heuristic combination (e.g., EMA). With such an optimized averaging scheme, we can achieve higher accuracy using only the solutions from a relatively early state (i.e., the head stage). In other words, we speed up the training and meanwhile improve the performance. The idea of utilizing these early solutions in DNN training is mainly inspired by two facts. On the

|
| 18 |
+
Figure 1: TWA intuition.
Input: Sampled weights $\{\pmb{w}_i\}_{i=1}^n$, batch size $b$, loss function $L: \mathcal{W} \times \mathcal{X} \times \mathcal{Y} \to \mathbb{R}_+$, learning rate $\eta$.
Output: Model trained with TWA.

Orthogonalize $\{\pmb{w}_i\}_{i=1}^n$ to $\{\pmb{e}_i\}_{i=1}^n$;
Initialize $\pmb{w}_{\mathrm{twa}}^{(0)}$, $t = 0$, $\pmb{P} = [\pmb{e}_1, \pmb{e}_2, \dots, \pmb{e}_n]$;
while not converged do
&nbsp;&nbsp;Sample batch data: $\mathcal{B} = \{(x_k, y_k)\}_{k=1}^b$;
&nbsp;&nbsp;Compute gradient: $\pmb{g} = \nabla_{\pmb{w}} L_{\mathcal{B}}(\pmb{w}_{\mathrm{twa}}^{(t)})$;
&nbsp;&nbsp;Update weights: $\pmb{w}_{\mathrm{twa}}^{(t+1)} = \pmb{w}_{\mathrm{twa}}^{(t)} - \eta \pmb{P}(\pmb{P}^{\top}\pmb{g})$, $t = t + 1$;
end while
Return $\pmb{w}_{\mathrm{twa}}^{(t)}$

Algorithm 1: TWA algorithm.
one hand, high test accuracy commonly starts appearing at an early stage. For example, a PreAct ResNet-164 model (He et al., 2016) achieves over $80\%$ test accuracy within 10 training epochs on CIFAR-10 (Krizhevsky & Hinton, 2009), while it requires the whole 200 epochs to reach its final $95\%$ accuracy. This observation also coincides with the recent findings that the key connectivity patterns of DNNs emerge early in training (You et al., 2020; Frankle et al., 2020), indicating that a well-explored solution space has formed. On the other hand, simply averaging the solutions collected at the SWA stage immediately provides a huge accuracy improvement, e.g., over $16\%$ on CIFAR-100 with Wide ResNet-28-10 (Zagoruyko & Komodakis, 2016) compared with before averaging (Izmailov et al., 2018). These facts point to a promising direction: sufficiently utilizing these early explorations may be capable of quickly composing the final solution while obtaining good accuracy.
As the model parameters go through a rapid evolution at the early stage of training, a simple averaging strategy with fixed weighting coefficients, as in SWA and EMA, can result in large estimation errors. We introduce Trainable Weight Averaging (TWA), which allows explicit adjustment of the averaging coefficients in a trainable manner. Specifically, we construct a subspace that contains all sampled solutions during training and then conduct efficient optimization therein. As optimization in such a subspace takes into account all possible averaging choices, we are able to adaptively search for a good set of averaging coefficients regardless of the quality of the sampled solutions, largely reducing the estimation errors. The proposed optimization scheme is essentially gradient projection onto a tiny subspace. Hence, the degrees of freedom for training are substantially reduced from the original millions to dozens or hundreds (equal to the dimension of the subspace), making TWA enjoy fast convergence while being immune to overfitting; the latter explains why the better training accuracy of TWA over SWA or EMA translates into better test accuracy. In extensive experiments with various network architectures on different tasks, we reach superior performance with TWA applied to the head stage of training. For instance, we attain $1.5 \sim 2.2\%$ accuracy improvement on CIFAR-100 and $0.1\%$ on ImageNet with over $40\%$ and $30\%$ of training epochs reduced, respectively, compared with regular training.
In summary, we make the following contributions:
- We propose Trainable Weight Averaging (TWA), which allows the averaging coefficients to be determined in a trainable manner instead of by a pre-defined strategy. It brings consistent improvements over SWA and EMA with reduced estimation error.
- We successfully apply TWA to the head stage of training, resulting in great time savings (e.g., over $40\%$ on CIFAR and $30\%$ on ImageNet) compared with regular training, along with improved performance and a reduced generalization gap.
- Our TWA is easy to implement and can be flexibly plugged into different stages of training to bring consistent improvements. It provides a new scheme for achieving efficient DNN training by sufficiently utilizing historical explorations.
# 2 METHOD
In this section, we first formulate the optimization target of TWA. Then a detailed training algorithm is introduced, which consists of two phases: Schmidt orthogonalization and projected optimization.
Note that in this paper, the model's weights are flattened into a single vector, i.e., $\pmb{w} \in \mathbb{R}^{D}$, where $D$ denotes the number of parameters.
# 2.1 OPTIMIZATION TARGET
In SWA, weight averaging is simply given by $\boldsymbol{w}_{\mathrm{swa}} = \frac{1}{n}\sum_{i=1}^{n}\boldsymbol{w}_i$, where the $n$ solutions collected at the tail of training are equally weighted. Such an averaging strategy has proven quite effective in improving generalization. However, equal averaging is not always the perfect solution, which motivates heuristic modifications of the weighting strategy, e.g., EMA. Both SWA and EMA are fixed averaging strategies; they may not adequately adapt to the head stage of training and can incur large estimation errors, since early historical solutions have not yet reached a stationary distribution.
In this paper, we propose to optimize the averaging coefficients of different weights with the hope of reducing the corresponding estimation error. Specifically, the set of possible TWA solutions considered, i.e., $\boldsymbol{w}_{\mathrm{twa}}$ , can be represented as follows:
$$
A = \left\{ \alpha_{1} \boldsymbol{w}_{1} + \alpha_{2} \boldsymbol{w}_{2} + \dots + \alpha_{n} \boldsymbol{w}_{n} \mid \alpha_{i} \in \mathbb{R} \right\}. \tag{1}
$$
The weight vectors of consecutive solutions can have a high cosine similarity. To decouple them and for better optimization, we further orthogonalize $\{\pmb{w}_i\}_{i=1}^n$ and find a set of orthogonal bases $\{e_i\}_{i=1}^n$ to span the solution space, i.e., $A = \{\beta_1 e_1 + \beta_2 e_2 + \dots + \beta_n e_n \mid \beta_i \in \mathbb{R}\}$. Then we search for a good solution $\pmb{w}_{\mathrm{twa}}$ in $A$ by solving the following problem,
$$
\min_{\beta_{1}, \beta_{2}, \dots, \beta_{n}} \mathbb{E}_{(\boldsymbol{x}, \boldsymbol{y}) \sim \mathcal{D}} \left[ \mathcal{L}(f(\boldsymbol{w}_{\mathrm{twa}}; \boldsymbol{x}), \boldsymbol{y}) \right] + \frac{\lambda}{2} \sum_{i=1}^{n} \beta_{i}^{2}, \tag{2}
$$

$$
\text{s.t.} \quad \boldsymbol{w}_{\mathrm{twa}} = \beta_{1} \boldsymbol{e}_{1} + \beta_{2} \boldsymbol{e}_{2} + \dots + \beta_{n} \boldsymbol{e}_{n},
$$
where $\mathcal{L}(\cdot, \cdot)$ is the loss function as in regular training and the second term is a regularization on the coefficients with $\lambda > 0$. Note that both SWA and EMA are special solutions of (2) obtained without optimization.
Optimizing over $\beta_{i}$ brings benefits in terms of training loss, and good generalization can also be expected: in regular training, the number of optimization variables is $D$, which is very large, whereas in (2) there are only $n$ averaging coefficients $\{\beta_i\}_{i=1}^n$ to be optimized. This significant dimensionality reduction can benefit generalization.
# 2.2 TRAINING ALGORITHM
Instead of directly optimizing $\beta_{i}$, we note that there exists a bijection between the coefficient space $\mathbb{R}^{n}$ and the $n$-dimensional subspace of the parameter space $\mathbb{R}^D$ spanned by the bases, i.e., each set of coefficients is uniquely mapped to one point in that subspace. We can therefore optimize these coefficients by optimizing directly in the subspace.
We first focus on finding a set of orthogonal bases $\{e_i\}_{i = 1}^n$ to span the subspace that covers $\{\pmb {w}_i\}_{i = 1}^n$ . This is a standard Schmidt orthogonalization, which sequentially takes the following steps:
$$
\left\{ \begin{array}{l} \boldsymbol{e}_{k} = \boldsymbol{w}_{k} - \left(\boldsymbol{w}_{k}^{\top} \boldsymbol{e}_{1}\right) \boldsymbol{e}_{1} - \left(\boldsymbol{w}_{k}^{\top} \boldsymbol{e}_{2}\right) \boldsymbol{e}_{2} - \dots - \left(\boldsymbol{w}_{k}^{\top} \boldsymbol{e}_{k-1}\right) \boldsymbol{e}_{k-1}, \\ \boldsymbol{e}_{k} = \boldsymbol{e}_{k} / \| \boldsymbol{e}_{k} \|_{2}. \end{array} \right. \tag{3}
$$
We then initialize $\boldsymbol{w}_{\mathrm{twa}}^{(0)}$ as a point in the subspace (e.g., $\frac{1}{n}\sum_{i=1}^{n}\boldsymbol{w}_i$) and optimize the network's parameters therein. Letting $\boldsymbol{P} = [\boldsymbol{e}_1, \boldsymbol{e}_2, \dots, \boldsymbol{e}_n]$, such optimization is easily achieved by projecting the gradient onto the subspace via the projection matrix $\boldsymbol{P}\boldsymbol{P}^\top$. We summarize the detailed training procedure of the proposed TWA in Algorithm 1 with an intuitive illustration in Figure 1. The detailed implementation is described in Appendix B.
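For illustration, the two phases can be condensed into the following minimal NumPy sketch; this is a simplified reading of Algorithm 1 (full-batch gradient, plain gradient descent, no $\lambda$ regularization), not the paper's exact implementation:

```python
import numpy as np

def orthogonalize(W):
    """Gram-Schmidt as in Eq. (3). W: (D, n) historical solutions as columns."""
    E = np.zeros_like(W)
    for k in range(W.shape[1]):
        e = W[:, k] - E[:, :k] @ (E[:, :k].T @ W[:, k])  # remove projections
        E[:, k] = e / np.linalg.norm(e)
    return E

def twa(W, grad_fn, lr=0.1, steps=100):
    """Optimize within span{w_1, ..., w_n} by projected gradient descent."""
    P = orthogonalize(W)               # (D, n) orthonormal basis
    w = W.mean(axis=1)                 # initialize at the equal-average point
    for _ in range(steps):
        g = grad_fn(w)                 # gradient of the training loss at w
        w = w - lr * P @ (P.T @ g)     # project onto the subspace, then step
    return w

# Toy usage with a quadratic loss L(w) = 0.5 * ||w - target||^2.
D, n = 50, 5
W = np.random.randn(D, n)
target = np.random.randn(D)
w_twa = twa(W, grad_fn=lambda w: w - target)
```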
# 3 OPTIMIZATION PROVIDES BETTER FLEXIBILITY
The key difference between SWA and TWA is that the averaging coefficients in TWA are determined in a trainable manner, or more precisely, are data-dependent. This potentially enables a more precise estimate of the central minimum and better tolerance of outliers, which SWA is not aware of.

|
| 99 |
+
Figure 2: An efficient parallel scheme for distributed training.
Notice that from the viewpoint of coefficient optimization, there is no essential difference between SWA and EMA: both provide specific, data-independent solutions. Thus, in the following we only compare TWA with SWA.
Mandt et al. (2017) demonstrated that under appropriate assumptions, running SGD with a constant learning rate is equivalent to sampling from a stationary Gaussian distribution, whose variance is controlled by the learning rate. Accordingly, we assume the solutions at the tail stage of SGD training are sampled from a Gaussian distribution $\mathcal{N}(\boldsymbol{\mu}, \boldsymbol{\Sigma})$ centered at the minimum $\boldsymbol{\mu}$ with covariance $\boldsymbol{\Sigma}$. Approximately, the sampled solutions $\{\boldsymbol{w}_i\}_{i=1}^n$ are independent random variables from $\mathcal{N}(\boldsymbol{\mu}, \boldsymbol{\Sigma})$, as long as there are sufficiently many iterations between adjacent samplings. SWA and TWA provide two estimators of the minimum $\boldsymbol{\mu}$, i.e., $\boldsymbol{w}_{\mathrm{swa}}$ and $\boldsymbol{w}_{\mathrm{twa}}$. As an averaged solution, $\boldsymbol{w}_{\mathrm{swa}}$ is statistically a better estimate than any single solution due to variance reduction, while $\boldsymbol{w}_{\mathrm{twa}}$ approaches the center by minimizing the training loss. As long as the training loss serves as meaningful supervision (which holds under the typical assumption that $\boldsymbol{\mu}$ is the central minimum with the lowest training loss (Mandt et al., 2017; Izmailov et al., 2018)), $\boldsymbol{w}_{\mathrm{twa}}$ can approach $\boldsymbol{\mu}$ better than $\boldsymbol{w}_{\mathrm{swa}}$ thanks to the posterior optimization of the averaging coefficients. In this regard, $\boldsymbol{w}_{\mathrm{twa}}$ can have a lower expected variance: $\mathbb{E}\left(\|\boldsymbol{w}_{\mathrm{twa}} - \boldsymbol{\mu}\|_2^2\right) \leq \mathbb{E}\left(\|\boldsymbol{w}_{\mathrm{swa}} - \boldsymbol{\mu}\|_2^2\right)$.
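As a sanity check on the variance-reduction part of this argument (a standard computation, added here for completeness): for independent samples $\boldsymbol{w}_i \sim \mathcal{N}(\boldsymbol{\mu}, \boldsymbol{\Sigma})$, the cross terms vanish in expectation, so

$$
\mathbb{E}\left(\|\boldsymbol{w}_{\mathrm{swa}} - \boldsymbol{\mu}\|_2^2\right) = \mathbb{E}\left\|\frac{1}{n}\sum_{i=1}^{n}(\boldsymbol{w}_i - \boldsymbol{\mu})\right\|_2^2 = \frac{1}{n^2}\sum_{i=1}^{n}\mathbb{E}\|\boldsymbol{w}_i - \boldsymbol{\mu}\|_2^2 = \frac{\mathrm{tr}(\boldsymbol{\Sigma})}{n},
$$

i.e., SWA already shrinks the expected squared error of a single sample, $\mathrm{tr}(\boldsymbol{\Sigma})$, by a factor of $n$; heuristically, TWA can do at least as well within the same span, since the equal-weight average is one feasible point of (2).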
The advantages of optimizing the averaging coefficients are more prominent in the head stage of training, where the weights go through a rapid evolution. A simple averaging strategy such as SWA can introduce a large estimation error (as illustrated in Tables 3 and 4), while TWA corrects it to a smaller estimation error via optimization. In fact, TWA provides much more flexibility to sufficiently utilize historical solutions and produces an optimized solution adaptively.
# 4 AN EFFICIENT IMPLEMENTATION FOR DISTRIBUTED TRAINING
The above discussion shows promising improvements from optimizing over the historical solutions. The main remaining concern is the storage burden (the additional time complexity is small, as shown in Table 6). During optimization, TWA requires the projection matrix $P$ involving dozens or hundreds of historical weights, which indeed poses a storage challenge for large models. It is preferable to keep $P$ in GPUs to enable efficient matrix operations; however, the size of $P$ increases as the model becomes larger, making it prohibitive to store on a single GPU. To cope with this, we design an efficient scheme for parallel distributed training that enables (a) partitioning the memory burden of $P$ across multiple GPUs and (b) efficient parallel computation of the gradient projection. As a result, we successfully optimize more than 900 historical solution coefficients for ResNet-50 on the ImageNet task with 4 V100 GPUs. In our experiments, we use at most 300 historical solutions, and there is still room for larger tasks.
Suppose there are $k$ GPUs for parallel training. We first uniformly divide $P$ into $k$ sub-matrices $P = [P_{1}, P_{2}, \dots, P_{k}]$, where GPU $i$ stores the local sub-matrix $P_{i}$, $i = 1, \dots, k$. Recall that in one iteration of distributed training, each GPU computes a local gradient $g_{i}$ and synchronizes it with the other GPUs to obtain the global gradient through an efficient all-reduce operation (Rabenseifner, 2004). We mimic this process for gradient projection: the local projected gradient $P_{i} P_{i}^{\top} g$ is first computed on each card and then synchronized with the others through another all-reduce to obtain the global projected gradient. We illustrate this process in Figure 2.
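The correctness of this scheme rests on the identity $PP^{\top}g = \sum_{i} P_i P_i^{\top} g$, which is plain block matrix multiplication for any column partition of $P$ (orthonormality of $P$'s columns is what makes $PP^{\top}$ a projection). A minimal single-process NumPy sketch simulating the $k$-way partition, with the all-reduce emulated by a plain sum:

```python
import numpy as np

D, n, k = 1000, 12, 4
P, _ = np.linalg.qr(np.random.randn(D, n))    # orthonormal basis, (D, n)
g = np.random.randn(D)                        # full gradient

# Column-wise partition of P across k "GPUs".
parts = np.array_split(P, k, axis=1)

# Each worker projects with its local block; summing emulates all-reduce.
local = [Pi @ (Pi.T @ g) for Pi in parts]
projected = sum(local)

assert np.allclose(projected, P @ (P.T @ g))  # matches the centralized result
```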
For averaging $n$ historical solutions, each of size $B$, the memory burden for each GPU card is reduced to $\lceil n / k \rceil B$, while the computation of the gradient projection is also reduced to $\mathcal{O}(\lceil n / k \rceil D)$ ($D$

|
| 116 |
+
Figure 3: Performance comparison before and after TWA w.r.t. different epochs of weights used. "SGD final" indicates the accuracy reached by regular SGD training, and "TWA" is the corresponding accuracy reached by Algorithm 1 with these epochs of weights. The final accuracy of SGD training is plotted for reference. TWA dramatically lifts the SGD accuracy and outperforms the final accuracy of SGD within 100 epochs. The experiments are repeated over 3 trials.
is the number of parameters). Hence, we can achieve efficient TWA training by making full use of the remaining memory of each GPU alongside the forward/backward computation.
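To put rough numbers on this (a back-of-envelope sketch assuming fp32 storage and the commonly cited ~25.6M parameters of ResNet-50; actual figures depend on the implementation):

```python
params = 25.6e6                           # ResNet-50 parameter count (approx.)
bytes_per_solution = params * 4           # fp32
n, k = 900, 4                             # historical solutions, GPUs
per_gpu = -(-n // k) * bytes_per_solution # ceil(n/k) columns of P per GPU
print(f"{per_gpu / 2**30:.1f} GiB per GPU")  # ~21.5 GiB, within a 32 GiB V100
```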
# 5 EXPERIMENTS
In this section, a series of numerical experiments is conducted, demonstrating the effectiveness of our proposed TWA for fast convergence and better performance. First, we show that TWA improves over SWA in the existing SWA setting, i.e., when used in the tail stage of training. Second, we apply TWA to the head stage of training, which brings significant efficiency improvements together with better performance. Then, we visualize loss/accuracy surfaces to demonstrate the improvements of TWA.
# 5.1 EXPERIMENTAL SETTINGS
Datasets. We experiment over three benchmark image datasets, i.e., CIFAR-10, CIFAR-100 (Krizhevsky & Hinton, 2009), and ImageNet (Deng et al., 2009). Following prior works (Izmailov et al., 2018; Yang et al., 2019), we apply standard preprocessing for experiments on CIFAR datasets, and adopt the preprocessing and data augmentation procedures in the public Pytorch example on ImageNet (Paszke et al., 2017).
Architectures. We use two representative architectures, VGG-16 (Simonyan & Zisserman, 2014) and PreAct ResNet-164 (He et al., 2016) on CIFAR experiments. For ImageNet, we use ResNet-18 and ResNet-50 (He et al., 2016).
Training. The main body of experiments contains two parts: (1) For the tail stage of training, we use the same hyper-parameters as in SWA (Izmailov et al., 2018), and a larger tail learning rate is also tried. (2) For the head stage of training, we adopt the standard training protocol with a step-wise learning rate. For CIFAR, we run all experiments with 3 seeds and report the mean test accuracy. We use the SGD optimizer with momentum 0.9, weight decay $10^{-4}$, and batch size 128. We train the models for 200 epochs with an initial learning rate of 0.1 and decay it by 10 at the 100th and the 150th epochs. For ImageNet, we follow the official PyTorch implementation<sup>1</sup>. For TWA, we sample solutions once per epoch for CIFAR and uniformly sample 5 times per epoch for ImageNet. We use a scaled learning rate (Figure 4), which takes 10 epochs of training for CIFAR and 2 epochs for ImageNet for fast convergence. The regularization coefficient $\lambda$ defaults to $10^{-5}$. More details (including the number of historical solutions used) can be found in Appendix A.
# 5.2 IMPROVING SWA SOLUTIONS
In this part, we compare SWA and TWA in the original SWA settings. Specifically, TWA and SWA use the same weights sampled from the tail stage of training. For CIFAR, we try two different tail learning rates: 0.05, the recommended one in (Izmailov et al., 2018), and 0.10, a larger one for the case with greater variance. The results in Table 1 show that TWA brings consistent improvements
Figure 4: Left: Scaled learning rate schedules with different scaling factors; Middle and Right: Test accuracy curves of TWA w.r.t. different schedules on CIFAR-10/100. Training in the subspace shows high robustness to the scaled learning rate, which enlarges the learning rate and reduces the corresponding training epochs. In this way, TWA achieves very fast convergence.

|
| 140 |
+
|
| 141 |
+

|
| 142 |
+
|
| 143 |
+
over SWA. Especially when the learning rate is not well-tuned, SWA's performance suffers a distinct drop, but TWA is less sensitive since the estimation error can be well controlled through training. For instance, on CIFAR-100 with VGG-16 and a tail learning rate of 0.10, the estimation error of SWA substantially increases, while TWA achieves a significant accuracy improvement of $4.18\%$ over SWA. Note that for a fair comparison, TWA starts from the last sampled weights, not the averaged solution of SWA.
Table 1: Test accuracy (%) on CIFAR-10/100 for tail training with different learning rates
<table><tr><td rowspan="2">DATASET</td><td rowspan="2">MODEL</td><td colspan="2">SWA_LR = 0.05</td><td colspan="2">SWA_LR = 0.10</td></tr><tr><td>SWA</td><td>TWA (+10)</td><td>SWA</td><td>TWA (+10)</td></tr><tr><td rowspan="2">CIFAR-10</td><td>VGG16</td><td>94.01 ± 0.04</td><td>94.16 ± 0.14</td><td>91.03 ± 0.14</td><td>92.41 ± 0.15</td></tr><tr><td>PRERESNET-164</td><td>95.58 ± 0.09</td><td>95.65 ± 0.13</td><td>91.58 ± 0.45</td><td>92.61 ± 0.09</td></tr><tr><td rowspan="2">CIFAR-100</td><td>VGG16</td><td>74.71 ± 0.03</td><td>75.73 ± 0.18</td><td>65.52 ± 0.25</td><td>69.70 ± 0.45</td></tr><tr><td>PRERESNET-164</td><td>80.20 ± 0.41</td><td>80.35 ± 0.27</td><td>78.14 ± 0.48</td><td>78.87 ± 0.27</td></tr></table>
On ImageNet, we experiment with ResNet-18/50 (He et al., 2016). Following (Izmailov et al., 2018; Yang et al., 2019), we start from pre-trained models in torchvision.models and collect model weights by running SGD optimizer up to 10 epochs (with a constant learning rate 0.005). In Table 2, we report the test accuracy and observe that with more sampling epochs, both TWA and SWA achieve better performance. Notably, TWA performs better than SWA by $0.1 \sim 0.3\%$ , and such improvements are more obvious in the "5 EPOCHS" case. For example, using 5 epochs of sampled weights, TWA achieves $70.23\%$ accuracy with ResNet-18 and $76.78\%$ accuracy with ResNet-50, outperforming the SWA counterparts with 10 epochs. This indicates that TWA requires fewer historical solution samples to achieve a comparable or even better performance than SWA, due to its ability to reduce the estimation variance with optimized averaging coefficients.
Table 2: Top-1 accuracy (%) on ImageNet for tail training with different averaging epochs
<table><tr><td rowspan="2">MODEL</td><td rowspan="2">PRETRAINED</td><td colspan="2">5 EPOCHS</td><td colspan="2">10 EPOCHS</td></tr><tr><td>SWA</td><td>TWA (+1)</td><td>SWA</td><td>TWA (+1)</td></tr><tr><td>RESNET-18</td><td>69.76</td><td>70.02</td><td>70.28</td><td>70.12</td><td>70.32</td></tr><tr><td>RESNET-50</td><td>76.13</td><td>76.62</td><td>76.78</td><td>76.74</td><td>76.93</td></tr></table>
# 5.3 EFFICIENT TRAINING AND BETTER GENERALIZATION
In the head stage of training, SWA usually fails due to the large estimation variance caused by fast-evolving solutions and a large learning rate. Since TWA can reduce the variance and is less sensitive to the learning rate, it can be expected to work well in this stage as well. If so, it is promising to simultaneously attain generalization improvements and training efficiency.
We first investigate the experiments on CIFAR datasets. The original training schedule contains 200 epochs, and we take the first 100 epochs of exploration for TWA. The results are given in Table 3. It can be observed that TWA achieves better performance than regular SGD training with a significantly reduced generalization gap. For instance, we attain a $1.52\%$ accuracy improvement on CIFAR-100 with VGG-16 while the generalization gap is reduced by $9.56\%$. This suggests that a better solution can already be composed from these historical solutions, without further training under more delicate learning rates, which may instead bring overfitting problems and harm generalization. For comparison, we also apply SWA to average these samples, which shows degraded performance due to the estimation error. Apart from its good accuracy, TWA also manifests great potential in improving training efficiency: we use only 10 epochs to complete convergence, while regular SGD needs another 100 epochs. As TWA and SGD have nearly the same computation overhead per epoch, the time saving of TWA is around $45\%$.
Table 3: Test accuracy (%) and generalization gap (%) on CIFAR-10/100 for the head training
<table><tr><td rowspan="2">DATASET</td><td rowspan="2">MODEL</td><td colspan="2">SGD (200 EPOCHS)</td><td>SWA (100 EPOCHS)</td><td colspan="2">TWA (100 + 10 EPOCHS)</td></tr><tr><td>ACCURACY</td><td>GAP</td><td>ACCURACY</td><td>ACCURACY</td><td>GAP</td></tr><tr><td rowspan="2">CIFAR-10</td><td>VGG16</td><td>93.54 ± 0.11</td><td>6.42</td><td>92.40 ± 0.08</td><td>93.79 ± 0.18</td><td>5.59 (↓ 0.83)</td></tr><tr><td>PRERESNET-164</td><td>95.11 ± 0.17</td><td>4.86</td><td>92.52 ± 0.04</td><td>95.19 ± 0.04</td><td>4.11 (↓ 0.75)</td></tr><tr><td rowspan="2">CIFAR-100</td><td>VGG16</td><td>72.72 ± 0.17</td><td>26.70</td><td>69.18 ± 0.24</td><td>74.24 ± 0.24</td><td>17.14 (↓ 9.56)</td></tr><tr><td>PRERESNET-164</td><td>75.85 ± 0.18</td><td>24.10</td><td>73.36 ± 0.34</td><td>78.11 ± 0.19</td><td>20.02 (↓ 4.08)</td></tr></table>
Generally, utilizing more epochs of exploration provides a better estimate of the central minimum and hence better performance. We also study the impact of different averaging epochs; the results are illustrated in Figure 3, where the final accuracy of SGD and the accuracy reached by SGD before averaging are given for reference. The model's performance improves consistently with more epochs of exploration. Notably, although each individual historical solution from a relatively short period of exploration is not good, satisfactory solutions have already emerged in the subspace spanned by these solutions. Through proper optimization in the subspace, TWA can find them; e.g., on CIFAR-100 with the PreAct ResNet-164 model, averaging over 50 epochs via TWA already matches the final performance of regular SGD training.
Table 4: Top-1 accuracy (%) and generalization gap (%) on ImageNet for head training
<table><tr><td rowspan="2">MODEL</td><td colspan="2">SGD (90 EPOCHS)</td><td>SWA (60 EPOCHS)</td><td colspan="2">TWA (60 + 2 EPOCHS)</td></tr><tr><td>ACCURACY</td><td>GAP</td><td>ACCURACY</td><td>ACCURACY</td><td>GAP</td></tr><tr><td>RESNET-18</td><td>69.82</td><td>-1.59</td><td>62.19</td><td>69.82</td><td>-2.36 (↓ 0.77)</td></tr><tr><td>RESNET-50</td><td>75.82</td><td>0.25</td><td>67.66</td><td>75.90</td><td>-0.68 (↓ 0.93)</td></tr></table>
For ImageNet, the effort required for each epoch of training is much greater, and hence efficient methods that reduce the training epochs are highly desirable. The comparison of SGD/SWA/TWA is shown in Table 4. Besides the reduced generalization gap, TWA takes only 2 epochs to average the historical solutions of the first 60 epochs, reaching comparable or even better performance than regular SGD training with 90 epochs. For comparison, Lookahead (Zhang et al., 2019) is another recently proposed optimizer for improving convergence; it reports $75.49\%$ accuracy at the 60th epoch (Table 2 in Zhang et al. (2019)) with an aggressive learning rate decay (decayed at the 30th, 48th, and 58th epochs), while our TWA reaches $75.70\%$ with the same budget using simply the conventional decay.
TWA is very flexible and can be readily applied to different training stages. We also conduct an experiment that averages the solutions of the final training period (i.e., epochs 61-90) and simply performs TWA for one epoch of training, as presented in Table 5. Such cheap training still brings significant improvements (e.g., $+0.51\%$ on ResNet-50 for Ima
Table 5: Top-1 accuracy (%) on ImageNet for tail training and short TWA
<table><tr><td>MODEL</td><td>SGD</td><td>TWA (+1 EPOCH)</td></tr><tr><td>RESNET-18</td><td>69.82</td><td>70.37</td></tr><tr><td>RESNET-50</td><td>75.82</td><td>76.34</td></tr></table>
geNet). Thus, TWA can serve as an effective approach for composing a better final solution.
Scaled learning rate The optimization of TWA is conducted in a very low-dimensional space, which also suppresses sensitivity to the learning rate. In fact, we can use a very large learning rate to accelerate the training. Thereby, we design a scaled learning rate schedule, which linearly scales up the learning rate and reduces the training epochs accordingly, as shown in Figure 4. Within an appropriate range, scaling the learning rate largely speeds up convergence without affecting the final performance. For example, with a learning rate scaling of 4, TWA approaches the final accuracy with only 1 epoch and converges within 5 epochs.
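A minimal sketch of such a schedule (our reading of Figure 4: the base schedule is compressed in time by the same factor that scales the learning rate; the exact shape used in the paper may differ):

```python
def scaled_schedule(base_lr, base_epochs, scale):
    """Linearly decaying LR schedule, scaled by `scale`.

    Multiplying the LR by `scale` while dividing the horizon by `scale`
    keeps the total accumulated step size roughly constant.
    """
    epochs = max(1, round(base_epochs / scale))
    return [scale * base_lr * (1 - e / epochs) for e in range(epochs)]

print(scaled_schedule(0.01, 10, 1))  # 10 epochs at the base LR
print(scaled_schedule(0.01, 10, 2))  # 5 epochs at the doubled LR
```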
Wall-clock time comparison In the above experiments, we report the number of training epochs, since the wall-clock time per epoch for SGD and TWA is similar. As an illustration, Table 6 provides the wall-clock time of training PreAct ResNet-164 on CIFAR-100 with one NVIDIA GeForce RTX 2080 Ti.
Comparison with EMA EMA serves as an alternative to SWA, averaging the model weights along the training trajectory with exponential decay. It requires a hyper-parameter $\gamma$ to control the averaging horizon. As a manually defined averaging strategy, EMA can be sensitive to learning rates, datasets, architectures, etc. Here we compare EMA with SWA and TWA in the head stage of training, trying $\gamma = 0.99 / 0.999$. The results are illustrated in Figure 5. We observe that the performance of EMA varies significantly with differ
Table 6: Wall-clock time per epoch
<table><tr><td>OPTIMIZER</td><td>TIME PER EPOCH</td></tr><tr><td>SGD</td><td>59.04s</td></tr><tr><td>TWA</td><td>60.20s</td></tr></table>

|
| 192 |
+
Figure 5: Comparisons with EMA.
ent choices of $\gamma$. It can perform notably better than SWA in the early stage of training, since more weight is put on the latest solutions. Note that both EMA and SWA are fixed averaging strategies when adapting to different training stages (and can essentially be viewed as particular solutions of TWA). By optimizing the averaging coefficients, TWA consistently achieves better performance.
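For reference, the EMA rule referred to here is the standard exponentially decaying running average (our notation; the exact variant used may differ in details):

$$
\boldsymbol{w}_{\mathrm{ema}}^{(t)} = \gamma\, \boldsymbol{w}_{\mathrm{ema}}^{(t-1)} + (1 - \gamma)\, \boldsymbol{w}^{(t)},
$$

so $\gamma$ close to 1 averages over a long horizon, while smaller $\gamma$ emphasizes the latest solutions; unrolled, the coefficient on $\boldsymbol{w}^{(t-j)}$ is $(1-\gamma)\gamma^{j}$, which is one particular fixed choice of the $\alpha_i$ in (1).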
Optimized Averaging Coefficients In Figure 6, we visualize the averaging coefficients $\alpha_{i}$ learned by TWA; the detailed derivation can be found in Appendix D. We observe that all historical solutions contribute to the final solution, and solutions from the later training stage receive more importance, as expected. Different from fixed averaging strategies like SWA or EMA, such coefficients take full advantage of the historical solutions through delicate optimization and better adapt to the training dynamics.
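One way to recover the $\alpha_i$ from the trained $\beta_i$ (a sketch of the mapping; the paper's own derivation in Appendix D is not reproduced here): Gram-Schmidt on $W = [\boldsymbol{w}_1, \dots, \boldsymbol{w}_n]$ amounts to a QR factorization $W = ER$, with $E = [\boldsymbol{e}_1, \dots, \boldsymbol{e}_n]$ orthonormal and $R$ upper triangular, so, assuming $W$ has full column rank,

$$
\boldsymbol{w}_{\mathrm{twa}} = E\boldsymbol{\beta} = W R^{-1} \boldsymbol{\beta} \quad \Rightarrow \quad \boldsymbol{\alpha} = R^{-1} \boldsymbol{\beta}.
$$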

|
| 199 |
+
Figure 6: Averaging coefficients of TWA.
# 5.4 LANDSCAPE VISUALIZATION
Following Garipov et al. (2018) and Izmailov et al. (2018), we visualize the training loss and test error surfaces of SWA and TWA in Figure 7 on CIFAR-100 with PreAct ResNet-164. We set the SGD solution after 125 training epochs as the origin and plot the TWA and SWA solutions on the plane. For the case with the default learning rate of 0.05, TWA achieves slightly better test accuracy with lower training loss. This shows that minimizing the training loss in the subspace is meaningful and results in lower test error. For the case with the larger learning rate of 0.10, the superiority of TWA over SWA is more significant (over $0.7\%$ improvement in test accuracy), since the variance grows larger and the variance-reduction effect of TWA becomes more obvious.
# 6 RELATED WORK
Improving the model's generalization capability is of great importance and has received wide attention. Recent efforts mainly focus on two aspects: (1) proper regularization terms to search for flatter minima (Keskar et al., 2016; Li et al., 2018), such as weight decay (Krogh & Hertz, 1991), dropout (Srivastava et al., 2014), label smoothing (Szegedy et al., 2016), Shake-Shake (Gastaldi,

|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
(a) $\mathrm{SWA\_LR} = 0.05$
|
| 215 |
+
(b) $\mathrm{SWA\_LR} = 0.10$
|
| 216 |
+
Figure 7: Train loss and test error surface of TWA and SWA with different SWA_LR.
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
|
| 220 |
+
2017), MixUp (Zhang et al., 2018), SAM (Foret et al., 2020) and AMP (Zheng et al., 2021); (2) effective data augmentation to diversify the dataset, such as Cutout (DeVries & Taylor, 2017), AutoAugment (Cubuk et al., 2019) and RandAugment (Cubuk et al., 2020). Different from these techniques, we improve the generalization ability by constraining the training in a low-dimensional subspace spanned by historical explorations, which regularizes the model complexity. We note that TWA is orthogonal to these methods, and it is promising to combine them for boosted improvements.
A lot of effort has been made to speed up DNN training. Apart from the well-known methods on adaptive learning rates, e.g. Adam (Kingma & Ba, 2015), and accelerated schemes, e.g. Nesterov momentum (Nesterov, 1983), Zhang et al. (2019) proposed the Lookahead optimizer, where a look-ahead search direction generated by another "fast" optimizer is utilized, achieving faster convergence and better learning stability. Goyal et al. (2017) adopted large mini-batches to speed up training and introduced a scaling rule for adjusting the learning rates. In this paper, we achieve training efficiency by fully utilizing the historical solutions of DNN training and conducting training in a subspace with substantially reduced dimension.
For utilizing historical explorations, SWA (Izmailov et al., 2018) adopts a simple averaging strategy at the tail of training. Cha et al. (2021) extended it to the domain generalization task with a dense and overfit-aware stochastic weight sampling strategy. We are the first to propose utilizing the explorations at the head stage of training to achieve training efficiency. The exponentially decaying running average (Hunter, 1986; Szegedy et al., 2016) is a common technique adopted by practitioners; it requires a manually set averaging horizon and generally performs comparably to SGD (Izmailov et al., 2018). Another closely related work is model soups (Wortsman et al., 2022), which improves model performance by averaging the weights from different fine-tuning configurations in a greedy order. We differ in that our historical solutions come from a single configuration, and we mainly focus on improving training efficiency by optimizing the averaging coefficients in a trainable manner.
# 7 CONCLUSION

In this work, we propose TWA, a novel training algorithm that optimizes the averaging coefficients of the historical solutions in DNN training to achieve efficiency and better performance. It differs from manually set averaging strategies such as SWA or EMA and adapts better to different stages of training. We further design a parallel framework for large-scale training that is efficient in both memory and computation. Extensive experiments demonstrate the superior performance of TWA on benchmark computer vision tasks with various architectures.
# ACKNOWLEDGEMENTS

We are very grateful to the anonymous reviewers for their valuable feedback on the paper. We thank Minqi Chen at Huawei Technologies for the great support. The research leading to these results has received funding from the National Natural Science Foundation of China (61977046), the Shanghai Science and Technology Program (22511105600), and the Shanghai Municipal Science and Technology Major Project (2021SHZDX0102).
# REFERENCES

Ben Athiwaratkun, Marc Finzi, Pavel Izmailov, and Andrew Gordon Wilson. There are many consistent explanations of unlabeled data: Why you should average. In International Conference on Learning Representations (ICLR), 2018.

Junbum Cha, Sanghyuk Chun, Kyungjae Lee, Han-Cheol Cho, Seunghyun Park, Yunsung Lee, and Sungrae Park. Swad: Domain generalization by seeking flat minima. In Advances in Neural Information Processing Systems (NeurIPS), 2021.

Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation strategies from data. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 113-123, 2019.

Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 702-703, 2020.

Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 248-255, 2009.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.

Terrance DeVries and Graham W Taylor. Improved regularization of convolutional neural networks with cutout. arXiv preprint arXiv:1708.04552, 2017.

John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research (JMLR), 12(7), 2011.

Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In International Conference on Learning Representations (ICLR), 2020.

Jonathan Frankle, Gintare Karolina Dziugaite, Daniel Roy, and Michael Carbin. Linear mode connectivity and the lottery ticket hypothesis. In International Conference on Machine Learning (ICML), 2020.

Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry P Vetrov, and Andrew G Wilson. Loss surfaces, mode connectivity, and fast ensembling of dnns. In Advances in Neural Information Processing Systems (NeurIPS), 2018.

Xavier Gastaldi. Shake-shake regularization of 3-branch residual networks. In Workshop Track Proceedings in International Conference on Learning Representations (ICLR), 2017.

Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training ImageNet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2016.

Byeongho Heo, Sanghyuk Chun, Seong Joon Oh, Dongyoon Han, Sangdoo Yun, Gyuwan Kim, Youngjung Uh, and Jung-Woo Ha. Adamp: Slowing down the slowdown for momentum optimizers on scale-invariant weights. In International Conference on Learning Representations (ICLR), 2021.

J Stuart Hunter. The exponentially weighted moving average. Journal of Quality Technology, 18(4):203-210, 1986.

Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018.

Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016.

Diederik P. Kingma and Jimmy Lei Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations (ICLR), 2015.

Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical Report, 2009.

Anders Krogh and John Hertz. A simple weight decay can improve generalization. In Advances in Neural Information Processing Systems (NeurIPS), 1991.

Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. In Advances in Neural Information Processing Systems (NeurIPS), 2018.

Tao Li, Lei Tan, Zhehao Huang, Qinghua Tao, Yipeng Liu, and Xiaolin Huang. Low dimensional trajectory hypothesis is true: Dnns can be trained in tiny subspaces. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2022.

Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.

Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations (ICLR), 2019.

Stephan Mandt, Matthew D Hoffman, and David M Blei. Stochastic gradient descent as approximate bayesian inference. Journal of Machine Learning Research (JMLR), 18:1-35, 2017.

Yurii Nesterov. On an approach to the construction of optimal methods of minimization of smooth convex functions. Ekonomika i Matematicheskie Metody, 24(3):509-517, 1988.

Yurii Nesterov. Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media, 2003.

Yurii E Nesterov. A method for solving the convex programming problem with convergence rate $O(1/k^2)$. In Doklady Akademii Nauk SSSR, volume 269, pp. 543-547, 1983.

Evgenii Nikishin, Pavel Izmailov, Ben Athiwaratkun, Dmitrii Podoprikhin, Timur Garipov, Pavel Shvechikov, Dmitry Vetrov, and Andrew Gordon Wilson. Improving stability in deep reinforcement learning with weight averaging. In Uncertainty in Artificial Intelligence Workshop on Uncertainty in Deep Learning, 2018.

Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in PyTorch. 2017.

Boris T Polyak. Some methods of speeding up the convergence of iteration methods. USSR Computational Mathematics and Mathematical Physics, 4(5):1-17, 1964.

Rolf Rabenseifner. Optimization of collective reduction operations. In International Conference on Computational Science, pp. 1-9. Springer, 2004.

Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.

Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. Journal of Machine Learning Research (JMLR), 15(1):1929-1958, 2014.

Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2818-2826, 2016.

Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461, 2018.

Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo-Lopes, Ari S Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, et al. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In International Conference on Machine Learning (ICML), 2022.

Guandao Yang, Tianyi Zhang, Polina Kirichenko, Junwen Bai, Andrew Gordon Wilson, and Chris De Sa. Swalp: Stochastic weight averaging in low precision training. In International Conference on Machine Learning (ICML), pp. 7015-7024. PMLR, 2019.

Zhewei Yao, Amir Gholami, Sheng Shen, Mustafa Mustafa, Kurt Keutzer, and Michael W. Mahoney. ADAHESSIAN: an adaptive second order optimizer for machine learning. In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI, 2021.

Haoran You, Chaojian Li, Pengfei Xu, Yonggan Fu, Yue Wang, Xiaohan Chen, Richard G Baraniuk, Zhangyang Wang, and Yingyan Lin. Drawing early-bird tickets: Towards more efficient training of deep networks. In International Conference on Learning Representations (ICLR), 2020.

Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. In Richard C. Wilson, Edwin R. Hancock, and William A. P. Smith (eds.), British Machine Vision Conference (BMVC), 2016.

Matthew D Zeiler. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701, 2012.

Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In International Conference on Learning Representations (ICLR), 2018.

Michael Zhang, James Lucas, Jimmy Ba, and Geoffrey E Hinton. Lookahead optimizer: k steps forward, 1 step back. In Advances in Neural Information Processing Systems (NeurIPS), 2019.

Yaowei Zheng, Richong Zhang, and Yongyi Mao. Regularizing neural networks via adversarial model perturbation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8156-8165, 2021.
# A TRAINING DETAILS
For SWA experiments, we replicate the SWA baseline using the publicly released implementation of Izmailov et al. (2018). We use the VGG-16 architecture with batch normalization so that the learning rate setting is unified with PreAct ResNet-164. For ImageNet experiments, we follow the official PyTorch implementation. We use a scaled learning rate for TWA training, with 20x and 30x factors on CIFAR and ImageNet, respectively (e.g. the original learning rate of 0.1 is scaled up to 2 on CIFAR). CIFAR experiments are performed on one NVIDIA GeForce RTX 2080 Ti GPU, while ImageNet experiments use four NVIDIA A100 GPUs. The number of historical solutions optimized by TWA is presented in Table A1.

We disclose the specific hyper-parameters in the following.
Table A1: The number of historical solutions optimized by TWA

<table><tr><td>DATASETS</td><td>SAMPLING EPOCHS</td><td>TIMES / EPOCH</td><td>#NUM</td></tr><tr><td>CIFAR</td><td>100</td><td>1</td><td>100</td></tr><tr><td>IMAGENET</td><td>60</td><td>5</td><td>300</td></tr></table>

# A.1 SWA TRAINING

# A.1.1 CIFAR

We use the same schedule and hyper-parameters as in Izmailov et al. (2018). For VGG-16, we use a weight decay of $5 \times 10^{-4}$ and train the model for 300 epochs, with weight averaging from epoch 161 to 300. For PreAct ResNet-164, we use a weight decay of $3 \times 10^{-4}$ and train the model for 225 epochs, with weight averaging from epoch 126 to 225.
For TWA training, we use the same weights as SWA and initialize $w_{\mathrm{twa}}$ as the last checkpoint (i.e. epoch 300/225). We train the models for 10 epochs with an initial learning rate of 2 and decay it by 10 at the 5th and 8th epochs. The regularization coefficient $\lambda$ is set to $5 \times 10^{-5}$.

# A.1.2 IMAGENET

Following Izmailov et al. (2018) and Yang et al. (2019), we start from pre-trained models (from torchvision.models) and collect weights by running the SGD optimizer for up to 10 epochs (with a constant learning rate of 0.005 and weight decay of $1 \times 10^{-4}$). We uniformly sample the solutions 5 times per epoch.

For TWA training, we use the same weights as SWA and initialize $\boldsymbol{w}_{\mathrm{twa}}$ as the pre-trained model. Since ImageNet has many iterations in one epoch, we conduct TWA training for one epoch, in which we linearly decay the learning rate from 0.03 to 0. The regularization coefficient $\lambda$ is set to $1 \times 10^{-5}$.
# A.2 REGULAR TRAINING

# A.2.1 CIFAR

For regular training, we train the models for 200 epochs with an initial learning rate of 0.1 and decay it by 10 at the 100th and the 150th epoch. We use the SGD optimizer with momentum 0.9, weight decay $1 \times 10^{-4}$, and batch size 128 by convention.

For TWA training, we initialize $\mathbf{w}_{\mathrm{twa}}$ as $\frac{1}{n} \sum_{i=1}^{n} \mathbf{w}_i$, i.e., the center of the sampled solutions. We train the models for 10 epochs with an initial learning rate of 2 and decay it by 10 at the 5th and 8th epochs. The regularization coefficient $\lambda$ is set to $1 \times 10^{-5}$.
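As an illustration of this initialization, the sketch below averages a list of saved checkpoints into a single state dict. The file names are hypothetical, and integer buffers (e.g. batch-norm counters) would need special handling in practice.

```python
import torch


def average_state_dicts(paths):
    # Running uniform mean of the sampled solutions: w_twa = (1/n) sum_i w_i
    avg = None
    for i, path in enumerate(paths):
        state = torch.load(path, map_location="cpu")
        if avg is None:
            avg = {k: v.float().clone() for k, v in state.items()}
        else:
            for k in avg:
                avg[k] += (state[k].float() - avg[k]) / (i + 1)
    return avg


# w_twa = average_state_dicts([f"ckpt_epoch{e}.pt" for e in range(100)])
# model.load_state_dict(w_twa)  # starting point for the 10-epoch TWA phase
```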
# A.2.2 IMAGENET

We follow the training protocol described in He et al. (2016). Specifically, we train the models for 90 epochs with an initial learning rate of 0.1 and decay it by a factor of 10 every 30 epochs. We use the SGD optimizer with momentum 0.9, weight decay $1 \times 10^{-4}$, and batch size 256.

For TWA training, we uniformly sample solutions 5 times per epoch and initialize $\boldsymbol{w}_{\mathrm{twa}}$ as $\frac{1}{n}\sum_{i=1}^{n}\boldsymbol{w}_i$. We train the models for 2 epochs with learning rates of 0.3 and 0.03, respectively. For the extra one epoch of training, we use the same training protocol as in subsection A.1.2, i.e., linearly decaying the learning rate from 0.03 to 0. The regularization coefficient $\lambda$ is set to $1\times 10^{-5}$.

# A.2.3 RESULTS ON ADAM OPTIMIZER

Adam (Kingma & Ba, 2015) is another mainstream optimizer with adaptive gradient descent, enjoying fast convergence and insensitivity to the initial learning rate. Here, we apply TWA to the solutions generated by the Adam optimizer; the results are in Table A2. The training settings are the same as in Table 3, with the default $\beta_{1} = 0.9$ and $\beta_{2} = 0.999$ for Adam. TWA is trained for 5 epochs with an initial learning rate of 3. We observe that it similarly brings generalization improvement and training efficiency.
Table A2: Test accuracy (%) and generalization gap (%) on CIFAR-10/100 with Adam Optimizer
<table><tr><td rowspan="2">DATASET</td><td rowspan="2">MODEL</td><td colspan="2">ADAM (200 EPOCHS)</td><td>SWA (100 EPOCHS)</td><td colspan="2">TWA (100 + 5 EPOCHS)</td></tr><tr><td>ACCURACY</td><td>GAP</td><td>ACCURACY</td><td>ACCURACY</td><td>GAP</td></tr><tr><td rowspan="2">CIFAR-10</td><td>VGG16</td><td>93.60 ± 0.12</td><td>6.31</td><td>92.39 ± 0.07</td><td>93.63 ± 0.11</td><td>5.74 (↓ 0.57)</td></tr><tr><td>PRERESNET-164</td><td>95.09 ± 0.11</td><td>4.89</td><td>92.65 ± 0.03</td><td>95.16 ± 0.09</td><td>3.85 (↓ 1.04)</td></tr><tr><td rowspan="2">CIFAR-100</td><td>VGG16</td><td>72.77 ± 0.13</td><td>26.67</td><td>68.74 ± 0.05</td><td>74.07 ± 0.07</td><td>17.35 (↓ 9.32)</td></tr><tr><td>PRERESNET-164</td><td>76.23 ± 0.14</td><td>23.71</td><td>73.09 ± 0.13</td><td>77.96 ± 0.14</td><td>19.62 (↓ 4.09)</td></tr></table>
# B IMPLEMENTATION
Let $\pmb{\beta} = [\beta_{1},\beta_{2},\dots ,\beta_{n}]^{\top}\in \mathbb{R}^{n}$ and $\pmb{P} = [e_1,e_2,\dots ,e_n]\in \mathbb{R}^{D\times n}$. The optimization objective of TWA can be formulated as

$$
\min_{\pmb{\beta}} L(\pmb{\beta}) \triangleq \mathbb{E}_{(\pmb{x}, \pmb{y}) \sim \mathcal{D}} \left[ \mathcal{L}\left(f\left(\pmb{w}_{\mathrm{twa}}; \pmb{x}\right), \pmb{y}\right) \right] + \frac{\lambda}{2} \pmb{\beta}^{\top} \pmb{\beta}, \tag{A.1}
$$

$$
\mathrm{s.t.} \quad \pmb{w}_{\mathrm{twa}} = \pmb{P} \pmb{\beta}.
$$

We abbreviate the loss term $\mathbb{E}_{(\pmb{x},\pmb{y})\sim \mathcal{D}}[\mathcal{L}(f(\pmb{w};\pmb{x}),\pmb{y})]$ as $\mathcal{L}(\pmb{w})$, so the gradient w.r.t. $\pmb{\beta}$ can be written as

$$
\frac{\partial L}{\partial \pmb{\beta}} = \frac{\partial \pmb{w}_{\mathrm{twa}}}{\partial \pmb{\beta}} \frac{\partial \mathcal{L}(\pmb{w}_{\mathrm{twa}})}{\partial \pmb{w}_{\mathrm{twa}}} + \lambda \pmb{\beta} \tag{A.2}
$$

$$
= \pmb{P}^{\top} \frac{\partial \mathcal{L}\left(\pmb{w}_{\mathrm{twa}}\right)}{\partial \pmb{w}_{\mathrm{twa}}} + \lambda \pmb{\beta}. \tag{A.3}
$$

Let $\eta$ be the learning rate. We can optimize (A.1) with the following gradient descent:

$$
\pmb{\beta}^{(t+1)} = \pmb{\beta}^{(t)} - \eta \left( \pmb{P}^{\top} \frac{\partial \mathcal{L}\left(\pmb{w}_{\mathrm{twa}}^{(t)}\right)}{\partial \pmb{w}_{\mathrm{twa}}} + \lambda \pmb{\beta}^{(t)} \right). \tag{A.4}
$$

Since $\pmb{w}_{\mathrm{twa}} = \pmb{P}\pmb{\beta}$, we have the corresponding update in the parameter space:

$$
\pmb{w}_{\mathrm{twa}}^{(t+1)} = \pmb{w}_{\mathrm{twa}}^{(t)} - \eta \left( \pmb{P}\pmb{P}^{\top} \frac{\partial \mathcal{L}\left(\pmb{w}_{\mathrm{twa}}^{(t)}\right)}{\partial \pmb{w}_{\mathrm{twa}}} + \lambda \pmb{P}\pmb{\beta}^{(t)} \right) \tag{A.5}
$$

$$
= (1 - \eta\lambda)\, \pmb{w}_{\mathrm{twa}}^{(t)} - \eta\, \pmb{P}\pmb{P}^{\top} \frac{\partial \mathcal{L}\left(\pmb{w}_{\mathrm{twa}}^{(t)}\right)}{\partial \pmb{w}_{\mathrm{twa}}}. \tag{A.6}
$$

As $\pmb{\beta}$ does not explicitly appear in (A.6), we can treat the coefficients $\pmb{\beta}$ as implicit variables. In practice, we optimize (A.1) by directly updating the model weights $w_{\mathrm{twa}}$ with weight decay $\lambda$, which amounts to optimization in the reduced subspace with projection matrix $\pmb{P}\pmb{P}^{\top}$.
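The update (A.6) can be sketched directly in PyTorch. This is a minimal illustration, assuming the weights are flattened into a single vector and that $\pmb{P}$ has orthonormal columns (consistent with the basis notation $e_1, \dots, e_n$); it is not the paper's exact implementation.

```python
import torch


def twa_step(w, grad, P, lr, lam):
    # Eq. (A.6): w <- (1 - lr * lam) * w - lr * P P^T grad
    #   w:    flattened weights, shape (D,)
    #   grad: loss gradient at w, shape (D,)
    #   P:    orthonormal subspace basis, shape (D, n), with n << D
    proj_grad = P @ (P.t() @ grad)  # project the gradient onto the subspace
    return (1.0 - lr * lam) * w - lr * proj_grad


# Toy check that the iterate never leaves span(P):
# D, n = 1000, 10
# P, _ = torch.linalg.qr(torch.randn(D, n))  # orthonormal columns
# w = P @ torch.randn(n)                     # start inside the subspace
# w = twa_step(w, torch.randn(D), P, lr=2.0, lam=1e-5)
```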
# C SWA WITH DIFFERENT STARTING EPOCHS

We test the performance of SWA with different starting epochs for averaging on ImageNet with the ResNet-50 model. We observe that the performance of SWA gradually improves as the averaging starts from a later stage of the trajectory, showing that SWA cannot adapt well to the head stage of training, where the solutions are fast-evolving. Hence, a good solution for SWA may require manually selecting which period to average. We also notice that TWA (with the epoch 0-60 solutions) consistently outperforms SWA wherever the averaging begins, confirming that TWA can automatically find a good set of averaging coefficients and provide better performance.
<table><tr><td>SWA Epoch 0-60</td><td>SWA Epoch 10-60</td><td>SWA Epoch 20-60</td><td>SWA Epoch 30-60</td><td>SWA Epoch 40-60</td><td>SWA Epoch 50-60</td><td>TWA Epoch 0-60</td></tr><tr><td>67.66</td><td>70.50</td><td>72.12</td><td>74.14</td><td>75.08</td><td>75.34</td><td>75.90</td></tr></table>
Table C3: SWA with different starting epochs.
# D DISCUSSION ON THE SUM-ONE CONSTRAINT
Let $\alpha = [\alpha_{1},\alpha_{2},\dots ,\alpha_{n}]^{\top}\in \mathbb{R}^{n}$ and $W = [w_{1},w_{2},\dots ,w_{n}]\in \mathbb{R}^{D\times n}$; then $w_{\mathrm{twa}} = W\alpha$. Multiplying both sides by $W^{\top}$, i.e., $W^{\top}w_{\mathrm{twa}} = W^{\top}W\alpha$, we obtain $\pmb{\alpha} = (W^{\top}W)^{-1}W^{\top}\pmb{w}_{\mathrm{twa}}$. Further, we can establish the relation between $\alpha$ and $\beta$ as $\pmb{\alpha} = (W^{\top}W)^{-1}W^{\top}\pmb{P}\pmb{\beta}$, since they are the coordinates of $w_{\mathrm{twa}}$ under two different sets of bases.

In the solution set $A = \{\alpha_{1}\pmb{w}_{1} + \alpha_{2}\pmb{w}_{2} + \dots +\alpha_{n}\pmb{w}_{n} \mid \alpha_{i}\in \mathbb{R}\}$, we do not explicitly require the sum of the averaging coefficients $\sum_{i = 1}^{n}\alpha_{i}$ to be 1. Since the network's performance is sensitive to a direct scaling of all parameters (i.e. $k\pmb{w}$), a good solution in $A$ will inherently have a coefficient sum very close to 1. We verify the sum of the averaging coefficients of the attained solution $\pmb{w}_{\mathrm{twa}}$ on CIFAR-100 with the PreResNet-164 model and observe that $\sum_{i = 1}^{n}\alpha_{i} = 1.02$ (std $< 0.01$).
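This recovery of $\pmb{\alpha}$ (and the sum check) is a small least-squares computation; a minimal sketch with our own variable names:

```python
import torch


def recover_alpha(W, w_twa):
    # alpha = (W^T W)^{-1} W^T w_twa, the coordinates of w_twa under w_1..w_n
    #   W:     (D, n) matrix whose columns are the historical solutions
    #   w_twa: (D,)   the attained TWA solution
    gram = W.t() @ W      # (n, n)
    rhs = W.t() @ w_twa   # (n,)
    return torch.linalg.solve(gram, rhs)


# alpha = recover_alpha(W, w_twa)
# print(alpha.sum())  # empirically close to 1 (about 1.02 in the setting above)
```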
# E NUMERICAL DISCUSSION FOR DDP TRAINING
We numerically measure the averaged per-epoch training time and memory burden for SGD and TWA in the DDP training setting. Specifically, we experiment with the ResNet-50 model on ImageNet and use 1, 2, and 4 GPUs with a batch size of 256 per GPU and a total of 300 historical solutions. The experiments are conducted on NVIDIA A100 40GB GPUs. From the results reported in Table E4, we observe that TWA brings only minor additional cost, e.g. $+2.8\%$ in time and $+2.9\%$ in memory with 4 GPUs, compared with regular SGD training. The additional memory burden becomes even smaller with more GPUs. This shows that TWA provides efficient and scalable averaging for large-scale problems.
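As an illustration of how the subspace projection can be made memory-scalable under DDP, the sketch below shards the parameter dimension across ranks, so each rank stores only its slice of the historical solutions; a single all-reduce recovers the full projection coefficients. This is a hedged sketch of the general idea, not necessarily the exact parallel framework used by TWA.

```python
import torch
import torch.distributed as dist


def sharded_project(grad_shard, P_shard):
    # Computes this rank's slice of P P^T g when the D-dimensional axis is
    # sharded across ranks:
    #   grad_shard: (D_local,)   local slice of the gradient g
    #   P_shard:    (D_local, n) local rows of the basis matrix P
    coef = P_shard.t() @ grad_shard              # partial P^T g, shape (n,)
    dist.all_reduce(coef, op=dist.ReduceOp.SUM)  # full P^T g on every rank
    return P_shard @ coef                        # local slice of P P^T g
```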
Table E4: Time and memory comparisons of SGD and TWA with DDP training.
<table><tr><td rowspan="2">#GPUs</td><td colspan="2">Time</td><td colspan="2">Memory</td></tr><tr><td>SGD</td><td>TWA</td><td>SGD</td><td>TWA</td></tr><tr><td>1</td><td>1638s</td><td>1692s (+3.3%)</td><td>28286.5 MB</td><td>31432.5 MB (+11.1%)</td></tr><tr><td>2</td><td>824s</td><td>862s (+4.6%)</td><td>28382.5 MB</td><td>30092.5 MB (+6.0%)</td></tr><tr><td>4</td><td>420s</td><td>432s (+2.8%)</td><td>28874.5 MB</td><td>29718.5 MB (+2.9%)</td></tr></table>
# F ABLATION STUDY
We conduct an ablation study in Table F5 to analyze the impact of the regularization coefficient $\lambda$. We observe that the regularization brings improvements, though not significant ones: the main regularization effect comes from the drastic reduction in the number of training variables, i.e., regular training optimizes $D$ variables while TWA optimizes only $n$. Since the regularization is easy to implement and adds virtually no training cost, we include it in our method.
Table F5: Ablation studies on the regularization coefficient $\lambda$ .
<table><tr><td>Datasets</td><td>Model</td><td>TWA</td><td>TWA (λ = 0)</td></tr><tr><td rowspan="2">CIFAR-10</td><td>VGG16</td><td>93.79 ± 0.18</td><td>93.78 ± 0.07</td></tr><tr><td>PreActResNet-164</td><td>95.11 ± 0.04</td><td>95.03 ± 0.09</td></tr><tr><td rowspan="2">CIFAR-100</td><td>VGG16</td><td>74.24 ± 0.24</td><td>74.02 ± 0.16</td></tr><tr><td>PreActResNet-164</td><td>78.11 ± 0.19</td><td>78.01 ± 0.22</td></tr></table>
# G ADDITIONAL RESULTS ON NLP TASKS
For NLP tasks, we consider a fine-tuning task with pre-trained models and compare the performance of SWA and TWA. Specifically, we experiment with the Corpus of Linguistic Acceptability (CoLA), a text classification task in the General Language Understanding Evaluation (GLUE, Wang et al. (2018)) benchmark. We use a pre-trained BERT (Devlin et al., 2018) model (bert-base-uncased) from the Hugging Face community$^2$. We fine-tune BERT on CoLA for 3 epochs with the AdamW optimizer (Loshchilov & Hutter, 2017), learning rate 2e-5, and weight decay 0.0. The model weights at the end of these epochs are collected for SWA and TWA. For TWA, we train the fine-tuned model for 1 epoch with a learning rate of 0.5 and regularization coefficient $\lambda = 0.001$. From the results below, we observe that TWA achieves better performance than the competing methods, which further demonstrates the broad applicability of TWA.
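A minimal sketch of this setup with the Hugging Face transformers API is shown below; the two-sentence batch is a toy stand-in for the CoLA training set, and only the hyper-parameters (3 epochs, AdamW, lr 2e-5, weight decay 0.0, epoch-end checkpoint collection) come from the text above.

```python
import torch
from torch.optim import AdamW
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2)
optimizer = AdamW(model.parameters(), lr=2e-5, weight_decay=0.0)

# Toy stand-in for CoLA batches (binary linguistic acceptability labels).
batch = tokenizer(["The book was written by John.",
                   "Book the was John by written."],
                  padding=True, return_tensors="pt")
batch["labels"] = torch.tensor([1, 0])

checkpoints = []
for epoch in range(3):
    loss = model(**batch).loss  # one step per "epoch" in this toy sketch
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    # Collect the epoch-end weights: SWA averages them uniformly,
    # while TWA optimizes their averaging coefficients.
    checkpoints.append({k: v.detach().cpu().clone()
                        for k, v in model.state_dict().items()})
```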
<table><tr><td>Fine-tune</td><td>SWA</td><td>TWA</td></tr><tr><td>56.81</td><td>59.73</td><td>60.36</td></tr></table>
Table G6: Fine-tune results on CoLA.
2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24668f34d6b7795a7d635639993bdfc9095272d456c36375b6288a5da5edfc50
size 560374
2023/Trainable Weight Averaging_ Efficient Training by Optimizing Historical Solutions/layout.json
ADDED
The diff for this file is too large to render. See raw diff

2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_model.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/5a46c4a4-3176-4d02-a09b-ba17d6b20cfc_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b09383cea5a9a87c317f43b5c374648e80837ebab37eeeb496c3939433423a6d
size 8478686

2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/full.md
ADDED
@@ -0,0 +1,495 @@
# TRAINING-FREE STRUCTURED DIFFUSION GUIDANCE FOR COMPOSITIONAL TEXT-TO-IMAGE SYNTHESIS
Weixi Feng$^{1}$, Xuehai He$^{2}$, Tsu-jui Fu$^{1}$, Varun Jampani$^{3}$, Arjun Akula$^{3}$, Pradyumna Narayana$^{3}$, Sugato Basu$^{3}$, Xin Eric Wang$^{2}$, William Yang Wang$^{1}$ $^{1}$University of California, Santa Barbara, $^{2}$University of California, Santa Cruz, $^{3}$Google

# ABSTRACT

Large-scale diffusion models have achieved state-of-the-art results on text-to-image synthesis (T2I) tasks. Despite their ability to generate high-quality and creative images, we observe that attribute binding and compositional capabilities remain major challenges, especially when multiple objects are involved. Attribute binding requires the model to associate objects with the correct attribute descriptions, while compositional skills require the model to combine multiple concepts into a single image. In this work, we improve these two aspects of T2I models to achieve more accurate image compositions. To do this, we incorporate linguistic structures into the diffusion guidance process, based on the controllable properties of manipulating cross-attention layers in diffusion-based T2I models. We observe that keys and values in cross-attention layers carry strong semantic meanings associated with object layouts and content. Therefore, by manipulating the cross-attention representations based on linguistic insights, we can better preserve the compositional semantics in the generated image. Built upon Stable Diffusion, a SOTA T2I model, our structured cross-attention design is efficient and requires no additional training samples. We achieve better compositional skills in qualitative and quantitative results, leading to a significant $5 - 8\%$ advantage in head-to-head user comparison studies. Lastly, we conduct an in-depth analysis to reveal potential causes of incorrect image compositions and justify the properties of cross-attention layers in the generation process.
# 1 INTRODUCTION

Text-to-Image Synthesis (T2I) aims to generate natural and faithful images given a text prompt as input. Recently, there has been a significant advancement in the quality of generated images by extremely large-scale vision-language models, such as DALL-E 2 (Ramesh et al., 2022), Imagen (Saharia et al., 2022), and Parti (Yu et al., 2022). In particular, Stable Diffusion (Rombach et al., 2022) is the state-of-the-art open-source implementation, showing superior evaluation metrics after training on billions of text-image pairs.

In addition to generating high-fidelity images, the ability to compose multiple objects into a coherent scene is also essential. Given a text prompt from the user end, T2I models need to generate an image that contains all the visual concepts mentioned in the text. Achieving this requires the model to understand both the full prompt and the individual linguistic concepts within it. As a result, the model should be able to combine multiple concepts and generate novel objects that have never appeared in the training data. In this work, we mainly focus on improving the compositionality of the generation process, as it is essential for controllable and generalized text-to-image synthesis with multiple objects in a complex scene.

Attribute binding is a critical compositionality challenge (Ramesh et al., 2022; Saharia et al., 2022) for existing large-scale diffusion-based models. Despite improvements in generating multiple objects in the same scene, existing models still fail when given a prompt such as "a brown bench in front of a white building" (see Fig. 1). The output images contain "a white bench" and "a brown building" instead, potentially due to strong training set bias or imprecise language understanding. From a practical perspective, explaining and solving this two-object binding challenge is a primary step toward understanding more complex prompts with multiple objects. Therefore, how to bind the attributes
Figure 1: Three challenging phenomena in compositional generation. Attribute leakage: the attribute of one object is (partially) observable in another object. Interchanged attributes: the attributes of two or more objects are interchanged. Missing objects: one or more objects are missing. With a slight abuse of attribute binding definitions, we aim to address all three problems in this work.

to the correct objects is a fundamental problem for more complicated and reliable compositional generation. While previous work has addressed compositional T2I (Park et al., 2021), our work tackles open-domain foreground objects with counterfactual attributes, such as color and materials.
Even though state-of-the-art (SOTA) T2I models are trained on large-scale text-image datasets, they can still produce inaccurate results for prompts as simple as the example above. Hence, we are motivated to seek an alternative, data-efficient method to improve compositionality. We observe that the attribute-object relation pairs can be obtained as text spans for free from the parsing tree of the sentence. Therefore, we propose to combine the structured representations of prompts, such as a constituency tree or a scene graph, with the diffusion guidance process. Text spans only depict limited regions of the whole image. Conventionally, we would need spatial information such as coordinates (Yang et al., 2022) as input to map their semantics into the corresponding image regions. However, coordinate inputs cannot be interpreted by T2I models. Instead, we make use of the observation that attention maps provide token-region associations for free in trained T2I models (Hertz et al., 2022). By modifying the key-value pairs in cross-attention layers, we manage to map the encoding of each text span into its attended regions in the 2D image space.
In this work, we make similar observations on Stable Diffusion (Rombach et al., 2022) and utilize this property to build structured cross-attention guidance. Specifically, we use language parsers to obtain hierarchical structures from the prompts. We extract text spans across all levels, including visual concepts or entities, and encode them separately to disentangle the attribute-object pairs from each other. Compared to using a single sequence of text embeddings for guidance, we improve compositionality by using multiple sequences, each emphasizing an entity or a union of entities from multiple hierarchies of the structured language representation. We refer to our method as Structured Diffusion Guidance (StructureDiffusion). Our contributions can be summarized as three-fold:
- We propose an intuitive and effective method to improve compositional text-to-image synthesis by utilizing structured representations of language inputs. Our method is efficient and training-free, requiring no additional training samples.
- Experimental results show that our method achieves more accurate attribute binding and compositionality in the generated images. We also propose a benchmark named Attribute Binding Contrast set (ABC-6K) to measure the compositional skills of T2I models.
- We conduct extensive experiments and analysis to identify the causes of incorrect attribute binding, which points out future directions for improving the faithfulness and compositionality of text-to-image synthesis.
Figure 2: An illustration of cross-attention operations and the token-region associations from attention maps. We omit some tokens for simplicity.
# 2 DIFFUSION MODELS & STRUCTURED GUIDANCE

In this section, we propose a simple yet effective approach that incorporates structured language representations into the cross-attention layers. We briefly introduce the Stable Diffusion model and its critical components in Sec. 2.1. Then, we present our method in detail in Sec. 2.2.

# 2.1 BACKGROUND

Stable Diffusion We implement our approach and experiments on the state-of-the-art T2I model, Stable Diffusion (Rombach et al., 2022). It is a two-stage method that consists of an autoencoder and a diffusion model. The pre-trained autoencoder encodes images as lower-resolution latent maps for diffusion training. During inference, it decodes generated outputs from the diffusion model into images. The diffusion model generates lower-resolution latent maps based on a random Gaussian noise input $z^T$. Given $z^t$, it outputs a noise estimation $\epsilon$ at each step $t$ and subtracts it from $z^t$. The final noise-free latent map prediction $z^0$ is fed into the autoencoder to generate images. Stable Diffusion adopts a modified UNet (Ronneberger et al., 2015) for noise estimation and a frozen CLIP text encoder (Radford et al., 2021) to encode text inputs as embedding sequences. The interactions between the image space and the textual embeddings are achieved through multiple cross-attention layers in both downsampling and upsampling blocks.
CLIP Text Encoder Given an input prompt $\mathcal{P}$, the CLIP encoder encodes it as a sequence of embeddings $\mathcal{W}_{\mathrm{p}} = \mathrm{CLIP}_{\mathrm{text}}(\mathcal{P}) \in \mathbb{R}^{l \times c_{\mathrm{p}}}$, where $c_{\mathrm{p}}$ is the embedding dimension and $l$ is the sequence length. Our key observation is that the contextualization of CLIP embeddings is a potential cause of incorrect attribute binding. Due to the causal attention masks, tokens in the later part of a sequence are blended with the semantics of the tokens before them. For example, when the user indicates a rare color for the second object (e.g. "a yellow apple and red bananas"), Stable Diffusion tends to generate the "bananas" in "yellow", as the embedding of "yellow" is attended to by the token "bananas".
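This contextualization effect can be probed directly on the CLIP text encoder used by Stable Diffusion; the small sketch below compares the embedding of "bananas" under the two color orders (the token index is approximate and depends on the tokenizer):

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")


def encode(prompt):
    tokens = tokenizer(prompt, padding="max_length", return_tensors="pt")
    with torch.no_grad():
        return text_encoder(**tokens).last_hidden_state[0]  # (l, c_p)


e1 = encode("a yellow apple and red bananas")
e2 = encode("a red apple and yellow bananas")
# Same word, same position, but the causal mask blends in the preceding
# color words, so the two "bananas" embeddings differ:
print((e1[6] - e2[6]).norm())  # index 6 ~ "bananas" (after <bos>)
```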
Cross Attention Layers The cross-attention layers take the embedding sequences from the CLIP text encoder and fuse them with latent feature maps to achieve classifier-free guidance. Given a 2D feature map $\mathcal{X}^t$, it is projected into queries by a linear layer $f_{Q}(\cdot)$ and reshaped as $Q^{t}\in R^{(n,h\times w,d)}$, where $n$ denotes the number of attention heads and $d$ is the feature dimension. Similarly, $\mathcal{W}_{\mathrm{p}}$ is projected into keys and values $K_{\mathrm{p}},V_{\mathrm{p}}\in R^{(n,l,d)}$ by linear layers $f_{K}(\cdot),f_{V}(\cdot)$. The attention maps are the normalized product between queries and keys, denoted by a function $f_{M}(\cdot)$:
$$
M^{t} = f_{M}\left(Q^{t}, K_{\mathrm{p}}\right) = \operatorname{Softmax}\left(\frac{Q^{t} K_{\mathrm{p}}^{T}}{\sqrt{d}}\right), \quad M^{t} \in R^{(n, h \times w, l)}. \tag{1}
$$
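A minimal PyTorch sketch of Eq. 1, with shapes following the notation above:

```python
import torch
import torch.nn.functional as F


def attention_maps(Q, K):
    # Eq. 1: M^t = Softmax(Q K^T / sqrt(d))
    #   Q: (n_heads, h * w, d) projected image queries
    #   K: (n_heads, l, d)     projected text keys
    # Returns M: (n_heads, h * w, l), one spatial map per text token.
    d = Q.shape[-1]
    scores = Q @ K.transpose(-2, -1) / d ** 0.5
    return F.softmax(scores, dim=-1)
```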
Figure 3: An illustration of our cross-attention design with structured representations. We unflatten the query and attention maps and omit the feature dimension $d$ of all query, key, and value tensors for demonstration purposes. Note that noun phrases at multiple hierarchies are extracted, encoded through the frozen CLIP text encoder, and projected to value vectors.
Cross Attention Controls Hertz et al. (2022) observe that the spatial layouts depend on the cross-attention maps in Imagen (Saharia et al., 2022). These maps control the layout and structure of generated images, while the values contain rich semantics that are mapped into the attended regions. Therefore, we assume that the image layout and content can be disentangled by controlling attention maps and values separately.

# 2.2 STRUCTURED DIFFUSION GUIDANCE

Given the challenging prompts in Fig. 1, the attribute-object pairs are available for free<sup>1</sup> in many structured representations, such as a constituency tree or a scene graph. We seek an implicit way of combining language structures with the cross-attention layers. As shown in Fig. 3, we can extract multiple noun phrases (NPs) and map their semantics into the corresponding regions. Since $M^t$ provides natural token-region associations (see Fig. 2), we can apply it to multiple values from different NPs to achieve region-wise semantic guidance.
Specifically, given a parser $\xi(\cdot)$, we first extract a collection of concepts from all hierarchical levels as $\mathcal{C} = \{c_1, c_2, \ldots, c_k\}$. For constituency parsing, we extract all NPs from the tree structure (see Fig. 3, left). For scene graphs, we extract objects and their relations with other objects as text segments. We encode each NP separately:
$$
\mathbb{W} = \left[\mathcal{W}_{\mathrm{p}}, \mathcal{W}_{1}, \mathcal{W}_{2}, \dots, \mathcal{W}_{k}\right], \quad \mathcal{W}_{i} = \operatorname{CLIP}_{\text{text}}\left(c_{i}\right), \; i = 1, \dots, k. \tag{2}
$$
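For illustration, extracting the NPs in $\mathcal{C}$ from a constituency parse takes only a few lines; the sketch below assumes the bracketed parse string comes from an off-the-shelf parser, and the example prompt is ours:

```python
from nltk.tree import Tree


def noun_phrases(parse_str):
    # Collect the text spans of all NP constituents, across all levels.
    tree = Tree.fromstring(parse_str)
    return [" ".join(st.leaves())
            for st in tree.subtrees() if st.label() == "NP"]


# Bracketed parse as produced by a constituency parser (e.g. Stanza):
parse = ("(S (NP (NP (DT a) (JJ red) (NN car)) (CC and) "
         "(NP (DT a) (JJ white) (NN sheep))))")
print(noun_phrases(parse))
# ['a red car and a white sheep', 'a red car', 'a white sheep']
```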
The embedding sequence $\mathcal{W}_i$ is realigned with $\mathcal{W}_{\mathrm{p}}$ as shown in the middle of Fig. 3. Embeddings between $\langle \mathrm{bos}\rangle$ and $\langle \mathrm{pad}\rangle$ are inserted into $\mathcal{W}_{\mathrm{p}}$ to create a new sequence, denoted as $\overline{\mathcal{W}}_i$. We use $\mathcal{W}_{\mathrm{p}}$ to obtain $K_{\mathrm{p}}$ and $M^t$ as in Eq. 1, assuming that the full-prompt key is able to generate layouts without missing objects. We obtain a set of values from $\mathbb{W}$ and multiply each with $M^t$ to achieve a conjunction of the $k$ NPs in $\mathcal{C}$:
$$
\mathbb{V} = \left[f_{V}\left(\mathcal{W}_{\mathrm{p}}\right), f_{V}\left(\overline{\mathcal{W}}_{1}\right), \dots, f_{V}\left(\overline{\mathcal{W}}_{k}\right)\right] = \left[V_{\mathrm{p}}, V_{1}, \dots, V_{k}\right]. \tag{3}
$$

$$
O^{t} = \frac{1}{k + 1} \sum_{i} \left(M^{t} V_{i}\right), \quad i = \mathrm{p}, 1, 2, \dots, k. \tag{4}
$$
Compared to using $f_{V}(\mathcal{W}_{\mathrm{p}})$ only, Eq. 4 does not modify the image layout or composition, since $M^{t}$ is still calculated from $Q^{t}$ and $K_{\mathrm{p}}$. Empirically, we justify this claim by a series of visualizations of $M^{t}$
Algorithm 1 StructureDiffusion Guidance.

Require: Prompt $\mathcal{P}$, parser $\xi$, decoder $\psi$, trained diffusion model $\phi$
Ensure: Generated image $x$
1: Retrieve the concept set $\mathcal{C} = [c_1,\dots ,c_k]$ by traversing $\xi (\mathcal{P})$.
2: $\mathcal{W}_{\mathrm{p}}\gets \mathrm{CLIP}_{\mathrm{text}}(\mathcal{P})$, $\mathcal{W}_{i}\gets \mathrm{CLIP}_{\mathrm{text}}(c_{i})$; $i = 1,\ldots ,k$
3: for $t = T,T - 1,\dots ,1$ do
4:   for each cross-attention layer in $\phi$ do
5:     Obtain the previous layer's output $\mathcal{X}^t$
6:     $Q^{t}\gets f_{Q}(\mathcal{X}^{t})$, $K_{\mathrm{p}}\gets f_{K}(\mathcal{W}_{\mathrm{p}})$, $V_{i}\gets f_{V}(\overline{\mathcal{W}}_{i})$, $i = \mathrm{p},1,\dots ,k$
7:     Obtain the attention maps $M^t$ from $Q^t,K_{\mathrm{p}}$ {Eq. 1}
8:     Obtain $O^t$ from $M^t$ and $\{V_i\}$, and feed it to the following layers {Eq. 4}
9:   end for
10: end for
11: Feed $z^0$ to the decoder $\psi (\cdot)$ to generate $x$.
(see Appendix C). However, Stable Diffusion tends to omit objects in the generated images (Fig. 1), especially for concept conjunctions that connect two objects with the word "and". We therefore devise a variant of our method that computes a set of attention maps $\mathbb{M}^t = \{M_{\mathrm{p}}^t,M_1^t,\ldots \}$ from $\mathcal{C}$ and multiplies them with $\mathbb{V}$:
$$
\mathbb{K} = \left\{f_{K}\left(\mathcal{W}_{i}\right)\right\}, \quad \mathbb{M}^{t} = \left\{f_{M}\left(Q^{t}, K_{i}\right)\right\}, \quad i = \mathrm{p}, 1, 2, \dots, k. \tag{5}
$$

$$
O^{t} = \frac{1}{k + 1} \sum_{i} \left(M_{i}^{t} V_{i}\right), \quad i = \mathrm{p}, 1, 2, \dots, k. \tag{6}
$$
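Both guidance variants amount to a small change in the cross-attention computation; a minimal sketch (shapes follow Eq. 1; this is an illustration of Eqs. 4 and 6, not the authors' exact code):

```python
import torch
import torch.nn.functional as F


def structured_attention(Q, K_p, values, keys=None):
    # values: [V_p, V_1, ..., V_k] from the realigned span encodings (Eq. 3)
    # keys:   optional [K_p, K_1, ..., K_k]; if given, use the Eq. 5-6
    #         variant with one attention map per span
    d = Q.shape[-1]

    def attn(K):
        return F.softmax(Q @ K.transpose(-2, -1) / d ** 0.5, dim=-1)

    if keys is None:                       # Eq. 4: one shared map from K_p
        M = attn(K_p)
        outs = [M @ V for V in values]
    else:                                  # Eq. 6: per-span maps M_i^t
        outs = [attn(K) @ V for K, V in zip(keys, values)]
    return torch.stack(outs).mean(dim=0)   # average over the k + 1 terms
```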
$O^t$ is the output of the corresponding cross-attention layer and the input to the downstream layers that generate the final image $x$. Our method is summarized in Algorithm 1 and requires no training or additional data.
# 3 EXPERIMENT

# 3.1 EXPERIMENT SETTINGS
Datasets To address attribute binding and compositional generation, we propose a new benchmark, Attribute Binding Contrast set (ABC-6K). It consists of natural prompts from MSCOCO, each containing at least two color words that modify different objects. We also switch the positions of the two color words to create a contrast caption (Gardner et al., 2020). We end up with 6.4K captions, i.e., 3.2K contrastive pairs. In addition to natural compositional prompts, we challenge our method with less detailed prompts that conjoin two concepts. These prompts follow the sentence pattern "a red apple and a yellow banana", conjoining two objects with their attribute descriptions. We refer to this set of prompts as Concept Conjunction 500 (CC-500). We also evaluate our method on 10K randomly sampled captions from MSCOCO (Lin et al., 2014). We show that our method generalizes beyond attribute binding and introduces no quality degradation on general prompts.

Evaluation Metrics We mainly rely on human evaluations for compositional prompts and concept conjunction (ABC-6K & CC-500). We ask annotators to compare two generated images, from Stable Diffusion and from our method respectively, and indicate which image demonstrates better image-text alignment or image fidelity. For image fidelity, we ask the annotators "Regardless of the text, which image is more realistic and natural?" We also investigate an automatic evaluation metric for image compositions, i.e., using a SOTA phrase grounding model, GLIP (Li et al., 2022), to match phrase-object pairs. For system-level evaluation, we follow previous work and report the Inception Score (IS) (Salimans et al., 2016), Fréchet Inception Distance (FID) (Heusel et al., 2017) and CLIP R-precision (R-prec.) (Park et al., 2021). IS and FID mainly measure the systematic quality and diversity of the image bank, while R-prec. measures image-level alignment.
# 3.2 COMPOSITIONAL PROMPTS

Here we show the quantitative and qualitative evaluation results on ABC-6K. We observe that our method sometimes generates images very similar to those of Stable Diffusion. Hence, we first generate
<table><tr><td rowspan="2">Benchmark</td><td rowspan="2">StructureDiffusion (ours) v.s.</td><td colspan="3">Alignment</td><td colspan="3">Fidelity</td></tr><tr><td>Win (↑)</td><td>Lose (↓)</td><td>Tie</td><td>Win (↑)</td><td>Lose (↓)</td><td>Tie</td></tr><tr><td>ABC-6K</td><td>Stable Diffusion</td><td>42.2</td><td>35.6</td><td>22.2</td><td>48.3</td><td>39.1</td><td>12.6</td></tr><tr><td rowspan="2">CC-500</td><td>Stable Diffusion</td><td>31.8</td><td>27.7</td><td>38.9</td><td>37.8</td><td>30.6</td><td>31.6</td></tr><tr><td>Composable Diffusion</td><td>46.5</td><td>30.1</td><td>22.8</td><td>61.4</td><td>19.8</td><td>18.8</td></tr></table>

Table 1: Percentage of generated images from StructureDiffusion that are better than (win), tied with, or worse than (lose) the compared model in terms of text-image alignment and image fidelity. We filtered out the $20\%$ most similar image pairs for comparison (see Sec. E). Composable Diffusion cannot be applied to ABC-6K, as those prompts may not contain explicit "and" words that separate concepts.

Figure 4: Qualitative results on ABC-6K. Our method improves both object-level and scene-level compositionality.
two images per prompt for our method and Stable Diffusion, yielding around 12K image pairs to compare. Then, we filter out the $20\%$ most similar pairs and randomly sample 1500 pairs for human evaluation. As shown in Table 1, annotators indicate around a $42\%$ chance of our method winning the comparison, $7\%$ higher than losing; there is still a $22\%$ chance that our images are tied with images from Stable Diffusion.

We show qualitative examples characterizing three different perspectives in Fig. 4. Our method fills in the correct color for different parts of an object or for different objects, as shown in the first two examples. The third example demonstrates that our method can mitigate the issue of "missing objects". Among the $42\%$ winning cases, annotators attribute $31\%$ to "fewer missing objects", $14.1\%$ to "better-matched colors", and $54.8\%$ to "other attributes or details". The results certify that the improvement goes beyond colors to component completeness and fine-grained details. More qualitative examples characterizing all three aspects can be found in Fig. 14 in the Appendix.
# 3.3 CONCEPT CONJUNCTION

Here we address challenging concept conjunction prompts and evaluate our method on CC-500. Apart from Stable Diffusion, we also compare to Composable Diffusion (Liu et al., 2022) implemented on top of Stable Diffusion. For Composable Diffusion, we split the prompts into text segments at the keyword "and" and feed each segment into an independent diffusion process. We generate three images per prompt and use all of them in the human evaluation against Stable Diffusion. We randomly sample 600 images for the comparison to Composable Diffusion.
<table><tr><td rowspan="3">Methods</td><td colspan="6">CC-500 (Prompt format: “a [colorA] [objectA] and a [colorB] [objectB]”)</td></tr><tr><td colspan="3">Human Annotations</td><td colspan="2">GLIP</td><td rowspan="2">Human-GLIP Consistency</td></tr><tr><td>Zero/One obj. (↓)</td><td>Two obj.</td><td>Two obj. w/ correct colors</td><td>Zero/One obj. (↓)</td><td>Two obj.</td></tr><tr><td>Stable Diffusion</td><td>65.5</td><td>34.5</td><td>19.2</td><td>69.0</td><td>31.0</td><td>46.4</td></tr><tr><td>Composable Diffusion</td><td>69.7</td><td>30.3</td><td>20.6</td><td>74.2</td><td>25.8</td><td>48.9</td></tr><tr><td>StructureDiffusion (Ours)</td><td>62.0</td><td>38.0</td><td>22.7</td><td>68.8</td><td>31.2</td><td>47.6</td></tr></table>

Table 2: Fine-grained human and automatic evaluation results on CC-500. Recall that each prompt is a conjunction of two different objects with different colors. "Zero/One obj." means that the model fails to generate all desired objects in the image. "Human-GLIP consistency" reflects the percentage of images where human annotations align with GLIP detection results.

Figure 5: Qualitative results on CC-500 prompts that emphasize two aspects. (a) Color leakage: our method prevents the green color from invading the bird or apple. (b) Missing objects: our method completes the "blue bowl" and improves the quality of the "blue apple".
|
| 138 |
+
|
| 139 |
+
As shown in Table 1, our method outperforms Stable Diffusion by around $4.1\%$ and Composable Diffusion by $16.4\%$ in terms of image-text alignment. We also observe that our method enhances some fine-grained details in the generated images, leading to a $7.2\%$ improvement in image fidelity when compared with Stable Diffusion. We observe that images from composable diffusion can be oversaturated with unnatural visual textures and layouts, which could be the reason for StructureDiffusion to have high win rate in image fidelity. As shown in Fig. 5 and Fig. 13. Our approach prevents color bleeding (left), missing objects (right) and strengthens details (right).

To further quantify text-image alignment, we consider both human annotations and automatic evaluations. For each object mentioned in the prompt, we ask annotators whether the object exists in the image and whether it has the correct color. We also apply a state-of-the-art grounding model, GLIP (Li et al., 2022), to ground each "a [color] [object]" phrase into bounding boxes. We report the percentage of images that contain incomplete objects / complete objects / complete objects with correct colors in Table 2. StructureDiffusion improves compositionality by $3.5\%$ based on human annotations but only by $0.2\%$ based on GLIP. We find that humans disagree with GLIP on more than $50\%$ of the images, as indicated by the low consistency rate; previous work also points out the deficiency of large pre-trained models in compositional understanding (Thrush et al., 2022).
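
For concreteness, the sketch below shows how the Table 2 percentages could be aggregated from per-image results. The record format and the agreement criterion behind "Human-GLIP consistency" are our assumptions; the paper does not spell them out.

```python
def fine_grained_stats(records):
    """Aggregate Table 2 style metrics from hypothetical per-image records such as
    {"human_objects": 2, "human_colors_correct": True, "glip_objects": 2},
    where *_objects counts how many of the two prompted objects were found."""
    n = len(records)
    human_two = sum(r["human_objects"] == 2 for r in records)
    human_two_color = sum(r["human_objects"] == 2 and r["human_colors_correct"] for r in records)
    glip_two = sum(r["glip_objects"] == 2 for r in records)
    # Consistency here means human and GLIP agree on whether both objects are present.
    consistent = sum((r["human_objects"] == 2) == (r["glip_objects"] == 2) for r in records)
    to_pct = lambda x: 100.0 * x / n
    return {
        "human_zero_or_one_obj": to_pct(n - human_two),
        "human_two_obj": to_pct(human_two),
        "human_two_obj_correct_colors": to_pct(human_two_color),
        "glip_zero_or_one_obj": to_pct(n - glip_two),
        "glip_two_obj": to_pct(glip_two),
        "human_glip_consistency": to_pct(consistent),
    }
```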
# 3.4 OTHER PROMPTS

We show that StructureDiffusion maintains the overall image quality and diversity on general prompts. Following the standard evaluation process, we generate 10,000 images from randomly sampled MSCOCO captions. Stable Diffusion obtains 39.9 IS, 18.0 FID, and 72.2 R-Precision, while our method achieves 40.9 IS, 17.9 FID, and 72.3 R-Precision; the comparable IS/FID/R-Precision scores indicate that StructureDiffusion maintains image fidelity and diversity.



Figure 6: Qualitative results of using the scene graph parser to generate structured representations.











Figure 7: Ablation study on the text sequence embeddings. We find that the padding embeddings are fully contextualized, representing the prompt's high-level semantics. However, not all padding tokens are necessary to maintain a high-fidelity output from Stable Diffusion.
# 3.5 SCENE GRAPH INPUT

We show that our method is not limited to constituency parsing and can be extended to other structured representations, such as scene graphs. As shown in Fig. 6, we first adopt the scene graph parser (Wu et al., 2019) to obtain, from the input prompt, a graph like the ones shown next to each image. The parser returns basic entities and the relations between them. We extract text spans of basic entities with their attributes attached, as well as text spans that include two related entities; we provide examples in Table 3 in the Appendix and compare them with the constituency parser. As before, we encode these spans separately and re-align each with the encoding sequence of the entire prompt. On MS-COCO, the scene graph parser setting maintains the image quality with 39.2 IS, 17.9 FID, and 72.0 R-Precision. Compared with Stable Diffusion on ABC-6K, the scene graph parser achieves a 34.2%/32.9%/32.9% Win/Lose/Tie rate in image-text alignment and 34.5%/32.5%/33.0% in image fidelity. On CC-500, the scene graph parser leads to the same output images because the extracted text spans are identical. We refer to Table 3 and Fig. 12 for more results and comparisons.
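
As an illustration of the span-extraction step, the sketch below uses the open-source `sng_parser` package, one implementation in the spirit of the parser of Wu et al. (2019); the field names follow that package's documentation and should be treated as assumptions rather than the exact pipeline used in our experiments.

```python
import sng_parser  # textual scene graph parser

def scene_graph_spans(prompt: str):
    """Extract entity spans (attributes attached) and two-entity relation spans."""
    graph = sng_parser.parse(prompt)
    entity_spans = [e["span"] for e in graph["entities"]]  # e.g. "a black cat"
    relation_spans = [
        "{} {} {}".format(
            graph["entities"][r["subject"]]["span"],  # subject entity span
            r["relation"],                            # e.g. "on top of"
            graph["entities"][r["object"]]["span"],   # object entity span
        )
        for r in graph["relations"]
    ]
    return entity_spans + relation_spans
```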
# 4 ABLATION STUDY
# 4.1 RE-ALIGNING SEQUENCE

In Section 2, we describe a method to realign the encoding of a text span with the encoding sequence of the full prompt. Since the noun-phrase text spans are shorter than the full sequence, re-alignment ensures that each token's value vector corresponds to the correct attention map. In contrast, naively expanding the span to the length of the full sequence degrades image quality by roughly 2 IS / FID points (37.5 IS, 19.8 FID) compared with re-alignment or Stable Diffusion.
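
A minimal sketch of one way to implement the re-alignment is given below. Locating the span's token positions inside the full prompt (here assumed precomputed by matching tokenizer outputs) is an implementation detail.

```python
import torch

def realign(full_enc: torch.Tensor, span_enc: torch.Tensor, span_positions):
    """Place a span's token encodings back at the span's positions in the full
    prompt encoding (both are 77 x d CLIP outputs). The span's i-th content
    token sits at index 1 + i in span_enc, right after its BOS token. Padding
    embeddings of the full sequence are preserved, so every value vector keeps
    matching the attention map computed from the full prompt."""
    out = full_enc.clone()
    for i, pos in enumerate(span_positions):
        out[pos] = span_enc[1 + i]
    return out
```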
# 4.2 CONTEXTUALIZED TEXT EMBEDDINGS

One limitation of StructureDiffusion is that the cross-attention computation cost grows with the number of noun phrases. Yet we notice that most of the attention maps are computed from padding embeddings, since Stable Diffusion adopts the CLIP text encoder, which automatically pads every sequence to 77 tokens. We conjecture that not all padding tokens are necessary for generating high-quality images. As shown in Fig. 7, we study four different patterns of token embeddings. Keeping only the nearest padding embedding (together with the prompt tokens) maintains an IS / FID score similar to the full sequence, whereas removing this padding embedding as well results in apparent degradation. Using only the nearest padding embedding yields the worst image quality, yet the high-level image layout and semantics are preserved (see the bottom right of Fig. 7). This phenomenon indicates that the padding embeddings are fully contextualized with the semantics of the full prompt, and it also justifies our re-alignment operation, which preserves the padding embeddings of the main sequence $\mathcal{W}_{\mathrm{full}}$.
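
The truncation patterns of Fig. 7 amount to simple slices over the 77-token CLIP encoding; the sketch below is our shorthand for them, with the index conventions being assumptions.

```python
def truncated_variants(enc, n_prompt_tokens):
    """Build the four token-embedding patterns studied in Fig. 7.
    enc: (77, d) CLIP encoding; n_prompt_tokens counts BOS + prompt tokens + EOS."""
    return {
        "full_sequence": enc,                                   # all 77 embeddings
        "prompt_plus_nearest_pad": enc[: n_prompt_tokens + 1],  # keep one padding embedding
        "prompt_only": enc[: n_prompt_tokens],                  # drop all padding
        "nearest_pad_only": enc[n_prompt_tokens : n_prompt_tokens + 1],
    }
```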
# 5 RELATED WORK

Text-to-Image Synthesis Diffusion models are an emerging class of generative models that produce high-quality images with a much more stable training process (Song & Ermon, 2019; Ho et al., 2020). Rombach et al. (2022) propose to encode an image with an autoencoder and then leverage a diffusion model to generate continuous feature maps in the latent space. Stable Diffusion (Rombach et al., 2022) adopts a similar architecture but is trained on large-scale image-text datasets with a fixed CLIP text encoder. Imagen (Saharia et al., 2022) highlights the importance of language understanding by using a frozen T5 encoder (Raffel et al., 2020), a dedicated large language model. We mainly focus on diffusion models and conduct our experiments on Stable Diffusion (Rombach et al., 2022), the state-of-the-art open-source T2I model.

Compositional Generation Compositional or controllable generation has been an essential direction for T2I models toward understanding and disentangling basic concepts in the generation process. As text inputs are relatively weak conditions, previous works leverage layouts or scene graphs to enhance compositionality (Johnson et al., 2018; Hong et al., 2018; Yang et al., 2022; Gafni et al., 2022). More recently, Liu et al. (2022) propose an approach in which concept conjunctions are achieved by adding estimated scores from a parallel set of diffusion processes. In contrast, our method can be directly merged into the cross-attention layers with much less computational overhead.

Diffusion Guidance Ho & Salimans (2022) develop classifier-free guidance, where a single diffusion model is jointly trained under conditional and unconditional inputs. Most large-scale state-of-the-art models, including autoregressive ones, adopt this technique for flexible and improved conditional synthesis (Rombach et al., 2022; Ramesh et al., 2022; Gafni et al., 2022; Yu et al., 2022; Saharia et al., 2022). Hertz et al. (2022) discover unique properties of cross-attention maps in Imagen (Saharia et al., 2022) and achieve structure-preserving image editing by manipulating these maps. We observe similar properties in Stable Diffusion (Rombach et al., 2022) but propose a different algorithm for fine-grained, compositional text-to-image generation.
# 6 CONCLUSION

In this work, we propose a training-free method for compositional text-to-image generation. First, we observe that existing large-scale T2I diffusion models can still struggle with compositional image synthesis, and we address this challenge by explicitly focusing on binding objects with the correct attributes. Second, we propose structured diffusion guidance, which incorporates language structures into the cross-attention layers, along with two simple techniques to align the structured encodings with the attention maps. With our structured guidance on Stable Diffusion, attributes are bound more accurately while the overall image quality and diversity are maintained. In addition, we justify our approach with an in-depth analysis of the frozen language encoder and the attention maps. Future work may explore explicit approaches to generating plausible image layouts without missing components. We hope that our approach accelerates the development of interpretable and efficient methods for diffusion-based text-to-image models.
# ACKNOWLEDGEMENT

We would like to thank the Robert N. Noyce Trust for their generous gift to the University of California via the Noyce Initiative. The work was also partially funded by an unrestricted gift from Google and by National Science Foundation award #2048122. The opinions and conclusions expressed in this publication are the authors' own and should not be construed as representing the sponsors' official policies, either expressed or implied.
# REPRODUCIBILITY STATEMENT

We release our core codebase, containing the implementation of our method, the experimental settings, and the benchmarks of compositional prompts, in the supplementary materials.
# ETHICAL STATEMENT

For data collection and verification, we use the Amazon Mechanical Turk platform and organize the comparison task as batches of HITs. We select workers from English-speaking countries, including the US, CA, UK, AU, and NZ, since the task requires understanding the English input prompt. Each HIT takes around 15-30 seconds on average to accomplish, and we pay 0.15 US dollars per submitted HIT, resulting in an hourly payment of around 18 US dollars.
# REFERENCES

Prafulla Dhariwal and Alexander Nichol. Diffusion models beat GANs on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021.

Ming Ding, Wendi Zheng, Wenyi Hong, and Jie Tang. CogView2: Faster and better text-to-image generation via hierarchical transformers. arXiv preprint arXiv:2204.14217, 2022.

Alaaeldin El-Nouby, Shikhar Sharma, Hannes Schulz, Devon Hjelm, Layla El Asri, Samira Ebrahimi Kahou, Yoshua Bengio, and Graham W. Taylor. Tell, Draw, and Repeat: Generating and modifying images based on continual linguistic instruction. In ICCV, 2019.

Tsu-Jui Fu, Xin Eric Wang, Scott Grafton, Miguel Eckstein, and William Yang Wang. SSCR: Iterative language-based image editing via self-supervised counterfactual reasoning. In EMNLP, 2020.

Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-A-Scene: Scene-based text-to-image generation with human priors. arXiv preprint arXiv:2203.13131, 2022.

Matt Gardner, Yoav Artzi, Victoria Basmov, Jonathan Berant, Ben Bogin, Sihao Chen, Pradeep Dasigi, Dheeru Dua, Yanai Elazar, Ananth Gottumukkala, et al. Evaluating models' local decision boundaries via contrast sets. Findings of Empirical Methods in Natural Language Processing, 2020.

Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10696-10706, 2022a.

Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10696-10706, 2022b.

Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022.

Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. GANs trained by a two time-scale update rule converge to a local Nash equilibrium. Advances in Neural Information Processing Systems, 30, 2017.

Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022.

Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020.

Seunghoon Hong, Dingdong Yang, Jongwook Choi, and Honglak Lee. Inferring semantic layout for hierarchical text-to-image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7986-7994, 2018.

Justin Johnson, Agrim Gupta, and Li Fei-Fei. Image generation from scene graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1219-1228, 2018.

Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11523-11532, 2022.

Bowen Li, Xiaojuan Qi, Thomas Lukasiewicz, and Philip Torr. Controllable text-to-image generation. Advances in Neural Information Processing Systems, 32, 2019.

Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10965-10975, 2022.

Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision, pp. 740-755. Springer, 2014.

Luping Liu, Yi Ren, Zhijie Lin, and Zhou Zhao. Pseudo numerical methods for diffusion models on manifolds. In International Conference on Learning Representations, 2021a.

Nan Liu, Shuang Li, Yilun Du, Antonio Torralba, and Joshua B Tenenbaum. Compositional visual generation with composable diffusion models. arXiv preprint arXiv:2206.01714, 2022.

Xihui Liu, Dong Huk Park, Samaneh Azadi, Gong Zhang, Arman Chopikyan, Yuxiao Hu, Humphrey Shi, Anna Rohrbach, and Trevor Darrell. More control for free! Image synthesis with semantic diffusion guidance. arXiv preprint arXiv:2112.05744, 2021b.

Chao Lou, Wenjuan Han, Yuhuan Lin, and Zilong Zheng. Unsupervised vision-language parsing: Seamlessly bridging visual scene graphs with language structures via dependency relationships. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 15607-15616, June 2022.

Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. ICML, 2021.

Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021.

Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D. Manning. Stanza: A Python natural language processing toolkit for many human languages. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, 2020.

Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67, 2020. URL http://jmlr.org/papers/v21/20-074.html.

Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pp. 8821-8831. PMLR, 2021.

Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022.

Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022.

Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 234-241. Springer, 2015.

Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. DreamBooth: Fine tuning text-to-image diffusion models for subject-driven generation. arXiv preprint arXiv:2208.12242, 2022.

Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022.

Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training GANs. Advances in Neural Information Processing Systems, 29, 2016.

Sebastian Schuster, Ranjay Krishna, Angel Chang, Li Fei-Fei, and Christopher D Manning. Generating semantically precise scene graphs from textual descriptions for improved image retrieval. In Proceedings of the Fourth Workshop on Vision and Language, pp. 70-80, 2015.

Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in Neural Information Processing Systems, 32, 2019.

Ming Tao, Hao Tang, Fei Wu, Xiao-Yuan Jing, Bing-Kun Bao, and Changsheng Xu. DF-GAN: A simple and effective baseline for text-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16515-16525, 2022.

Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross. Winoground: Probing vision and language models for visio-linguistic compositionality. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5238-5248, 2022.

Bo Wan, Wenjuan Han, Zilong Zheng, and Tinne Tuytelaars. Unsupervised vision-language grammar induction with shared structure modeling. In International Conference on Learning Representations, 2021.

Hao Wu, Jiayuan Mao, Yufeng Zhang, Yuning Jiang, Lei Li, Weiwei Sun, and Wei-Ying Ma. Unified visual-semantic embeddings: Bridging vision and language with structured meaning representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6609-6618, 2019.

Zuopeng Yang, Daqing Liu, Chaoyue Wang, Jie Yang, and Dacheng Tao. Modeling image composition for complex scene generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7764-7773, 2022.

Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022.

Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 833-842, 2021.

Yiwu Zhong, Liwei Wang, Jianshu Chen, Dong Yu, and Yin Li. Comprehensive image captioning via scene graph decomposition. In European Conference on Computer Vision, pp. 211-229. Springer, 2020.

Yufan Zhou, Ruiyi Zhang, Changyou Chen, Chunyuan Li, Chris Tensmeyer, Tong Yu, Jiuxiang Gu, Jinhui Xu, and Tong Sun. Towards language-free training for text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 17907-17917, 2022.

Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. DM-GAN: Dynamic memory generative adversarial networks for text-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5802-5810, 2019.
# A RELATED WORK

Text-to-Image Synthesis There are mainly three types of models for text-to-image synthesis: GAN-based (Tao et al., 2022; Zhu et al., 2019; Li et al., 2019; Fu et al., 2020; El-Nouby et al., 2019), autoregressive (Gu et al., 2022b; Lee et al., 2022; Ding et al., 2022), and diffusion models (Liu et al., 2021b; Nichol et al., 2021; Ruiz et al., 2022). Zhang et al. (2021) propose XMC-GAN, a one-stage GAN that employs multiple contrastive losses between image-image, image-text, and region-token pairs. More recently, LAFITE (Zhou et al., 2022) enables language-free training by constructing pseudo image-text feature pairs using CLIP (Radford et al., 2021). As for autoregressive models, DALL-E adopts VQ-VAE to quantize image patches into tokens and then uses a transformer to generate the discrete tokens sequentially (Ramesh et al., 2021). Parti (Yu et al., 2022) and Make-A-Scene (Gafni et al., 2022) both leverage classifier-free guidance to improve controllability. As for diffusion models, Gu et al. (2022a) concatenate VQ-VAE with the diffusion model and show that the diffusion process can operate in a discrete latent space. DALL-E 2 adopts the CLIP text encoder so that the diffusion process inverts the textual features into images (Ramesh et al., 2022).

Structured Representations for Vision and Language Inferring shared structures across language and vision has been a long-term pursuit in unifying the two modalities (Schuster et al., 2015; Johnson et al., 2018; Zhong et al., 2020; Lou et al., 2022). Wu et al. (2019) utilize the structure from semantic parsing in a visual-semantic embedding framework to facilitate embedding learning. Wan et al. (2021) propose a new task whose goal is to learn a joint structure between semantic parsing and image regions. To the best of our knowledge, our work is the first attempt in T2I to incorporate language structures into the image synthesis process.

Diffusion Guidance To convert an unconditional diffusion model into a class-conditional one, Dhariwal & Nichol (2021) feed the noisy image from each step into a classifier and compute the classification loss. The loss is back-propagated to the image space, providing a gradient of the log conditional probability that is added to the score estimate. Similarly, in the T2I subdomain, Liu et al. (2021b) and Nichol et al. (2021) apply a noisy CLIP model to measure the cosine similarity between text prompts and noisy images.
# B IMPLEMENTATION DETAILS

Throughout the experiments, we implement our method on top of Stable Diffusion v1.4. For all comparisons between our method and Stable Diffusion, we fix the seed to generate the same initial Gaussian noise map and use 50 diffusion steps with PLMS sampling (Liu et al., 2021a). Unless otherwise specified, we fix the guidance scale to 7.5 and weight the key-value matrices in the cross-attention layers equally. We do not add hand-crafted prompt prefixes such as "a photo of" to the text input. Unless otherwise specified, we use the Stanza library (Qi et al., 2020) for constituency parsing and extract the noun phrases.
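
As a concrete illustration of the parsing step, the sketch below extracts NP spans with Stanza. It is a minimal version; additional filtering (e.g., keeping only the lowest-level noun phrases) is omitted here.

```python
import stanza

# The constituency processor requires the tokenize and pos processors.
nlp = stanza.Pipeline(lang="en", processors="tokenize,pos,constituency")

def noun_phrases(prompt: str):
    """Collect the text of every NP constituent in the prompt."""
    spans = []

    def walk(tree):
        if tree.label == "NP":
            spans.append(" ".join(tree.leaf_labels()))
        for child in tree.children:
            walk(child)

    for sentence in nlp(prompt).sentences:
        walk(sentence.constituency)
    return spans

# noun_phrases("a red apple and a green bag")
# -> ["a red apple and a green bag", "a red apple", "a green bag"]
```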
# C VISUALIZATION OF ATTENTION MAPS

In this section, we visualize cross-attention maps to support our assumptions and claims in Sec. 2. As shown in Fig. 8, the attention maps of Stable Diffusion and our method have similar spatial distributions and highlights throughout the diffusion process. This supports our assumption in Sec. 2.2 that the attention map $M_{t}$ remains largely unchanged even with multiple values in each cross-attention layer. We observe a similar phenomenon in Fig. 9, except that our method accelerates the formation of interpretable attention for both the "green" and "clock" tokens.



Figure 8: Visualization of cross-attention maps of Stable Diffusion and our method. We compare maps of multiple tokens throughout the whole diffusion process at equal intervals.

Figs. 8 and 9 also support our claim that the values carry rich textual semantics that are mapped into the image space as content. For instance, our method parses the prompt in Fig. 8 into "A long narrow yellow kitchen" and "black and white floor tiles", then encodes and aligns the spans separately to form $\mathbb{V}$. Empirically, these operations enhance the semantics of "yellow" and "black and white" separately and prevent "yellow" from being blended into "black and white", which explains the disappearance of color leakage in our image compared with Stable Diffusion. Although one may attribute the leakage to an incorrect attention distribution for the "yellow" token, we argue that this is not the critical reason: even though the attention maps of "yellow" from our method slightly highlight the "floor tile" regions, no yellow appears in our generated image, suggesting that inaccurate attention distributions contribute little to the final image content. In addition, Fig. 10 shows that using multiple Keys can rectify the image layout and mitigate missing-object issues. The sheep-like attention maps in the third row verify the proposed variant of our method for concept conjunctions.
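
The mechanism discussed above (a single attention map computed from the full prompt, applied to multiple value sequences built from the re-aligned spans) can be summarized in a single-head sketch. Batching, multi-head attention, and Stable Diffusion's exact projection shapes are omitted, and the equal weighting follows the setup in Appendix B; this is a sketch under those assumptions, not the exact implementation.

```python
import torch
import torch.nn.functional as F

def structured_cross_attention(q, full_enc, span_encs, W_k, W_v):
    """q: (n_pixels, d) image queries; full_enc: (77, d_text) full-prompt encoding;
    span_encs: list of (77, d_text) re-aligned span encodings (the set V);
    W_k, W_v: (d_text, d) key/value projections."""
    k = full_enc @ W_k
    attn = F.softmax(q @ k.T / k.shape[-1] ** 0.5, dim=-1)  # one map M_t, from the full prompt
    values = [enc @ W_v for enc in [full_enc] + span_encs]
    # Attend each value sequence with the same map, then average the outputs.
    return torch.stack([attn @ v for v in values]).mean(dim=0)
```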


Figure 9: Visualization of cross-attention maps corresponding to the tokens "green" and "clock" across the full diffusion timesteps, from step 50 to step 1 at equal intervals. Red boxes highlight steps where our method accelerates the formation of correct attention on the clock region. The evolution of the token "green" is also more interpretable in our method. Although the image composition is imperfect, the visualization still supports our assumptions and claims in Sec. 2.2.


Figure 10: Visualization of attention maps for the token "sheep" under different methods. Our method with multiple Keys successfully rectifies the image layout.
# D ABLATION STUDY
# D.1 A CASE STUDY OF ATTRIBUTE BINDING

Here, we present a case study showing evidence for two root causes of incorrect attribute binding. The first is contextualized token embeddings due to causal attention masks. As shown on the left side of Fig. 11, we first encode two different prompts with a shared component, e.g., "a red apple" as the naive prompt and "a green bag and a red apple" as the more complicated one. Using the encoding sequence of the naive prompt, we obtain an image of a red apple alone; it is reasonable to assume that its yellowish-green regions are natural results of learning from authentic apple images. We then replace the token embeddings of the naive prompt with the embeddings of the same tokens from the more complicated prompt. Starting from the same Gaussian noise, the model now generates an unnatural image with a solid green region (in the yellow bounding box). This result shows that the token "red" is contaminated with the semantics of the preceding token "green" and explains some images with color-leakage problems (e.g., Fig. 1).



Figure 11: Examples showing the potential root causes of incorrect attribute binding. Left: the large green regions in the second image show that the hidden-state output of the token "red" is contextualized by the preceding token "green". Right: visualization of attention maps showing that the semantics of the token "bird" mistakenly attend to the mouth region of the bear; the final image shows the unnatural beak-like shape of the bear.



The second cause is inaccurate attention maps. In Fig. 11 (right), we visualize five cross-attention maps (averaged across attention heads) from both the downsampling and upsampling blocks. The attention maps show the salient regions corresponding to the token "bird" and demonstrate highlighted regions in the bottom-left corner, where the bird is located in the final image. Despite the interpretable structure, the maps also show saliency around the mouth region of the bear across all five layers. These inaccurate attention maps thus lead to a beak-like mouth of the bear in the image.
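
The left-hand experiment in Fig. 11 amounts to a simple embedding substitution between the two encoded prompts. Below is a sketch under the assumption that the positions of the shared tokens in each sequence are already known.

```python
import torch

def swap_shared_token_embeddings(naive_enc, complex_enc, naive_pos, complex_pos):
    """Replace the naive prompt's token encodings (e.g. for "a red apple") with
    the encodings of the same tokens taken from the more complicated prompt.
    naive_pos and complex_pos are aligned index lists of the shared tokens."""
    out = naive_enc.clone()
    for i, j in zip(naive_pos, complex_pos):
        out[i] = complex_enc[j]
    return out  # condition on this, from the same Gaussian noise, to reproduce Fig. 11 (left)
```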
# D.2 COMPARISON OF PARSERS

In this subsection, we compare using a constituency parser with using a scene graph parser to obtain text spans and generate images. Table 3 lists the text spans extracted by each parser. Example 0 shows that both parsers end up with the same results for CC-500 prompts. For Examples 1-4, the scene graph parser generates more spans than the constituency parser, and we notice that concepts in the middle of the sentence appear more often in these spans than other noun tokens, such as "egg" or "red sauce" in Example 3. This imbalance potentially explains why the "egg" looks more highlighted in Fig. 12 (bottom left). On the other hand, "orange slices" appears more often in the constituency parsing results, leading to better "orange" textures in the generated image. Similar observations can be made in Example 2, where "green pole" is emphasized more often by the constituency parser.
# E LIMITATIONS & FUTURE WORK

There are several limitations to our work. First of all, our method depends on an external parsing function that may not be perfect. We adopt the commonly used Stanza library (Qi et al., 2020) for constituency parsing; the parsing function can be replaced with a more advanced learning-based method for improvement. Secondly, our method mainly focuses on compositional T2I, neglecting any
<table><tr><td></td><td>Constituency Parser</td><td>Scene Graph Parser</td></tr><tr><td rowspan="2">Example 0</td><td colspan="2">CC-500 Prompt: A white sheep and a red car</td></tr><tr><td>“A white sheep”, “a red car”</td><td>“A white sheep”, “a red car”</td></tr><tr><td rowspan="2">Example 1</td><td colspan="2">Prompt: A silver car with a black cat sleeping on top of it</td></tr><tr><td>“A silver car”, “a black cat”, “A silver car with a black cat”</td><td>“A silver car”, “a black cat”, “top of it”, “a black cat sleeping on top of it”</td></tr><tr><td rowspan="2">Example 2</td><td colspan="2">Prompt: A horse running in a white field next to a black and green pole</td></tr><tr><td>“A horse”, “a white field”, “a black and green pole”, “a white field next to a black and green pole”</td><td>“A horse”, “a white field”, “a black and green pole”, “A horse running in a white field”</td></tr><tr><td rowspan="2">Example 3</td><td colspan="2">Prompt: Rice with red sauce with eggs over the top and orange slices on the side</td></tr><tr><td>“red sauce”, “the side”, “the top and orange slices”, “the top and orange slices on the side”</td><td>“red sauce”, “the side”, “the top and orange slices”, “Rice with red sauce”, “red sauce with eggs”, “the top and orange slices on the side”, “red sauce with eggs over the top and orange slices”</td></tr><tr><td rowspan="2">Example 4</td><td colspan="2">Prompt: A pink scooter with a black seat next to a blue car</td></tr><tr><td>“A pink scooter”, “a black seat”, “a blue car”</td><td>“A pink scooter”, “a black seat”, “a blue car”, “a pink scooter with a black seat”, “a black seat next to a blue car”</td></tr></table>

Table 3: Comparison between the constituency parser and the scene graph parser. For CC-500 prompts, both parsers end up with the same results. For general prompts, the scene graph parser tends to generate more text spans, with concepts from the middle of the sentence appearing multiple times across different spans.

Figure 12: Synthesized images corresponding to prompts in Table 3. Yellow boxes annotate compositions that are improved using different parsers.

style descriptions. The parsing mechanism may categorize a style description, e.g., "in Van Gogh style", as a separate noun phrase that cannot be grounded in the image space. In addition, we discover that StructureDiffusion tends to generate images similar to those of Stable Diffusion; considering the efficiency of human evaluation, we therefore filtered out the $20\%$ most similar image pairs in Table 1, so the improvement could be smaller when evaluated on the full set of generated images. Future work may focus on devising explicit methods to associate attributes with objects using spatial information as input. For example, how to make a text-to-image synthesis model interpret coordinate information with limited fine-tuning or prompt-tuning steps would be an appealing direction.
# F ADDITIONAL RESULTS

# Stable Diffusion Ours

A green apple and a red banana



A gold clock and a green bench



A red cake and a blue suitcase



A brown dog and a blue suitcase



A yellow bowl and a blue cat



Figure 13: Qualitative results on CC-500

# Stable Diffusion Ours

A red bird and a green banana



A blue bird and a brown bowl



A blue backpack and a brown bear



A blue backpack and a brown cow



A brown dog and a blue horse



# Stable Diffusion

a purple cat with a orange hat on its head



A red helmet is on a yellow toilet in the dirt









A bathroom with red tile and a green shower curtain



A white toilet bowl with a purple rug in front





A spoon and bowl of red pea soup and green beans with onions



Figure 14: Qualitative results on ABC-6K

# Stable Diffusion

A red cat sits on a rug with a black cord







A red stop sign above a white walk across road sign



A spacious kitchen has white walls, red countertops, and a large stove



A pink towel stands out greatly in the white bathroom



A cow standing outside of a white building with a blue entrance

# Stable Diffusion

# Ours

A yellow cat is wearing a blue plastic baseball hat.





Two elephants walking by a green wall with tan palm trees painted on it



A large white bed sitting in a hotel room next to a red couch



A large pizza on a white plate sitting on a blue table



A black and white curtain hanging in a room that is decorated in black, white and red



Stable Diffusion

Structure Diffusion



a skateboarder jumps over a silver dome with stone domes in front of it and bicyclist behind



a stone building with a wooden bench placed near it



a wooden building with a stone bench placed near it



a red sphere and a blue cube

Figure 15: Qualitative results characterizing attributes beyond colors, including shape, size and materials.



Stable Diffusion

small silver laptop sitting on top of a wooden computer desk



a silver stove sits next to a counter on a wooden floor



a wooden building with a stone bench placed near it



a red sphere and a yellow cube



a blue sphere and a red cube



Stable Diffusion

Structure Diffusion

two back to back wooden benches next to a shrub near a stone walkway



a shiny silver metal microwave near wooden cabinets



a cat is perched upon a stone bench that sits on a wooden patio



a brown sphere and a cyan cube



a green sphere and a red cube









Figure 16: A prompt "an astronaut riding a horse" appended with different (combinations of) style descriptions. Our method has no negative effects on the image style. "base" refers to Stable Diffusion.




2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2b1928527d30e21cc6a7db9c8ccda775c98feb396fda7a670d0f8dba67335cd
size 2195045
2023/Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis/layout.json
ADDED
The diff for this file is too large to render. See raw diff
2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_content_list.json
ADDED
@@ -0,0 +1,2124 @@
[
  {
    "type": "text",
    "text": "TRANSPEECH: SPEECH-TO-SPEECH TRANSLATION WITH BILATERAL PERTURBATION",
    "text_level": 1,
    "bbox": [169, 99, 823, 146],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Rongjie Huang $^{1*}$ ; Jinglin Liu $^{1*}$ ; Huadai Liu $^{1*}$ ; Yi Ren $^{2}$ , Lichao Zhang $^{1}$ , Jinzheng He $^{1}$ , Zhou Zhao $^{1\\dagger}$",
    "bbox": [184, 167, 704, 200],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "<sup>1</sup>Zhejiang University {rongjiehuang, jinglinliu, huadailiu, zhaozhou}@zju.edu.cn",
    "bbox": [181, 212, 754, 243],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "2ByteDance ren.yi@bytedance.com",
    "bbox": [181, 253, 382, 284],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "ABSTRACT",
    "text_level": 1,
    "bbox": [450, 320, 547, 335],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Direct speech-to-speech translation (S2ST) with discrete units leverages recent progress in speech representation learning. Specifically, a sequence of discrete representations derived in a self-supervised manner are predicted from the model and passed to a vocoder for speech reconstruction, while still facing the following challenges: 1) Acoustic multimodality: the discrete units derived from speech with same content could be indeterministic due to the acoustic property (e.g., rhythm, pitch, and energy), which causes deterioration of translation accuracy; 2) high latency: current S2ST systems utilize autoregressive models which predict each unit conditioned on the sequence previously generated, failing to take full advantage of parallelism. In this work, we propose TranSpeech, a speech-to-speech translation model with bilateral perturbation. To alleviate the acoustic multimodal problem, we propose bilateral perturbation (BiP), which consists of the style normalization and information enhancement stages, to learn only the linguistic information from speech samples and generate more deterministic representations. With reduced multimodality, we step forward and become the first to establish a non-autoregressive S2ST technique, which repeatedly masks and predicts unit choices and produces high-accuracy results in just a few cycles. Experimental results on three language pairs demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with a baseline textless S2ST model. Moreover, our parallel decoding shows a significant reduction of inference latency, enabling speedup up to $21.4\\mathrm{x}$ than autoregressive technique. $^{1}$",
    "bbox": [228, 353, 769, 647],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "1 INTRODUCTION",
    "text_level": 1,
    "bbox": [173, 676, 336, 690],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Speech-to-speech translation (S2ST) aims at converting speech from one language into speech in another, significantly breaking down communication barriers between people not sharing a common language. Among the conventional method (Lavie et al., 1997; Nakamura et al., 2006; Wahlster, 2013), the cascaded system of automatic speech recognition (ASR), machine translation (MT), or speech-to-text translation (S2T) followed by text-to-speech synthesis (TTS) have demonstrated reasonable results yet suffering from expensive computational costs. Compared to these cascaded systems, recently proposed direct S2ST literature (Jia et al., 2019; Zhang et al., 2020; Jia et al., 2021; Lee et al., 2021a;b) demonstrate the benefits of lower latencies as fewer decoding stages are needed.",
    "bbox": [169, 708, 826, 821],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Among them, Lee et al. (2021a,b) leverage recent progress on self-supervised discrete units learned from unlabeled speech for building textless S2ST systems, further supporting translation between unwritten languages. As illustrated in Figure 1(a), the unit-based textless S2ST system consists of",
    "bbox": [169, 827, 826, 869],
    "page_idx": 0
  },
  {
    "type": "header",
    "text": "Published as a conference paper at ICLR 2023",
    "bbox": [171, 32, 478, 47],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "*Equal Contribution",
    "bbox": [189, 883, 315, 895],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "†Corresponding Author",
    "bbox": [194, 896, 336, 909],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "<sup>1</sup>Audio samples are available at https://TranSpeech.github.io/.",
    "bbox": [194, 910, 643, 922],
    "page_idx": 0
  },
  {
    "type": "page_number",
    "text": "1",
    "bbox": [493, 948, 503, 959],
    "page_idx": 0
  },
  {
    "type": "image",
    "img_path": "images/1cbc6375b1ba24216313f72513f62ac86692dccd95214d447548b8e51bd3b8e5.jpg",
    "image_caption": [
      "(a) Direct speech-to-speech translation (S2ST) system"
    ],
    "image_footnote": [],
    "bbox": [153, 83, 560, 200],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/0932f99432b0bea921126f88dfadbfc17598c1f7706092d710cf5129a52d2a91.jpg",
    "image_caption": [
      "(b) Multimodality challenges",
      "Figure 1: 1) Acoustic multimodality: Speech with the same content \"Vielen dank\" could be different due to a variety of acoustic conditions; 2) Linguistic multimodality (Gu et al., 2017; Wang et al., 2019): There are multiple correct target translations (\"Danke schon\" and \"Vielen dank\") for the same source word/phrase/sentence (\"Thank you\")."
    ],
    "image_footnote": [],
    "bbox": [563, 83, 841, 202],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "a speech-to-unit translation (S2UT) model followed by a unit-based vocoder that converts discrete units to speech, leading to a significant improvement over previous literature.",
    "bbox": [169, 316, 823, 345],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "In modern textless speech-to-speech translation (S2ST), our goal is mainly two-fold: 1) high quality: direct S2ST is challenging, especially without using the transcription. 2) low latency: high inference speed is essential when considering real-time translation. However, the current development of the unit-based textless S2ST system is hampered by two major challenges: 1) It is challenging to achieve high translation accuracy due to the acoustic multimodality (as illustrated in the orange dotted box in Figure 1(b)): different from the language tokens (e.g., bpe) used in the text translation, the self-supervised representation derived from speech with the same content could be different due to a variety of acoustic conditions (e.g., speaker identity, rhythm, pitch, and energy), including both linguistic content and acoustic information. As such, the indeterministic training target for speech-to-unit translation fails to yield good results; and 2) Building a parallel model upon multimodal S2ST systems with reasonable accuracy is challenging as it introduces further indeterminacy. A non-autoregressive (NAR) S2ST system generates all tokens in parallel without any limitation of sequential dependency, making it a poor approximation to the actual target distribution. With the acoustic multimodality unsettled, the parallel decoding approaches increasingly burden S2ST capturing the distribution of target translation.",
|
| 201 |
+
{
|
| 202 |
+
"type": "text",
"text": "In modern textless speech-to-speech translation (S2ST), our goals are mainly two-fold: 1) high quality: direct S2ST is challenging, especially without using transcriptions; and 2) low latency: high inference speed is essential for real-time translation. However, the current development of unit-based textless S2ST systems is hampered by two major challenges: 1) It is challenging to achieve high translation accuracy due to acoustic multimodality (as illustrated in the orange dotted box in Figure 1(b)): different from the language tokens (e.g., BPE) used in text translation, the self-supervised representations derived from speech with the same content can differ due to a variety of acoustic conditions (e.g., speaker identity, rhythm, pitch, and energy), since they encode both linguistic content and acoustic information. As such, the indeterministic training target for speech-to-unit translation fails to yield good results; and 2) Building a parallel model upon multimodal S2ST systems with reasonable accuracy is challenging, as it introduces further indeterminacy. A non-autoregressive (NAR) S2ST system generates all tokens in parallel without any sequential dependency, making it a poor approximation to the actual target distribution. With the acoustic multimodality unsettled, parallel decoding approaches further burden S2ST in capturing the distribution of target translations.",
"bbox": [169, 351, 826, 559],
"page_idx": 1
},
{
"type": "text",
"text": "In this work, we propose TranSpeech, a fast speech-to-speech translation model with bilateral perturbation. To tackle the acoustic multimodal challenge, we propose a Bilateral Perturbation (BiP) technique that finetunes a self-supervised speech representation learning model with the CTC loss to generate deterministic representations agnostic to acoustic variation. Based on a preliminary speech analysis that decomposes a signal into linguistic and acoustic information, the bilateral perturbation consists of the 1) style normalization stage, which eliminates the acoustic-style information in speech and creates the style-agnostic \"pseudo text\" for finetuning; and 2) information enhancement stage, which applies an information bottleneck to create speech samples variant in acoustic conditions (i.e., rhythm, pitch, and energy) while preserving linguistic information. The proposed bilateral perturbation ensures that the speech encoder learns only the linguistic information from acoustic-variant speech samples, significantly reducing the acoustic multimodality in unit-based S2ST.",
"bbox": [169, 566, 826, 720],
"page_idx": 1
},
{
"type": "text",
"text": "The proposed bilateral perturbation eases acoustic multimodality and makes NAR generation possible. As such, we further step forward and become the first to establish a NAR S2ST technique, which repeatedly masks and predicts unit choices and produces high-accuracy results in just a few cycles. Experimental results on three language pairs demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with baseline textless S2ST models. The parallel decoding algorithm requires as few as 2 iterations to generate samples that outperform competing systems, enabling a speedup of up to $21.4\\mathrm{x}$ compared to the autoregressive baseline. TranSpeech further enjoys a speed-performance trade-off with advanced decoding choices, including multiple iterations, length beam, and noisy parallel decoding, trading up to 3 BLEU points in translation quality. The main contributions of this work include:",
"bbox": [169, 726, 826, 864],
"page_idx": 1
},
{
"type": "text",
"text": "- Through preliminary speech analysis, we propose bilateral perturbation which assists in generating deterministic representations agnostic to acoustic variation. This novel technique alleviates the acoustic multimodal challenge and leads to significant improvement in S2ST.",
"bbox": [171, 881, 823, 925],
"page_idx": 1
},
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 1
},
{
"type": "page_number",
"text": "2",
"bbox": [493, 948, 503, 959],
"page_idx": 1
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- We step forward and become the first to establish a non-autoregressive S2ST technique with a mask-predict algorithm to speed up the inference procedure. To further reduce the linguistic multimodality in NAR translation, we apply the knowledge distillation technique and construct a less noisy and more deterministic corpus.",
"- Experimental results on three language pairs demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with baseline textless S2ST models. In terms of inference speed, our parallel decoding enables a speedup of up to $21.4\\mathrm{x}$ compared to the autoregressive baseline."
],
"bbox": [171, 103, 823, 208],
"page_idx": 2
},
{
"type": "text",
"text": "2 BACKGROUND: DIRECT SPEECH-TO-SPEECH TRANSLATION",
"text_level": 1,
"bbox": [171, 229, 705, 244],
"page_idx": 2
},
{
"type": "text",
"text": "Direct speech-to-speech translation has made huge progress to date. Translatotron (Jia et al., 2019) is the first direct S2ST model and shows reasonable translation accuracy and speech naturalness. Translatotron 2 (Jia et al., 2021) utilizes an auxiliary target phoneme decoder to promote translation quality but still needs phoneme data during training. UWSpeech (Zhang et al., 2020) builds a VQ-VAE model and discards transcripts in the target language, while paired speech and phoneme corpora of a written language are still required.",
"bbox": [169, 261, 826, 347],
"page_idx": 2
},
{
"type": "text",
"text": "Most recently, a direct S2ST system (Lee et al., 2021a) takes advantage of self-supervised learning (SSL) and demonstrates results without using text data. However, the majority of SSL models are trained by reconstructing (Chorowski et al., 2019) or predicting unseen speech signals (Chung et al., 2019), which inevitably includes factors unrelated to the linguistic content (i.e., the acoustic condition). As such, the indeterministic training target for speech-to-unit translation fails to yield good results. The textless S2ST system (Lee et al., 2021b) further demonstrates how to obtain speaker-invariant representations by finetuning the SSL model to disentangle the speaker-dependent information. However, this system only constrains speaker identity, and the remaining aspects (i.e., content, rhythm, pitch, and energy) are still lumped together.",
"bbox": [169, 352, 826, 479],
"page_idx": 2
},
{
"type": "text",
"text": "At the same time, various approaches that perturb information flow to fine-tune acoustic models have demonstrated effectiveness in promoting downstream performance. A line of work (Yang et al., 2021; Gao et al., 2022) utilizes pre-trained encoders and introduces approaches that reprogram acoustic models for downstream tasks. For multi-lingual tuning, Yen et al. (2021) propose a novel adversarial reprogramming approach for low-resource spoken command recognition (SCR). Sharing a common insight, we tune a pre-trained acoustic model with the bilateral perturbation technique and generate more deterministic units agnostic to acoustic conditions, including rhythm, pitch, and energy. Following the common textless setup in Figure 1(a), we design a challenging NAR S2ST technique especially for applications requiring low latency. More details are provided in Appendix A.",
"bbox": [169, 484, 826, 609],
"page_idx": 2
},
{
"type": "text",
"text": "3 SPEECH ANALYSIS AND BILATERAL PERTURBATION",
"text_level": 1,
"bbox": [171, 631, 640, 647],
"page_idx": 2
},
{
"type": "text",
"text": "3.1 ACOUSTIC MULTIMODALITY",
"text_level": 1,
"bbox": [171, 662, 416, 678],
"page_idx": 2
},
{
"type": "text",
"text": "As reported in the previous textless S2ST system (Lee et al., 2021b), speech representations predicted by the self-supervised pre-trained model include both linguistic and acoustic information. As such, the derived representations of speech samples with the same content can differ due to acoustic variation, and the indeterministic training target for speech-to-unit translation (as illustrated in Figure 1(a)) fails to yield good results. To address this multimodal issue, we conduct a preliminary speech analysis and introduce the bilateral perturbation technique. More details on how indeterministic units influence S2ST are provided in Appendix C.",
"bbox": [169, 689, 826, 789],
"page_idx": 2
},
{
"type": "text",
"text": "3.2 SPEECH ANALYSIS",
"text_level": 1,
"bbox": [171, 806, 346, 820],
"page_idx": 2
},
{
"type": "text",
"text": "In this part, we decompose speech variations (Cui et al., 2022; Huang et al., 2021; Yang et al., 2022a) into linguistic content and acoustic condition (e.g., speaker identity, rhythm, pitch, and energy) and provide a brief primer on each of these components.",
"bbox": [169, 832, 825, 875],
"page_idx": 2
},
{
"type": "text",
"text": "Linguistic Content represents the meaning of speech signals. To translate a speech sample to another language, learning the linguistic information from the speech signal is crucial; Speaker Identity is perceived as the voice characteristics of a speaker. Rhythm characterizes how fast the speaker utters",
"bbox": [169, 881, 825, 925],
"page_idx": 2
},
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 2
},
{
"type": "page_number",
"text": "3",
"bbox": [493, 948, 504, 959],
"page_idx": 2
},
{
"type": "image",
"img_path": "images/8b9f49bb16e16f98dd7b348efbd83ca16c8185e3b1a1eefca9bb512d62d09827.jpg",
"image_caption": [
"(a) Speech Analysis and Bilateral Perturbation",
"(b) TranSpeech",
"Figure 2: In subfigure (a), we use $RR$ and $F$ to respectively denote the random resampling and a chain function for random pitch shifting. In subfigure (b), the \"sinusoidal-like symbol\" denotes the positional encoding, and the model has $N_{b}$ encoder and decoder blocks. During training, we randomly select the masked positions and compute the cross-entropy loss (denoted as \"CE\")."
],
"image_footnote": [],
"bbox": [174, 66, 803, 265],
"page_idx": 3
},
{
"type": "text",
"text": "each syllable, and the duration plays a vital role in acoustic variation; Pitch is an essential component of intonation, which is the result of a constant attempt to hit the pitch targets of each syllable; Energy affects the volume of speech, where stress and tone represent different energy values.",
"bbox": [169, 375, 823, 417],
"page_idx": 3
},
{
"type": "text",
"text": "3.3 BILATERAL PERTURBATION",
"text_level": 1,
"bbox": [171, 436, 408, 450],
"page_idx": 3
},
{
"type": "text",
"text": "To alleviate the multimodal problem and increase translation accuracy in the S2ST system, we propose bilateral perturbation, which disentangles the acoustic variation and generates deterministic speech representations according to the linguistic content. Specifically, we leverage the success of connectionist temporal classification (CTC) finetuning (Baevski et al., 2019) with a pre-trained speech encoder, using the perturbed input speech and normalized targets. Since how to obtain speaker-invariant representations has been well-studied (Lee et al., 2021b; Hsu et al., 2020), we focus on the more challenging acoustic conditions in a single-speaker scenario, including rhythm, pitch, and energy variations.",
"bbox": [169, 462, 826, 575],
"page_idx": 3
},
{
"type": "text",
"text": "3.3.1 OVERVIEW",
"text_level": 1,
"bbox": [171, 590, 305, 603],
"page_idx": 3
},
{
"type": "text",
"text": "Denote the domain of speech samples by $S \\subset \\mathbb{R}$ and the perturbed speech in the style normalization and information enhancement stages by $\\overline{S}$ and $\\hat{S}$ respectively. The source language is therefore a sequence of speech samples $X = \\{x_{1},\\dots,x_{N^{\\prime}}\\}$, where $N^{\\prime}$ is the number of frames in the source speech. The SSL model is composed of a multi-layer convolutional feature encoder $f$ which takes raw audio $S$ as input and outputs discrete latent speech representations. In the end, the audio in the target language is represented as discrete units $Y = \\{y_{1},\\ldots ,y_{N}\\}$, where $N$ is the number of units. The overview of the information flow is shown in Figure 2(a), and we consider tackling the multimodality on both sides of CTC finetuning, including 1) the style normalization stage, which eliminates the acoustic information in the CTC target and creates the acoustic-agnostic \"pseudo text\"; and 2) the information enhancement stage, which applies a bottleneck on acoustic features to create speech samples variant in acoustic conditions (e.g., rhythm, pitch, and energy) while preserving linguistic content information. Finally, we train an ASR model using the perturbed speech $\\hat{S}$ as input and the \"pseudo text\" as the target. As a result, given speech samples with acoustic variation, the ASR model with CTC decoding is encouraged to learn the \"average\" information referring to linguistic content and generate deterministic representations, significantly reducing multimodality and promoting speech-to-unit translation. In the following subsections, we present the bilateral perturbation technique in detail:",
"bbox": [169, 614, 826, 843],
"page_idx": 3
},
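The CTC finetuning loop sketched in this overview fits in a few lines. The snippet below is a minimal illustration, not the paper's released code: `encoder` stands for a generic pre-trained SSL speech encoder (e.g., mHuBERT), its interface and `hidden_dim` are assumptions, and unit ids are assumed to start at 1 so that index 0 can serve as the CTC blank.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class UnitCTCModel(nn.Module):
    """Pre-trained SSL encoder topped with a linear unit classifier (sketch)."""
    def __init__(self, encoder: nn.Module, hidden_dim: int, n_units: int = 1000):
        super().__init__()
        self.encoder = encoder
        # +1 output class for the CTC blank symbol (index 0); units use ids 1..n_units.
        self.proj = nn.Linear(hidden_dim, n_units + 1)

    def forward(self, perturbed_wave: torch.Tensor) -> torch.Tensor:
        feats = self.encoder(perturbed_wave)          # (B, T, H) frame features
        return self.proj(feats).log_softmax(dim=-1)   # (B, T, V) log-probs

def ctc_finetune_step(model, perturbed_wave, pseudo_text, in_lens, tgt_lens):
    # `pseudo_text`: unit ids derived from style-normalized speech (the CTC target);
    # `perturbed_wave`: the information-enhanced input of Section 3.3.3.
    log_probs = model(perturbed_wave).transpose(0, 1)  # CTC expects (T, B, V)
    return F.ctc_loss(log_probs, pseudo_text, in_lens, tgt_lens, blank=0)
```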
{
"type": "text",
"text": "3.3.2 STYLE NORMALIZATION",
"text_level": 1,
"bbox": [171, 856, 401, 871],
"page_idx": 3
},
{
"type": "text",
"text": "To create the acoustic-agnostic \"pseudo text\" for CTC finetuning, the acoustic-style information should be eliminated and disentangled: 1) We first compute the averaged pitch fundamental frequency $\\bar{p}$ and energy $\\bar{e}$ values in the original dataset $S$; and 2) for each sample in $S$, we conduct pitch shifting to",
"bbox": [169, 881, 825, 925],
"page_idx": 3
},
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [493, 948, 503, 959],
"page_idx": 3
},
{
"type": "text",
"text": "$\\overline{p}$ and normalize its energy to $\\overline{e}$, resulting in a new dataset $\\overline{S}$ with the averaged acoustic condition, where the style-specific information has been eliminated; finally, 3) the self-supervised learning (SSL) model encodes $\\overline{S}$ and creates the normalized targets for CTC finetuning.",
"bbox": [169, 103, 826, 148],
"page_idx": 4
},
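A hedged sketch of this style-normalization stage follows: shift each utterance's pitch toward the corpus-average F0 `p_bar` and rescale its energy to the corpus-average RMS `e_bar`. The function name, pYIN F0 estimator, and parameter choices are illustrative assumptions, not the paper's exact recipe.

```python
import numpy as np
import librosa

def normalize_style(wave: np.ndarray, sr: int, p_bar: float, e_bar: float) -> np.ndarray:
    # Estimate the utterance's mean F0 with pYIN, ignoring unvoiced (NaN) frames.
    f0, _, _ = librosa.pyin(wave, fmin=librosa.note_to_hz("C2"),
                            fmax=librosa.note_to_hz("C7"), sr=sr)
    f0_mean = np.nanmean(f0)
    if np.isfinite(f0_mean):
        # Semitone distance between the utterance mean and the corpus mean.
        n_steps = 12.0 * np.log2(p_bar / f0_mean)
        wave = librosa.effects.pitch_shift(wave, sr=sr, n_steps=n_steps)
    # Rescale overall energy to the corpus-average RMS value.
    rms = np.sqrt(np.mean(wave ** 2)) + 1e-8
    return wave * (e_bar / rms)
```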
{
"type": "text",
"text": "3.3.3 INFORMATION ENHANCEMENT",
"text_level": 1,
"bbox": [171, 161, 444, 176],
"page_idx": 4
},
{
"type": "text",
"text": "According to speech samples with different acoustic conditions, the ASR model is supposed to learn the deterministic representation referring to linguistic content. As such, we apply the following functions as an information bottleneck on acoustic features (e.g., rhythm, pitch, and energy) to create highly acoustic-variant speech samples $\\hat{S}$ while keeping the linguistic content unchanged, including 1) formant shifting $fs$, 2) pitch randomization $pr$, 3) random frequency shaping using a parametric equalizer $peq$, and 4) random resampling $RR$.",
"bbox": [169, 185, 826, 272],
"page_idx": 4
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- For rhythm information, random resampling $RR$ divides the input into segments of random lengths, and we randomly stretch or squeeze each segment along the time dimension.",
"- For pitch information, we apply the chain function $F = fs(pr(peq(S)))$ to randomly shift the pitch value of the original speech $S$.",
"- For energy information, we perturb the audio in the waveform domain."
],
"bbox": [169, 282, 825, 364],
"page_idx": 4
},
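The random-resampling ($RR$) rhythm perturbation in the list above can be sketched as follows: split the waveform into random-length segments and stretch or squeeze each one in time via interpolation. The segment-length and rate ranges are illustrative assumptions (the paper's actual hyperparameters live in its Appendix E).

```python
import numpy as np

def random_resample(wave, seg_len=(2000, 4000), rate=(0.7, 1.3), seed=0):
    rng = np.random.default_rng(seed)
    out, i = [], 0
    while i < len(wave):
        seg = wave[i:i + int(rng.integers(*seg_len))]
        r = rng.uniform(*rate)  # r > 1 stretches (slower), r < 1 squeezes (faster)
        # Linearly interpolate the segment onto a grid of the new length.
        idx = np.linspace(0, len(seg) - 1, max(1, int(len(seg) * r)))
        out.append(np.interp(idx, np.arange(len(seg)), seg))
        i += len(seg)
    return np.concatenate(out)
```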
{
"type": "text",
"text": "The perturbed waveforms $\\hat{S}$ are highly variant in acoustic features (i.e., rhythm, pitch, and energy) while preserving linguistic information. This ensures that the speech encoder learns the \"acoustic-averaged\" information referring to linguistic content and generates deterministic representations. The hyperparameters of the perturbation functions are included in Appendix E.",
"bbox": [169, 378, 826, 436],
"page_idx": 4
},
{
"type": "text",
"text": "4 TRANSPEECH",
"text_level": 1,
"bbox": [171, 454, 323, 470],
"page_idx": 4
},
{
"type": "text",
"text": "The S2ST pipeline is illustrated in Figure 2(a): we 1) use the SSL HuBERT (Hsu et al., 2021) tuned by BiP to derive discrete units of the target speech; 2) build the sequence-to-sequence model TranSpeech for speech-to-unit translation (S2UT); and 3) apply a separately trained unit-based vocoder to convert the translated units into a waveform.",
"bbox": [169, 486, 826, 542],
"page_idx": 4
},
{
"type": "text",
"text": "In this section, we first overview the encoder-decoder architecture for TranSpeech, following which we introduce the knowledge distillation procedure to alleviate the linguistic multimodal challenges. Finally, we present the mask-predict algorithm in both training and decoding procedures and include more advanced decoding choices.",
"bbox": [169, 549, 826, 604],
"page_idx": 4
},
{
"type": "text",
"text": "4.1 ARCHITECTURE",
"text_level": 1,
"bbox": [171, 622, 328, 636],
"page_idx": 4
},
{
"type": "text",
"text": "The overall architecture has been illustrated in Figure 2(b), and we put more details on the encoder and decoder block in Appendix B.",
"bbox": [169, 648, 823, 678],
"page_idx": 4
},
{
"type": "text",
"text": "Conformer Encoder. Different from previous textless S2ST literature (Lee et al., 2021b), we use conformer blocks (Gulati et al., 2020) in place of transformer blocks (Vaswani et al., 2017). The conformer model (Guo et al., 2021; Chen et al., 2021) has demonstrated its efficiency in combining convolution neural networks and transformers to model both local and global dependencies of audio in a parameter-efficient way, achieving state-of-the-art results on various downstream tasks. Furthermore, we employ the multi-head self-attention with a relative sinusoidal positional encoding scheme from Transformer-XL (Dai et al., 2019), which promotes the robustness of the self-attention module and generalizes better to different utterance lengths.",
"bbox": [169, 683, 826, 796],
"page_idx": 4
},
{
"type": "text",
"text": "Non-autoregressive Unit Decoder. Current S2ST systems utilize autoregressive S2UT models and suffer from high inference latency. Given the source speech of $N'$ frames $X = \\{x_{1},\\ldots ,x_{N'}\\}$, an autoregressive model $\\theta$ factors the distribution over possible outputs $Y = \\{y_{1},\\dots ,y_{N}\\}$ by $p(Y\\mid X;\\theta) = \\prod_{i = 1}^{N + 1}p(y_i\\mid y_{0:i - 1},x_{1:N'};\\theta)$, where the special tokens $y_0$ ($\\langle bos\\rangle$) and $y_{N + 1}$ ($\\langle eos\\rangle$) represent the beginning and end of the target units.",
"bbox": [169, 801, 826, 876],
"page_idx": 4
},
{
"type": "text",
"text": "Unlike the relatively well-studied non-autoregressive (NAR) MT (Gu et al., 2017; Wang et al., 2019; Gu et al., 2019; Ghazvininejad et al., 2019; Yin et al., 2023), building NAR S2UT models that generate units in parallel could be much more challenging due to the joint linguistic and acoustic",
"bbox": [169, 881, 826, 926],
"page_idx": 4
},
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [493, 948, 504, 959],
"page_idx": 4
},
{
"type": "text",
"text": "multimodality. Yet the proposed bilateral perturbation eases this acoustic multimodality and makes it possible for NAR modeling. As such, we further step forward and become the first to establish a NAR S2ST model $\\theta$.",
"bbox": [169, 103, 823, 147],
"page_idx": 5
},
{
"type": "text",
"text": "The NAR model assumes that the target sequence length $N$ can be modeled with a separate conditional distribution $p_L$, and the distribution becomes $p(Y \\mid X; \\theta) = p_L(N \\mid x_{1:N'}; \\theta) \\cdot \\prod_{i=1}^{N} p(y_i \\mid x_{1:N'}; \\theta)$. The target units are conditionally independent of each other, and an individual probability $p$ is predicted for each token in $Y$. Since the length of the target units $N$ should be given in advance, TranSpeech predicts it by pooling the encoder outputs into a length predictor.",
"bbox": [169, 152, 826, 227],
"page_idx": 5
},
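The length-predictor idea described here can be sketched as a pooled classifier over candidate lengths; `d_model` and `max_len` are illustrative assumptions rather than the paper's configuration.

```python
import torch
import torch.nn as nn

class LengthPredictor(nn.Module):
    """Mean-pool encoder states over valid frames, then classify target length N."""
    def __init__(self, d_model: int = 512, max_len: int = 1024):
        super().__init__()
        self.cls = nn.Linear(d_model, max_len)

    def forward(self, enc_out: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
        # enc_out: (B, T, D); src_mask: (B, T) with 1 on valid source frames.
        src_mask = src_mask.float()
        denom = src_mask.sum(dim=1, keepdim=True).clamp(min=1.0)
        pooled = (enc_out * src_mask.unsqueeze(-1)).sum(dim=1) / denom  # (B, D)
        return self.cls(pooled)  # (B, max_len) logits over candidate lengths
```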
{
"type": "text",
"text": "4.2 LINGUISTIC MULTIMODALITY",
"text_level": 1,
"bbox": [171, 247, 426, 261],
"page_idx": 5
},
{
"type": "text",
"text": "As illustrated in Figure 1(b), there might be multiple valid translations for the same source utterance, and this linguistic multimodality degrades the ability of NAR models to properly capture the target distribution. To alleviate the linguistic multimodality in NAR translation, we apply knowledge distillation to construct a sampled translation corpus from an autoregressive teacher, which is less noisy and more deterministic than the original one. The knowledge of the AR model is thus distilled into the NAR model, assisting it in capturing the target distribution for better accuracy.",
"bbox": [169, 273, 826, 363],
"page_idx": 5
},
{
"type": "text",
"text": "4.3 MASK-PREDICT",
"text_level": 1,
"bbox": [171, 380, 328, 393],
"page_idx": 5
},
{
"type": "text",
"text": "The NAR unit decoder applies the mask-predict algorithm (Ghazvininejad et al., 2019) to repeatedly reconsider unit choices and produce high-accuracy translation results in just a few cycles.",
"bbox": [169, 407, 823, 436],
"page_idx": 5
},
{
"type": "text",
"text": "Training. During training, the target units are given conditioned on the source speech sample $X$ and the unmasked target units $Y_{obs}$. As illustrated in Figure 2(b), given the length $N$ of the target sequence, we first sample the number of masked units from a uniform distribution $n \\sim \\mathrm{Unif}(\\{1,\\dots ,N\\})$, and then randomly choose the masked positions. For the learning objective, we compute the cross-entropy (CE) loss with label smoothing between the generated and target units in the masked places, and the CE loss for target length prediction is further added.",
"bbox": [169, 443, 825, 527],
"page_idx": 5
},
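A minimal sketch of this training step follows. `model(X, Y_in)` is assumed to return per-position unit logits together with length logits; this interface, `MASK`, and the smoothing value are illustrative assumptions, not the paper's released code.

```python
import torch
import torch.nn.functional as F

def train_step(model, X, Y, MASK: int, smoothing: float = 0.1):
    N = Y.size(0)
    n = int(torch.randint(1, N + 1, (1,)))   # n ~ Unif({1, ..., N})
    masked = torch.randperm(N)[:n]           # randomly chosen masked positions
    Y_in = Y.clone()
    Y_in[masked] = MASK
    logits, len_logits = model(X, Y_in)      # (N, V) unit logits, (max_len,) length logits
    # Label-smoothed CE on the masked slots only, plus CE for length prediction.
    unit_loss = F.cross_entropy(logits[masked], Y[masked], label_smoothing=smoothing)
    len_loss = F.cross_entropy(len_logits.unsqueeze(0), torch.tensor([N]))
    return unit_loss + len_loss
```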
{
"type": "text",
"text": "Decoding. In inference, the algorithm runs for a pre-determined number $T$ of iterative refinement steps, and we perform a mask operation at each iteration, followed by predict.",
"bbox": [169, 534, 823, 564],
"page_idx": 5
},
{
"type": "text",
"text": "In the first iteration $t = 0$, we predict the length $N$ of the target sequence and mask all units $Y = \\{y_{1},\\ldots ,y_{N}\\}$. In the following iterations, we mask the $n$ units with the lowest probability scores $p$:",
"bbox": [171, 569, 823, 599],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\nY_{mask}^{t} = \\arg \\min_{i} (p_{i}, n) \\quad Y_{obs}^{t} = Y \\backslash Y_{mask}^{t}, \\tag{1}\n$$\n",
"text_format": "latex",
"bbox": [338, 608, 825, 631],
"page_idx": 5
},
{
"type": "text",
"text": "where $n$ is a function of the iteration $t$, and we use linear decay $n = N \\cdot \\frac{T - t}{T}$ in this work.",
"bbox": [169, 643, 767, 662],
"page_idx": 5
},
{
"type": "text",
"text": "After masking, TranSpeech predicts the masked units $Y_{mask}^{t}$ conditioned on the source speech $X$ and the unmasked units $Y_{obs}^{t}$. We select the prediction with the highest probability $p$ for each $y_{i} \\in Y_{mask}^{t}$ and update its probability score accordingly:",
"bbox": [169, 667, 823, 710],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\ny_{i}^{t} = \\arg \\max_{w} P\\left(y_{i} = w \\mid X, Y_{obs}^{t}; \\theta\\right) \\quad p_{i}^{t} = \\max_{w} P\\left(y_{i} = w \\mid X, Y_{obs}^{t}; \\theta\\right) \\tag{2}\n$$\n",
"text_format": "latex",
"bbox": [245, 720, 825, 743],
"page_idx": 5
},
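Condensed into code, the decoding loop of Eqs. (1)-(2) looks roughly as follows; `model(X, Y)` is assumed to return (N, V) log-probabilities over units and `MASK` is a dedicated mask token id, both illustrative assumptions.

```python
import torch

@torch.no_grad()
def mask_predict(model, X, N: int, T: int, MASK: int):
    Y = torch.full((N,), MASK)                # t = 0: mask all N units
    p = torch.zeros(N)                        # running probability scores
    for t in range(T):
        if t > 0:
            n = int(N * (T - t) / T)          # linear decay of #masked units
            mask_idx = p.topk(n, largest=False).indices  # lowest-confidence slots, Eq. (1)
            Y[mask_idx] = MASK
        log_probs = model(X, Y)               # predict conditioned on Y_obs
        conf, pred = log_probs.max(dim=-1)    # Eq. (2): argmax token and max prob
        upd = Y.eq(MASK)
        Y[upd], p[upd] = pred[upd], conf[upd].exp()
    return Y
```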
{
"type": "text",
"text": "4.4 ADVANCED DECODING CHOICES",
"text_level": 1,
"bbox": [171, 763, 444, 777],
"page_idx": 5
},
{
"type": "text",
"text": "Target Length Beam. It has been reported (Ghazvininejad et al., 2019) that translating multiple candidate sequences of different lengths can improve performance. As such, we select the top $K$ length candidates with the highest probabilities and decode the same example with varying lengths in parallel. We then pick the sequence with the highest average log probability as our result. This avoids distinctly increasing the decoding time since the computation can be batched.",
"bbox": [169, 790, 825, 863],
"page_idx": 5
},
{
"type": "text",
"text": "Noisy Parallel Decoding. The absence of the AR decoding procedure makes it more difficult to capture the target distribution in S2ST. To obtain a more accurate optimum of the target distribution and compute the best translation for each fertility sequence, we use the autoregressive teacher to identify the best overall translation.",
"bbox": [169, 867, 825, 925],
"page_idx": 5
},
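The two decoding choices above can be combined as sketched below: decode the same input with the top-K predicted lengths, keep the candidate with the highest average log-probability, and optionally rescore with the AR teacher (noisy parallel decoding). `mask_predict` is the loop sketched earlier, and `teacher_score` is a hypothetical helper standing in for any AR scoring function; in practice all K candidates are decoded in one batch.

```python
import torch

@torch.no_grad()
def length_beam_decode(model, length_logits, X, T, MASK, K=5, teacher=None):
    cands, scores = [], []
    for N in length_logits.topk(K).indices.tolist():   # top-K length candidates
        Y = mask_predict(model, X, int(N), T, MASK)
        scores.append(model(X, Y).max(dim=-1).values.mean())  # avg log-prob
        cands.append(Y)
    if teacher is not None:
        # Noisy parallel decoding: let the AR teacher pick the best translation.
        scores = [teacher_score(teacher, X, Y) for Y in cands]  # hypothetical helper
    return cands[int(torch.stack(scores).argmax())]
```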
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 5
},
{
"type": "page_number",
"text": "6",
"bbox": [493, 948, 504, 959],
"page_idx": 5
},
{
"type": "text",
"text": "5 EXPERIMENTS",
"text_level": 1,
"bbox": [171, 102, 328, 118],
"page_idx": 6
},
{
"type": "text",
"text": "5.1 EXPERIMENTAL SETUP",
"text_level": 1,
"bbox": [171, 133, 377, 148],
"page_idx": 6
},
{
"type": "text",
"text": "Following the common practice in the direct S2ST pipeline, we apply the publicly-available pretrained multilingual HuBERT (mHuBERT) model and unit-based HiFi-GAN vocoder (Polyak et al., 2021; Kong et al., 2020) and leave them unchanged.",
"bbox": [169, 162, 826, 204],
"page_idx": 6
},
{
"type": "text",
"text": "Dataset. For a fair comparison, we use the benchmark CVSS-C dataset (Jia et al., 2022), which is derived from the CoVoST 2 (Wang et al., 2020b) speech-to-text translation corpus by synthesizing the translation text into speech using a single-speaker TTS system. To evaluate the performance of the proposed model, we conduct experiments on three language pairs, including French-English (Fr-En), English-Spanish (En-Es), and English-French (En-Fr).",
"bbox": [169, 210, 826, 282],
"page_idx": 6
},
{
"type": "text",
"text": "Model Configurations and Training. For bilateral perturbation, we finetune the publicly-available mHuBERT model for each language separately with the CTC loss until 25k updates using the Adam optimizer $(\\beta_{1} = 0.9, \\beta_{2} = 0.98, \\epsilon = 10^{-8})$. Following the practice in textless S2ST (Lee et al., 2021b), we use the k-means algorithm to cluster the representations given by the well-tuned mHuBERT into a vocabulary of 1000 units. TranSpeech computes 80-dimensional mel-filterbank features every 10 ms for the source speech as input, and we set $N_{b}$ to 6 in the encoder and decoder blocks. In training TranSpeech, we remove the auxiliary tasks for simplification and follow the unwritten-language scenario. TranSpeech is trained until convergence for 200k steps using 1 Tesla V100 GPU. A comprehensive table of hyperparameters is available in Appendix B.",
"bbox": [169, 287, 826, 414],
"page_idx": 6
},
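The unit-extraction step described here is commonly implemented with streaming k-means; a minimal sketch follows, in which `feature_batches` is a placeholder iterable of (T, D) frame-feature arrays produced by running the BiP-finetuned encoder over the corpus (an assumption, not the paper's released tooling).

```python
import numpy as np
from sklearn.cluster import MiniBatchKMeans

def learn_units(feature_batches, n_units: int = 1000) -> MiniBatchKMeans:
    km = MiniBatchKMeans(n_clusters=n_units, batch_size=10_000)
    for feats in feature_batches:
        km.partial_fit(feats)          # streaming k-means over the corpus
    return km

def to_units(km: MiniBatchKMeans, feats: np.ndarray) -> np.ndarray:
    units = km.predict(feats)          # nearest-centroid unit id per frame
    # Collapse consecutive duplicates, as is common for unit-based S2ST.
    return units[np.insert(np.diff(units) != 0, 0, True)]
```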
{
"type": "text",
"text": "Evaluation and Baseline models. For translation accuracy, we pre-train an ASR model to generate the corresponding text of the translated speech and then calculate the BLEU score (Papineni et al., 2002) between the generated and the reference text. For decoding speed, latency is computed as the time to decode a single n-frame speech sample, averaged over the test set using 1 V100 GPU.",
"bbox": [169, 420, 826, 477],
"page_idx": 6
},
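This ASR-BLEU protocol reduces to a few lines; the sketch below assumes `asr_transcribe` as a placeholder for any waveform-to-text function.

```python
import sacrebleu

def asr_bleu(asr_transcribe, translated_wavs, reference_texts) -> float:
    # Transcribe the translated speech, then score transcripts against references.
    hyps = [asr_transcribe(w) for w in translated_wavs]
    return sacrebleu.corpus_bleu(hyps, [reference_texts]).score
```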
{
"type": "text",
"text": "We compare TranSpeech with other systems using the publicly-available fairseq framework (Ott et al., 2019), including 1) Direct ASR, where we transcribe the S2ST data with an open-sourced ASR model as a reference and compute BLEU; 2) Direct TTS, where we synthesize speech samples with the target units, then transcribe the speech to text and compute BLEU; 3) the S2T+TTS cascaded system, where we train the S2T basic transformer model (Wang et al., 2020a) and then apply a TTS model (Ren et al., 2020; Kong et al., 2020) for speech generation; 4) the basic transformer (Lee et al., 2021a) without using text; and 5) the basic norm transformer (Lee et al., 2021b) with speaker normalization.",
"bbox": [169, 482, 826, 582],
"page_idx": 6
},
{
"type": "text",
"text": "5.2 TRANSLATION ACCURACY AND SPEECH NATURALNESS",
"text_level": 1,
"bbox": [171, 599, 602, 614],
"page_idx": 6
},
{
"type": "text",
"text": "Table 1 summarizes the translation accuracy and inference latency of all systems, and we have the following observations: 1) Bilateral perturbation (3 vs. 4) improves S2ST performance by a large margin of 2.9 BLEU points. The proposed technique addresses acoustic multimodality by disentangling the acoustic information and learning linguistic representations from speech samples, which produces more deterministic targets for speech-to-unit translation. 2) The conformer architecture (2 vs. 3) shows a 2.2 BLEU gain in translation accuracy. It combines convolution neural networks and transformers in a joint architecture, exhibiting a superior ability to learn local and global dependencies of audio. 3) Knowledge distillation (6 vs. 7) is demonstrated to alleviate the linguistic multimodality, where training on the distillation corpus provides a distinct improvement of around 1 BLEU point. For speech quality, we attach the evaluation in Appendix D. When considering the speed-performance trade-off in the NAR unit decoder, we find that more iterative cycles (7 vs. 8) or advanced decoding methods (e.g., length beam (8 vs. 9) and noisy parallel decoding (9 vs. 10)) further improve translation accuracy, trading up to 1.5 BLEU points during decoding. In comparison with baseline systems, TranSpeech yields higher BLEU scores than the best publicly-available direct S2ST baselines (2 vs. 6) by a considerable margin; in fact, only 2 mask-predict iterations (see Figure 3(b)) are necessary for achieving a new SOTA on textless S2ST.",
"bbox": [169, 627, 826, 849],
"page_idx": 6
},
{
"type": "text",
"text": "5.3 DECODING SPEED",
"text_level": 1,
"bbox": [171, 869, 344, 883],
"page_idx": 6
},
{
"type": "text",
"text": "We visualize the relationship between the translation latency and the length of input speech in Figure 3(a). As can be seen, the autoregressive baselines have a latency linear in the decoding",
"bbox": [169, 895, 825, 925],
"page_idx": 6
},
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 6
},
{
"type": "page_number",
"text": "7",
"bbox": [493, 948, 504, 959],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/0ed311a193544ad19cc3cd47fe85967b3fec4af7efa56bc0f5e8fcd59727e578.jpg",
"table_caption": [
"Table 1: Translation quality (BLEU scores $(\\uparrow)$) and inference speed (frames/second $(\\uparrow)$) comparison with baseline systems. We set the beam size to 5 in autoregressive decoding, and apply 5 iterative cycles in NAR naive decoding. $\\dagger$: In this work, we remove the auxiliary tasks (e.g., source and target CTC, auto-encoding) in training the S2ST system for simplification. Though the S2ST system can be further improved with the auxiliary tasks, this is beyond our focus. BiP: Bilateral Perturbation; NPD: noisy parallel decoding; b: length beam in NAR decoding."
],
"table_footnote": [],
"table_body": "<table><tr><td>ID</td><td>Model</td><td>BiP</td><td>Fr-En</td><td>En-Fr</td><td>En-Es</td><td>Speed</td><td>Speedup</td></tr><tr><td colspan=\"8\">Autoregressive models</td></tr><tr><td>1</td><td>Basic Transformer (Lee et al., 2021a)†</td><td>×</td><td>15.44</td><td>15.28</td><td>10.07</td><td rowspan=\"2\">870</td><td rowspan=\"2\">1.00×</td></tr><tr><td>2</td><td>Basic Norm Transformer (Lee et al., 2021b)†</td><td>×</td><td>15.81</td><td>15.93</td><td>12.98</td></tr><tr><td>3</td><td>Basic Conformer</td><td>×</td><td>18.02</td><td>17.07</td><td>13.75</td><td rowspan=\"2\">895</td><td rowspan=\"2\">1.02×</td></tr><tr><td>4</td><td>Basic Conformer</td><td>✓</td><td>22.39</td><td>19.65</td><td>14.94</td></tr><tr><td colspan=\"8\">Non-autoregressive models with naive decoding</td></tr><tr><td>5</td><td>TranSpeech - Distill</td><td>×</td><td>14.86</td><td>14.12</td><td>10.27</td><td rowspan=\"3\">9610</td><td rowspan=\"3\">11.04×</td></tr><tr><td>6</td><td>TranSpeech - Distill</td><td>✓</td><td>16.23</td><td>15.9</td><td>10.94</td></tr><tr><td>7</td><td>TranSpeech</td><td>✓</td><td>17.24</td><td>16.3</td><td>11.79</td></tr><tr><td colspan=\"8\">Non-autoregressive models with advanced decoding</td></tr><tr><td>8</td><td>TranSpeech (iter=15)</td><td>✓</td><td>18.03</td><td>16.97</td><td>12.62</td><td>4651</td><td>5.34×</td></tr><tr><td>9</td><td>TranSpeech (iter=15 + b=15)</td><td>✓</td><td>18.10</td><td>17.05</td><td>12.70</td><td>2394</td><td>2.75×</td></tr><tr><td>10</td><td>TranSpeech (iter=15 + b=15 + NPD)</td><td>✓</td><td>18.39</td><td>17.50</td><td>12.77</td><td>2208</td><td>2.53×</td></tr><tr><td colspan=\"8\">Cascaded systems</td></tr><tr><td>11</td><td>S2T + TTS</td><td>/</td><td>27.17</td><td>34.85</td><td>32.86</td><td>/</td><td>/</td></tr><tr><td>12</td><td>Direct ASR</td><td>/</td><td>71.61</td><td>50.92</td><td>68.75</td><td>/</td><td>/</td></tr><tr><td>13</td><td>Direct TTS</td><td>/</td><td>82.41</td><td>76.87</td><td>83.69</td><td>/</td><td>/</td></tr></table>",
"bbox": [173, 172, 831, 489],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/97f90f40c3e0ba55818fa5a1df984c65e8687e7d3c4f68c41ffe6eaf543e581c.jpg",
"image_caption": [
"(a) Translation latency"
],
"image_footnote": [],
"bbox": [200, 500, 493, 676],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/1301747a461012319cff9b5646d2f75bc3ad0b2da5d1b8c1d69a863b08504243.jpg",
"image_caption": [
"(b) Performance-speed trade-off.",
"Figure 3: The translation latency is computed as the time to decode an n-frame speech sample, averaged over the test set using 1 NVIDIA V100. b: length beam. NPD: noisy parallel decoding."
],
"image_footnote": [],
"bbox": [501, 498, 795, 676],
"page_idx": 7
},
{
"type": "text",
"text": "length. At the same time, NAR TranSpeech is nearly constant for typical lengths, even with multiple cycles of mask-predict iterative refinement. We further illustrate the versatile speed-performance trade-off for NAR decoding in Figure 3(b). TranSpeech enables a speedup of up to $21.4\\mathrm{x}$ compared to the autoregressive baseline. On the other hand, it can alternatively retain the highest quality of 18.39 BLEU while still achieving a $2.53\\mathrm{x}$ speedup.",
"bbox": [169, 746, 826, 816],
"page_idx": 7
},
{
"type": "text",
"text": "5.4 CASE STUDY",
"text_level": 1,
"bbox": [171, 825, 307, 839],
"page_idx": 7
},
{
"type": "text",
"text": "We present several translation examples sampled from the Fr-En language pair in Table 2, and have the following findings: 1) Models trained with original units suffer severely from the issue of noisy and incomplete translation due to the indeterministic training targets, while with the bilateral perturbation brought in, this multimodal issue is largely alleviated; 2) the advanced decoding methods lead to a distinct improvement in translation accuracy. As can be seen, the results produced by the TranSpeech",
"bbox": [169, 854, 826, 925],
"page_idx": 7
},
{
"type": "header",
"text": "Published as a conference paper at ICLR 2023",
"bbox": [171, 32, 478, 47],
"page_idx": 7
},
{
"type": "page_number",
"text": "8",
"bbox": [493, 948, 504, 959],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/55cc6884f7929206134799cb367fab1f97b88e0dec1839e7d23c315059c7a6ba.jpg",
"table_caption": [
"Table 2: Two examples comparing translations produced by TranSpeech and baseline models. We use bold fonts to indicate the issue of noisy and incomplete translation."
],
"table_footnote": [],
"table_body": "<table><tr><td>Source:</td><td>l'origine de la rue est liée à la construction de la place rihour.</td></tr><tr><td>Target:</td><td>the origin of the street is linked to the construction of rihour square.</td></tr><tr><td>Basic Conformer:</td><td>the origin of the street is linked to the construction of the.</td></tr><tr><td>TranSpeech:</td><td>th origin of the seti is linked to the construction of the rear.</td></tr><tr><td>TranSpeech+BiP:</td><td>the origin of the street is linked to the construction of the ark.</td></tr><tr><td>TranSpeech+BiP+Advanced:</td><td>the origin of the street is linked to the construction of the work.</td></tr><tr><td>Source:</td><td>il participe aux activités du patronage laïque et des pionniers de saint-ouen.</td></tr><tr><td>Target:</td><td>he participates in the secular patronage and pioneer activities of saint ouen.</td></tr><tr><td>Basic Conformer:</td><td>he participated in the activities of the late patronage a d see.</td></tr><tr><td>TranSpeech:</td><td>he takes in the patronage activities in of saint.</td></tr><tr><td>TranSpeech+BiP:</td><td>he participated in the activities of the lake patronage and say pointing</td></tr><tr><td>TranSpeech+BiP+Advanced:</td><td>he participated in the activities of the wake patronage and saint pioneers</td></tr></table>",
"bbox": [173, 132, 828, 301],
"page_idx": 8
},
{
"type": "text",
"text": "with advanced decoding (more iterations and NPD), while of a similar quality to those produced by the autoregressive basic conformer, are noticeably more literal.",
"bbox": [169, 332, 823, 361],
"page_idx": 8
},
{
"type": "text",
"text": "5.5 ABLATION STUDY",
"text_level": 1,
"bbox": [171, 398, 341, 412],
"page_idx": 8
},
{
"type": "text",
"text": "We conduct ablation studies to demonstrate the effectiveness of several detailed designs in this work, including the bilateral perturbation and the conformer architecture in TranSpeech. The results have been presented in Table 3, and we have the following observations: 1) Style normalization and information enhance",
"bbox": [169, 434, 393, 571],
"page_idx": 8
},
{
"type": "text",
"text": "ment in bilateral perturbation both demonstrate a performance gain, and they work jointly to learn deterministic representations, leading to improvements in translation accuracy. 2) Replacing the relative positional encoding in the self-attention layer with the vanilla one (Vaswani et al., 2017) leads to a distinct degradation in translation accuracy, demonstrating the superior capability of modeling both local and global audio dependencies brought by our architecture design.",
"bbox": [169, 573, 826, 643],
"page_idx": 8
},
{
"type": "table",
"img_path": "images/adc75f5325b3209dded8831f44481d9090afd2205125723b7303a019d74ee3c1.jpg",
"table_caption": [
"Table 3: Ablation study results. SN: style normalization; IE: information enhancement; PE: positional encoding."
],
"table_footnote": [],
"table_body": "<table><tr><td>ID</td><td>Model</td><td>PE</td><td>Fr-En</td><td>En-Fr</td><td>En-Es</td></tr><tr><td>1</td><td>Basic Conformer</td><td>Relative</td><td>18.02</td><td>17.07</td><td>13.75</td></tr><tr><td>2</td><td>Basic Conformer + IE</td><td>Relative</td><td>21.98</td><td>19.60</td><td>14.91</td></tr><tr><td>3</td><td>Basic Conformer + SN</td><td>Relative</td><td>21.54</td><td>18.53</td><td>13.97</td></tr><tr><td>4</td><td>Basic Conformer</td><td>Absolute</td><td>17.23</td><td>16.19</td><td>13.06</td></tr></table>",
"bbox": [403, 468, 831, 550],
"page_idx": 8
},
{
"type": "text",
"text": "6 CONCLUSION",
"text_level": 1,
"bbox": [171, 685, 318, 699],
"page_idx": 8
},
{
"type": "text",
"text": "In this work, we propose TranSpeech, a speech-to-speech translation model with bilateral perturbation. To tackle the acoustic multimodal issue in S2ST, bilateral perturbation, which includes the style normalization and information enhancement stages, is proposed to learn only the linguistic information from acoustic-variant speech samples. It assists in generating deterministic representations agnostic to acoustic conditions, significantly reducing the acoustic multimodality and making non-autoregressive (NAR) generation possible. As such, we further step forward and become the first to establish a NAR S2ST technique. TranSpeech takes full advantage of parallelism and leverages the mask-predict algorithm to generate results in a constant number of iterations. To address linguistic multimodality, we apply knowledge distillation by constructing a less noisy sampled translation corpus. Experimental results demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with a baseline textless S2ST model. Moreover, TranSpeech shows a significant improvement in inference latency, requiring as few as 2 iterations to generate high-quality samples and enabling decoding up to $21.4\\mathrm{x}$ faster than the autoregressive baseline. We envisage that our work will serve as a basis for future textless S2ST studies.",
|
| 1255 |
+
"bbox": [
|
| 1256 |
+
169,
|
| 1257 |
+
729,
|
| 1258 |
+
826,
|
| 1259 |
+
924
|
| 1260 |
+
],
|
| 1261 |
+
"page_idx": 8
|
| 1262 |
+
},
|
| 1263 |
+
{
|
| 1264 |
+
"type": "header",
|
| 1265 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1266 |
+
"bbox": [
|
| 1267 |
+
171,
|
| 1268 |
+
32,
|
| 1269 |
+
478,
|
| 1270 |
+
47
|
| 1271 |
+
],
|
| 1272 |
+
"page_idx": 8
|
| 1273 |
+
},
|
| 1274 |
+
{
|
| 1275 |
+
"type": "page_number",
|
| 1276 |
+
"text": "9",
|
| 1277 |
+
"bbox": [
|
| 1278 |
+
493,
|
| 1279 |
+
948,
|
| 1280 |
+
504,
|
| 1281 |
+
959
|
| 1282 |
+
],
|
| 1283 |
+
"page_idx": 8
|
| 1284 |
+
},
|
| 1285 |
+
{
|
| 1286 |
+
"type": "text",
|
| 1287 |
+
"text": "ACKNOWLEDGEMENTS",
|
| 1288 |
+
"text_level": 1,
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
171,
|
| 1291 |
+
102,
|
| 1292 |
+
369,
|
| 1293 |
+
118
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 9
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "ref_text",
|
| 1299 |
+
"text": "This work was supported in part by the National Natural Science Foundation of China under Grant No. 62222211, National Key R&D Program of China under Grant No.2020YFC0832505, Zhejiang Electric Power Co., Ltd. Science and Technology Project No.5211YF22006 and Yiwise.",
|
| 1300 |
+
"bbox": [
|
| 1301 |
+
171,
|
| 1302 |
+
133,
|
| 1303 |
+
826,
|
| 1304 |
+
176
|
| 1305 |
+
],
|
| 1306 |
+
"page_idx": 9
|
| 1307 |
+
},
|
| 1308 |
+
{
|
| 1309 |
+
"type": "text",
|
| 1310 |
+
"text": "REFERENCES",
|
| 1311 |
+
"text_level": 1,
|
| 1312 |
+
"bbox": [
|
| 1313 |
+
173,
|
| 1314 |
+
196,
|
| 1315 |
+
287,
|
| 1316 |
+
213
|
| 1317 |
+
],
|
| 1318 |
+
"page_idx": 9
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"type": "list",
|
| 1322 |
+
"sub_type": "ref_text",
|
| 1323 |
+
"list_items": [
|
| 1324 |
+
"Alexei Baevski, Michael Auli, and Abdelrahman Mohamed. Effectiveness of self-supervised pretraining for speech recognition. arXiv preprint arXiv:1911.03912, 2019.",
|
| 1325 |
+
"Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460, 2020.",
|
| 1326 |
+
"Nanxin Chen, Shinji Watanabe, Jesús Villalba, Piotr Želasko, and Najim Dehak. Non-autoregressive transformer for speech recognition. IEEE Signal Processing Letters, 28:121-125, 2020.",
|
| 1327 |
+
"Sanyuan Chen, Yu Wu, Zhuo Chen, Jian Wu, Jinyu Li, Takuya Yoshioka, Chengyi Wang, Shujie Liu, and Ming Zhou. Continuous speech separation with conformer. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5749-5753. IEEE, 2021.",
|
| 1328 |
+
"Hyeong-Seok Choi, Juheon Lee, Wansoo Kim, Jie Lee, Hoon Heo, and Kyogu Lee. Neural analysis and synthesis: Reconstructing speech from self-supervised representations. Advances in Neural Information Processing Systems, 34, 2021.",
|
| 1329 |
+
"Jan Chorowski, Ron J Weiss, Samy Bengio, and Aaron Van Den Oord. Unsupervised speech representation learning using wavenet autoencoders. IEEE/ACM transactions on audio, speech, and language processing, 27(12):2041-2053, 2019.",
|
| 1330 |
+
"Yu-An Chung, Wei-Ning Hsu, Hao Tang, and James Glass. An unsupervised autoregressive model for speech representation learning. arXiv preprint arXiv:1904.03240, 2019.",
|
| 1331 |
+
"Chenye Cui, Yi Ren, Jinglin Liu, Feiyang Chen, Rongjie Huang, Ming Lei, and Zhou Zhao. Emovie: A mandarin emotion speech dataset with a simple emotional text-to-speech model. arXiv preprint arXiv:2106.09317, 2021.",
|
| 1332 |
+
"Chenye Cui, Yi Ren, Jinglin Liu, Rongjie Huang, and Zhou Zhao. Varietysound: Timbre-controllable video to sound generation via unsupervised information disentanglement. arXiv preprint arXiv:2211.10666, 2022.",
|
| 1333 |
+
"Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019.",
|
| 1334 |
+
"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.",
|
| 1335 |
+
"Heting Gao, Junrui Ni, Kaizhi Qian, Yang Zhang, Shiyu Chang, and Mark Hasegawa-Johnson. Wavprompt: Towards few-shot spoken language understanding with frozen language models. arXiv preprint arXiv:2203.15863, 2022.",
|
| 1336 |
+
"Marjan Ghazvininejad, Omer Levy, Yinhan Liu, and Luke Zettlemoyer. Mask-predict: Parallel decoding of conditional masked language models. arXiv preprint arXiv:1904.09324, 2019.",
|
| 1337 |
+
"Jiatao Gu, James Bradbury, Caiming Xiong, Victor OK Li, and Richard Socher. Non-autoregressive neural machine translation. arXiv preprint arXiv:1711.02281, 2017.",
|
| 1338 |
+
"Jiatao Gu, Changhan Wang, and Junbo Zhao. Levenshtein transformer. Advances in Neural Information Processing Systems, 32, 2019."
|
| 1339 |
+
],
|
| 1340 |
+
"bbox": [
|
| 1341 |
+
171,
|
| 1342 |
+
220,
|
| 1343 |
+
828,
|
| 1344 |
+
924
|
| 1345 |
+
],
|
| 1346 |
+
"page_idx": 9
|
| 1347 |
+
},
|
| 1348 |
+
{
|
| 1349 |
+
"type": "header",
|
| 1350 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1351 |
+
"bbox": [
|
| 1352 |
+
171,
|
| 1353 |
+
32,
|
| 1354 |
+
478,
|
| 1355 |
+
47
|
| 1356 |
+
],
|
| 1357 |
+
"page_idx": 9
|
| 1358 |
+
},
|
| 1359 |
+
{
|
| 1360 |
+
"type": "page_number",
|
| 1361 |
+
"text": "10",
|
| 1362 |
+
"bbox": [
|
| 1363 |
+
490,
|
| 1364 |
+
948,
|
| 1365 |
+
506,
|
| 1366 |
+
959
|
| 1367 |
+
],
|
| 1368 |
+
"page_idx": 9
|
| 1369 |
+
},
|
| 1370 |
+
{
|
| 1371 |
+
"type": "list",
|
| 1372 |
+
"sub_type": "ref_text",
|
| 1373 |
+
"list_items": [
|
| 1374 |
+
"Anmol Gulati, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, Shibo Wang, Zhengdong Zhang, Yonghui Wu, et al. Conformer: Convolution-augmented transformer for speech recognition. arXiv preprint arXiv:2005.08100, 2020.",
|
| 1375 |
+
"Pengcheng Guo, Florian Boyer, Xuankai Chang, Tomoki Hayashi, Yosuke Higuchi, Hirofumi Inaguma, Naoyuki Kamo, Chenda Li, Daniel Garcia-Romero, Jiatong Shi, et al. Recent developments on espnet toolkit boosted by conformer. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5874-5878. IEEE, 2021.",
|
| 1376 |
+
"Wei-Ning Hsu, David Harwath, Christopher Song, and James Glass. Text-free image-to-speech synthesis using learned segmental units. arXiv preprint arXiv:2012.15454, 2020.",
|
| 1377 |
+
"Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021.",
|
| 1378 |
+
"Rongjie Huang, Feiyang Chen, Yi Ren, Jinglin Liu, Chenye Cui, and Zhou Zhao. Multi-singer: Fast multi-singer singing voice vocoder with a large-scale corpus. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 3945-3954, 2021.",
|
| 1379 |
+
"Rongjie Huang, Chenye Cui, Feiyang Chen, Yi Ren, Jinglin Liu, Zhou Zhao, Baoxing Huai, and Zhefeng Wang. Singgan: Generative adversarial network for high-fidelity singing voice generation. In Proceedings of the 30th ACM International Conference on Multimedia, pp. 2525-2535, 2022a.",
|
| 1380 |
+
"Rongjie Huang, Max WY Lam, Jun Wang, Dan Su, Dong Yu, Yi Ren, and Zhou Zhao. Fastdiff: A fast conditional diffusion model for high-quality speech synthesis. arXiv preprint arXiv:2204.09934, 2022b.",
|
| 1381 |
+
"Rongjie Huang, Yi Ren, Jinglin Liu, Chenye Cui, and Zhou Zhao. Generspeech: Towards style transfer for generalizable out-of-domain text-to-speech synthesis. arXiv preprint arXiv:2205.07211, 2022c.",
|
| 1382 |
+
"Rongjie Huang, Zhou Zhao, Huadai Liu, Jinglin Liu, Chenye Cui, and Yi Ren. Prodiff: Progressive fast diffusion model for high-quality text-to-speech. arXiv preprint arXiv:2207.06389, 2022d.",
|
| 1383 |
+
"Ye Jia, Ron J Weiss, Fadi Biadsy, Wolfgang Macherey, Melvin Johnson, Zhifeng Chen, and Yonghui Wu. Direct speech-to-speech translation with a sequence-to-sequence model. arXiv preprint arXiv:1904.06037, 2019.",
|
| 1384 |
+
"Ye Jia, Michelle Tadmor Ramanovich, Tal Remez, and Roi Pomerantz. Translatotron 2: Robust direct speech-to-speech translation. arXiv preprint arXiv:2107.08661, 2021.",
|
| 1385 |
+
"Ye Jia, Michelle Tadmor Ramanovich, Quan Wang, and Heiga Zen. Cvss corpus and massively multilingual speech-to-speech translation. arXiv preprint arXiv:2201.03713, 2022.",
|
| 1386 |
+
"Jungil Kong, Jaehyeon Kim, and Jaekyoung Bae. Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis. Advances in Neural Information Processing Systems, 33:17022-17033, 2020.",
|
| 1387 |
+
"Max WY Lam, Jun Wang, Rongjie Huang, Dan Su, and Dong Yu. Bilateral denoising diffusion models. arXiv preprint arXiv:2108.11514, 2021.",
|
| 1388 |
+
"Alon Lavie, Alex Waibel, Lori Levin, Michael Finke, Donna Gates, Marsal Gavalda, Torsten Zeppenfeld, and Puming Zhan. Janus-iii: Speech-to-speech translation in multiple languages. In 1997 IEEE International Conference on Acoustics, Speech, and Signal Processing, volume 1, pp. 99-102. IEEE, 1997.",
|
| 1389 |
+
"Ann Lee, Peng-Jen Chen, Changhan Wang, Jiatao Gu, Xutai Ma, Adam Polyak, Yossi Adi, Qing He, Yun Tang, Juan Pino, et al. Direct speech-to-speech translation with discrete units. arXiv preprint arXiv:2107.05604, 2021a."
|
| 1390 |
+
],
|
| 1391 |
+
"bbox": [
|
| 1392 |
+
171,
|
| 1393 |
+
102,
|
| 1394 |
+
826,
|
| 1395 |
+
922
|
| 1396 |
+
],
|
| 1397 |
+
"page_idx": 10
|
| 1398 |
+
},
|
| 1399 |
+
{
|
| 1400 |
+
"type": "header",
|
| 1401 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1402 |
+
"bbox": [
|
| 1403 |
+
171,
|
| 1404 |
+
32,
|
| 1405 |
+
478,
|
| 1406 |
+
47
|
| 1407 |
+
],
|
| 1408 |
+
"page_idx": 10
|
| 1409 |
+
},
|
| 1410 |
+
{
|
| 1411 |
+
"type": "page_number",
|
| 1412 |
+
"text": "11",
|
| 1413 |
+
"bbox": [
|
| 1414 |
+
488,
|
| 1415 |
+
946,
|
| 1416 |
+
506,
|
| 1417 |
+
959
|
| 1418 |
+
],
|
| 1419 |
+
"page_idx": 10
|
| 1420 |
+
},
|
| 1421 |
+
{
|
| 1422 |
+
"type": "list",
|
| 1423 |
+
"sub_type": "ref_text",
|
| 1424 |
+
"list_items": [
|
| 1425 |
+
"Ann Lee, Hongyu Gong, Paul-Ambroise Duquenne, Holger Schwenk, Peng-Jen Chen, Changhan Wang, Sravya Popuri, Juan Pino, Jiatao Gu, and Wei-Ning Hsu. Textless speech-to-speech translation on real data. arXiv preprint arXiv:2112.08352, 2021b.",
|
| 1426 |
+
"Zhijie Lin, Zhou Zhao, Haoyuan Li, Jinglin Liu, Meng Zhang, Xingshan Zeng, and Xiaofei He. Simullr: Simultaneous lip reading transducer with attention-guided adaptive memory. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 1359-1367, 2021.",
|
| 1427 |
+
"Satoshi Nakamura, Konstantin Markov, Hiromi Nakaiwa, Gen-ichiro Kikui, Hisashi Kawai, Takatoshi Jitsuhiro, J-S Zhang, Hirofumi Yamamoto, Eiichiro Sumita, and Seiichi Yamamoto. The atr multilingual speech-to-speech translation system. IEEE Transactions on Audio, Speech, and Language Processing, 14(2):365-376, 2006.",
|
| 1428 |
+
"Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. *fairseq: A fast, extensible toolkit for sequence modeling*. arXiv preprint arXiv:1904.01038, 2019.",
|
| 1429 |
+
"Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pp. 311-318, 2002.",
|
| 1430 |
+
"Adam Polyak and Lior Wolf. Attention-based wavenet autoencoder for universal voice conversion. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6800-6804. IEEE, 2019.",
|
| 1431 |
+
"Adam Polyak, Yossi Adi, Jade Copet, Eugene Kharitonov, Kushal Lakhotia, Wei-Ning Hsu, Abdelrahman Mohamed, and Emmanuel Dupoux. Speech resynthesis from discrete disentangled self-supervised representations. arXiv preprint arXiv:2104.00355, 2021.",
|
| 1432 |
+
"Kaizhi Qian, Yang Zhang, Shiyu Chang, Mark Hasegawa-Johnson, and David Cox. Unsupervised speech decomposition via triple information bottleneck. In International Conference on Machine Learning, pp. 7836-7846. PMLR, 2020.",
|
| 1433 |
+
"Kaizhi Qian, Yang Zhang, Shiyu Chang, Jinjun Xiong, Chuang Gan, David Cox, and Mark Hasegawa-Johnson. Global prosody style transfer without text transcriptions. In International Conference on Machine Learning, pp. 8650-8660. PMLR, 2021.",
|
| 1434 |
+
"Yi Ren, Chenxu Hu, Xu Tan, Tao Qin, Sheng Zhao, Zhou Zhao, and Tie-Yan Liu. Fastspeech 2: Fast and high-quality end-to-end text to speech. arXiv preprint arXiv:2006.04558, 2020.",
|
| 1435 |
+
"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.",
|
| 1436 |
+
"Wolfgang Wahlster. Verbmobil: foundations of speech-to-speech translation. Springer Science & Business Media, 2013.",
|
| 1437 |
+
"Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, and Juan Pino. fairseq s2t: Fast speech-to-text modeling with fairseq. In Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations, 2020a.",
|
| 1438 |
+
"Changhan Wang, Anne Wu, and Juan Pino. Covost 2 and massively multilingual speech-to-text translation. arXiv preprint arXiv:2007.10310, 2020b.",
|
| 1439 |
+
"Yiren Wang, Fei Tian, Di He, Tao Qin, ChengXiang Zhai, and Tie-Yan Liu. Non-autoregressive machine translation with auxiliary regularization. In Proceedings of the AAAI conference on artificial intelligence, volume 33, pp. 5377-5384, 2019.",
|
| 1440 |
+
"Yan Xia, Zhou Zhao, Shangwei Ye, Yang Zhao, Haoyuan Li, and Yi Ren. Video-guided curriculum learning for spoken video grounding. In Proceedings of the 30th ACM International Conference on Multimedia, pp. 5191-5200, 2022.",
|
| 1441 |
+
"Bang Yang, Fenglin Liu, and Yuexian Zou. Non-autoregressive video captioning with iterative refinement. 2019."
|
| 1442 |
+
],
|
| 1443 |
+
"bbox": [
|
| 1444 |
+
171,
|
| 1445 |
+
102,
|
| 1446 |
+
826,
|
| 1447 |
+
922
|
| 1448 |
+
],
|
| 1449 |
+
"page_idx": 11
|
| 1450 |
+
},
|
| 1451 |
+
{
|
| 1452 |
+
"type": "header",
|
| 1453 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1454 |
+
"bbox": [
|
| 1455 |
+
171,
|
| 1456 |
+
32,
|
| 1457 |
+
478,
|
| 1458 |
+
47
|
| 1459 |
+
],
|
| 1460 |
+
"page_idx": 11
|
| 1461 |
+
},
|
| 1462 |
+
{
|
| 1463 |
+
"type": "page_number",
|
| 1464 |
+
"text": "12",
|
| 1465 |
+
"bbox": [
|
| 1466 |
+
488,
|
| 1467 |
+
946,
|
| 1468 |
+
508,
|
| 1469 |
+
959
|
| 1470 |
+
],
|
| 1471 |
+
"page_idx": 11
|
| 1472 |
+
},
|
| 1473 |
+
{
|
| 1474 |
+
"type": "list",
|
| 1475 |
+
"sub_type": "ref_text",
|
| 1476 |
+
"list_items": [
|
| 1477 |
+
"Chao-Han Huck Yang, Yun-Yun Tsai, and Pin-Yu Chen. Voice2series: Reprogramming acoustic models for time series classification. In International Conference on Machine Learning, pp. 11808-11819. PMLR, 2021.",
|
| 1478 |
+
"Dongchao Yang, Songxiang Liu, Jianwei Yu, Helin Wang, Chao Weng, and Yuexian Zou. Norespeech: Knowledge distillation based conditional diffusion model for noise-robust expressive tts. arXiv preprint arXiv:2211.02448, 2022a.",
|
| 1479 |
+
"Dongchao Yang, Jianwei Yu, Helin Wang, Wen Wang, Chao Weng, Yuexian Zou, and Dong Yu. Diffsound: Discrete diffusion model for text-to-sound generation. arXiv preprint arXiv:2207.09983, 2022b.",
|
| 1480 |
+
"Dongchao Yang, Songxiang Liu, Rongjie Huang, Guangzhi Lei, Chao Weng, Helen Meng, and Dong Yu. Instructtts: Modelling expressive tt's in discrete latent space with natural language style prompt. arXiv preprint arXiv:2301.13662, 2023.",
|
| 1481 |
+
"Zhenhui Ye, Zhou Zhao, Yi Ren, and Fei Wu. Syntaspeech: Syntax-aware generative adversarial text-to-speech. arXiv preprint arXiv:2204.11792, 2022.",
|
| 1482 |
+
"Zhenhui Ye, Ziyue Jiang, Yi Ren, Jinglin Liu, Jinzheng He, and Zhou Zhao. Geneface: Generalized and high-fidelity audio-driven 3d talking face synthesis. arXiv preprint arXiv:2301.13430, 2023.",
|
| 1483 |
+
"Hao Yen, Pin-Jui Ku, Chao-Han Huck Yang, Hu Hu, Sabato Marco Siniscalchi, Pin-Yu Chen, and Yu Tsao. A study of low-resource speech commands recognition based on adversarial reprogramming. arXiv preprint arXiv:2110.03894, 2021.",
|
| 1484 |
+
"Aoxiong Yin, Zhou Zhao, Jinglin Liu, Weike Jin, Meng Zhang, Xingshan Zeng, and Xiaofei He. Simulslt: End-to-end simultaneous sign language translation. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 4118-4127, 2021.",
|
| 1485 |
+
"Aoxiong Yin, Zhou Zhao, Weike Jin, Meng Zhang, Xingshan Zeng, and Xiaofei He. Mlslt: Towards multilingual sign language translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5109-5119, 2022.",
|
| 1486 |
+
"Aoxiong Yin, Tianyun Zhong, Li Tang, Weike Jin, Tao Jin, and Zhou Zhao. Gloss attention for gloss-free sign language translation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, Canada, June 17-23, 2023. IEEE, 2023.",
|
| 1487 |
+
"Chen Zhang, Xu Tan, Yi Ren, Tao Qin, Kejun Zhang, and Tie-Yan Liu. Uwspeech: Speech to speech translation for unwritten languages. arXiv preprint arXiv:2006.07926, 59:132, 2020.",
|
| 1488 |
+
"Jie Zhang, Chen Chen, Bo Li, Lingjuan Lyu, Shuang Wu, Shouhong Ding, Chunhua Shen, and Chao Wu. Dense: Data-free one-shot federated learning. In Advances in Neural Information Processing Systems.",
|
| 1489 |
+
"Jie Zhang, Bo Li, Jianghe Xu, Shuang Wu, Shouhong Ding, Lei Zhang, and Chao Wu. Towards efficient data free black-box adversarial attack. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15115-15125, 2022a.",
|
| 1490 |
+
"Jie Zhang, Bo Li, Chen Chen, Lingjuan Lyu, Shuang Wu, Shouhong Ding, and Chao Wu. Delving into the adversarial robustness of federated learning. arXiv preprint arXiv:2302.09479, 2023a.",
|
| 1491 |
+
"Zijian Zhang, Zhou Zhao, and Zhijie Lin. Unsupervised representation learning from pre-trained diffusion probabilistic models. In Advances in Neural Information Processing Systems, 2022b.",
|
| 1492 |
+
"Zijian Zhang, Zhou Zhao, Jun Yu, and Qi Tian. Shiftddpms: Exploring conditional diffusion models by shifting diffusion trajectories. arXiv preprint arXiv:2302.02373, 2023b."
|
| 1493 |
+
],
|
| 1494 |
+
"bbox": [
|
| 1495 |
+
171,
|
| 1496 |
+
102,
|
| 1497 |
+
826,
|
| 1498 |
+
838
|
| 1499 |
+
],
|
| 1500 |
+
"page_idx": 12
|
| 1501 |
+
},
|
| 1502 |
+
{
|
| 1503 |
+
"type": "header",
|
| 1504 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1505 |
+
"bbox": [
|
| 1506 |
+
171,
|
| 1507 |
+
32,
|
| 1508 |
+
478,
|
| 1509 |
+
47
|
| 1510 |
+
],
|
| 1511 |
+
"page_idx": 12
|
| 1512 |
+
},
|
| 1513 |
+
{
|
| 1514 |
+
"type": "page_number",
|
| 1515 |
+
"text": "13",
|
| 1516 |
+
"bbox": [
|
| 1517 |
+
488,
|
| 1518 |
+
946,
|
| 1519 |
+
506,
|
| 1520 |
+
959
|
| 1521 |
+
],
|
| 1522 |
+
"page_idx": 12
|
| 1523 |
+
},
|
| 1524 |
+
{
|
| 1525 |
+
"type": "text",
|
| 1526 |
+
"text": "Appendices",
|
| 1527 |
+
"text_level": 1,
|
| 1528 |
+
"bbox": [
|
| 1529 |
+
426,
|
| 1530 |
+
99,
|
| 1531 |
+
571,
|
| 1532 |
+
125
|
| 1533 |
+
],
|
| 1534 |
+
"page_idx": 13
|
| 1535 |
+
},
|
| 1536 |
+
{
|
| 1537 |
+
"type": "text",
|
| 1538 |
+
"text": "TranSpeech: Speech-to-Speech Translation With Bilateral Perturbation",
|
| 1539 |
+
"text_level": 1,
|
| 1540 |
+
"bbox": [
|
| 1541 |
+
204,
|
| 1542 |
+
137,
|
| 1543 |
+
794,
|
| 1544 |
+
181
|
| 1545 |
+
],
|
| 1546 |
+
"page_idx": 13
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "text",
|
| 1550 |
+
"text": "A RELATED WORK",
|
| 1551 |
+
"bbox": [
|
| 1552 |
+
171,
|
| 1553 |
+
220,
|
| 1554 |
+
351,
|
| 1555 |
+
236
|
| 1556 |
+
],
|
| 1557 |
+
"page_idx": 13
|
| 1558 |
+
},
|
| 1559 |
+
{
|
| 1560 |
+
"type": "text",
|
| 1561 |
+
"text": "A.1 SELF-SUPERVISED REPRESENTATION LEARNING",
|
| 1562 |
+
"bbox": [
|
| 1563 |
+
171,
|
| 1564 |
+
255,
|
| 1565 |
+
555,
|
| 1566 |
+
270
|
| 1567 |
+
],
|
| 1568 |
+
"page_idx": 13
|
| 1569 |
+
},
|
| 1570 |
+
{
|
| 1571 |
+
"type": "text",
|
| 1572 |
+
"text": "There has been an increasing interest in self-supervised learning in the machine learning (Zhang et al., 2022a; Lam et al., 2021; Zhang et al., 2023b; 2022b) and multimodal processing community (Xia et al., 2022; Zhang et al., 2023a; Zhang et al.; Huang et al., 2022b;a). Wav2Vec 2.0 (Baevski et al., 2020) trains a convolutional neural network to distinguish true future samples from random distractor samples using a contrastive predictive coding (CPC) loss function. HuBERT (Hsu et al., 2021) is trained with a masked prediction with masked continuous audio signals. The majority of self-supervised representation learning models are trained by reconstructing (Chorowski et al., 2019) or predicting unseen speech signals (Chung et al., 2019), which would inevitably include factors unrelated to the linguistic content (i.e., acoustic condition).",
|
| 1573 |
+
"bbox": [
|
| 1574 |
+
169,
|
| 1575 |
+
284,
|
| 1576 |
+
826,
|
| 1577 |
+
411
|
| 1578 |
+
],
|
| 1579 |
+
"page_idx": 13
|
| 1580 |
+
},
|
| 1581 |
+
{
|
| 1582 |
+
"type": "text",
|
| 1583 |
+
"text": "A.2 PERTURBATION-BASED SPEECH REPROGRAMMING",
|
| 1584 |
+
"bbox": [
|
| 1585 |
+
171,
|
| 1586 |
+
433,
|
| 1587 |
+
568,
|
| 1588 |
+
446
|
| 1589 |
+
],
|
| 1590 |
+
"page_idx": 13
|
| 1591 |
+
},
|
| 1592 |
+
{
|
| 1593 |
+
"type": "text",
|
| 1594 |
+
"text": "Various approaches that perturb information flow in acoustic models have demonstrated the efficiency in promoting downstream performance: SpeechSplit (Qian et al., 2020), AutoPST (Qian et al., 2021), and NANSY (Choi et al., 2021) perturb the speech variations during the analysis stage to encourage the synthesis stage to use the supplied more stable representations. Voice2Series (Yang et al., 2021) introduces a novel end-to-end approach that reprograms pre-trained acoustic models for time series classification by input transformation learning and output label mapping. Wavprompt (Gao et al., 2022) utilizes the pre-trained audio encoder as part of an ASR to convert the speech in the demonstrations into embeddings digestible to the language model. For multi-lingual tuning, Yen et al. (2021) propose a novel adversarial reprogramming approach for low-resource spoken command recognition (SCR), which repurposes a pre-trained SCR model to modify the acoustic signals. In this work, we propose the bilateral perturbation technique with style normalization and information enhancement to perturb the acoustic conditions in speech.",
|
| 1595 |
+
"bbox": [
|
| 1596 |
+
169,
|
| 1597 |
+
460,
|
| 1598 |
+
826,
|
| 1599 |
+
628
|
| 1600 |
+
],
|
| 1601 |
+
"page_idx": 13
|
| 1602 |
+
},
|
| 1603 |
+
{
|
| 1604 |
+
"type": "text",
|
| 1605 |
+
"text": "A.3 NON-AUTOREGRESSIVE SEQUENCE GENERATION",
|
| 1606 |
+
"bbox": [
|
| 1607 |
+
171,
|
| 1608 |
+
652,
|
| 1609 |
+
563,
|
| 1610 |
+
666
|
| 1611 |
+
],
|
| 1612 |
+
"page_idx": 13
|
| 1613 |
+
},
|
| 1614 |
+
{
|
| 1615 |
+
"type": "text",
|
| 1616 |
+
"text": "An autoregressive model (Lin et al., 2021; Yin et al., 2021; 2022) takes in a source sequence and then generates target sentences one by one with the causal structure during the inference process. It prevents parallelism during inference, and thus the computational power of GPU cannot be fully exploited. To reduce the inference latency, (Gu et al., 2017) introduces a non-autoregressive (NAR) transformer-based approach with explicit word fertility, and identifies the multimodality problem of linguistic information between the source and target language. (Ghazvininejad et al., 2019) introduced the masked language modeling objective from BERT (Devlin et al., 2018) to non-autoregressively predict and refine translations. Besides the study of neural machine translation, many works bring NAR model into other sequence-to-sequence tasks (Cui et al., 2021; Ye et al., 2023; Huang et al., 2022c; Yang et al., 2022b), such as video caption (Yang et al., 2019), speech recognition (Chen et al., 2020) and speech synthesis (Ye et al., 2022; Huang et al., 2022d; Yang et al., 2023). In contrast, we focus on non-autoregressive generation in direct S2ST, which is relatively overlooked.",
|
| 1617 |
+
"bbox": [
|
| 1618 |
+
169,
|
| 1619 |
+
680,
|
| 1620 |
+
828,
|
| 1621 |
+
848
|
| 1622 |
+
],
|
| 1623 |
+
"page_idx": 13
|
| 1624 |
+
},
|
| 1625 |
+
{
|
| 1626 |
+
"type": "text",
|
| 1627 |
+
"text": "B MODEL ARCHITECTURES",
|
| 1628 |
+
"bbox": [
|
| 1629 |
+
171,
|
| 1630 |
+
875,
|
| 1631 |
+
423,
|
| 1632 |
+
888
|
| 1633 |
+
],
|
| 1634 |
+
"page_idx": 13
|
| 1635 |
+
},
|
| 1636 |
+
{
|
| 1637 |
+
"type": "text",
|
| 1638 |
+
"text": "In this section, we list the model hyper-parameters of TranSpeech in Table 4.",
|
| 1639 |
+
"bbox": [
|
| 1640 |
+
169,
|
| 1641 |
+
909,
|
| 1642 |
+
676,
|
| 1643 |
+
924
|
| 1644 |
+
],
|
| 1645 |
+
"page_idx": 13
|
| 1646 |
+
},
|
| 1647 |
+
{
|
| 1648 |
+
"type": "header",
|
| 1649 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1650 |
+
"bbox": [
|
| 1651 |
+
171,
|
| 1652 |
+
32,
|
| 1653 |
+
478,
|
| 1654 |
+
47
|
| 1655 |
+
],
|
| 1656 |
+
"page_idx": 13
|
| 1657 |
+
},
|
| 1658 |
+
{
|
| 1659 |
+
"type": "page_number",
|
| 1660 |
+
"text": "14",
|
| 1661 |
+
"bbox": [
|
| 1662 |
+
488,
|
| 1663 |
+
946,
|
| 1664 |
+
508,
|
| 1665 |
+
959
|
| 1666 |
+
],
|
| 1667 |
+
"page_idx": 13
|
| 1668 |
+
},
|
| 1669 |
+
{
|
| 1670 |
+
"type": "table",
|
| 1671 |
+
"img_path": "images/ab6590f313a2ee1189eb6f355b525b820c4cc141e6063d53268e424cf54f6bbd.jpg",
|
| 1672 |
+
"table_caption": [],
|
| 1673 |
+
"table_footnote": [],
|
| 1674 |
+
"table_body": "<table><tr><td colspan=\"2\">Hyperparameter</td><td>TranSpeech</td></tr><tr><td rowspan=\"6\">Conformer Encoder</td><td>Conv1d Layers</td><td>2</td></tr><tr><td>Conv1d Kernel</td><td>(5, 5)</td></tr><tr><td>Encoder Block</td><td>6</td></tr><tr><td>Encoder Hidden</td><td>512</td></tr><tr><td>Encoder Attention Heads</td><td>8</td></tr><tr><td>Encoder Dropout</td><td>0.1</td></tr><tr><td>Length Predictor</td><td>Projection Dim</td><td>512</td></tr><tr><td rowspan=\"5\">Unit Decoder</td><td>Unit Dictionary</td><td>1000</td></tr><tr><td>Decoder Block</td><td>6</td></tr><tr><td>Decoder Hidden</td><td>512</td></tr><tr><td>Decoder Attention Headers</td><td>8</td></tr><tr><td>Decoder Dropout</td><td>0.1</td></tr></table>",
|
| 1675 |
+
"bbox": [
|
| 1676 |
+
292,
|
| 1677 |
+
101,
|
| 1678 |
+
702,
|
| 1679 |
+
294
|
| 1680 |
+
],
|
| 1681 |
+
"page_idx": 14
|
| 1682 |
+
},
|
| 1683 |
+
{
|
| 1684 |
+
"type": "text",
|
| 1685 |
+
"text": "C IMPACT OF INDETERMINISTIC TRAINING TARGET",
|
| 1686 |
+
"text_level": 1,
|
| 1687 |
+
"bbox": [
|
| 1688 |
+
171,
|
| 1689 |
+
353,
|
| 1690 |
+
614,
|
| 1691 |
+
369
|
| 1692 |
+
],
|
| 1693 |
+
"page_idx": 14
|
| 1694 |
+
},
|
| 1695 |
+
{
|
| 1696 |
+
"type": "text",
|
| 1697 |
+
"text": "To visualize the acoustic multimodality and demonstrate the effectiveness of proposed bilateral perturbation, we apply the information bottleneck on acoustic features (i.e., rhythm, pitch, and energy) to create perturbed speech samples $\\hat{S}_r$ , $\\hat{S}_p$ , $\\hat{S}_e$ , respectively. We further plot the spectrogram and pitch contours of the original and acoustic-perturbed samples in Figure 5 in Appendix F. The unit error rate (UER) is further adopted as an evaluation matrix to measure the undeterminacy and multimodality according to acoustic variation, and we have the following observations: 1) In the pre-trained SSL model, the acoustic dynamics result in UERs by up to $22.7\\%$ (in rhythm), indicating the distinct alteration of derived representations. The pre-trained SSL model learns both linguistic and acoustic information given speech, and thus the units derived from speech with the same content can be indeterministic; however, 2) with the proposed bilateral perturbation (BiP), a distinct drop of UER (in energy) by up to $82.8\\%$ could be witnessed, demonstrating the efficiency of BiP in producing deterministic representations referring to linguistic content.",
|
| 1698 |
+
"bbox": [
|
| 1699 |
+
169,
|
| 1700 |
+
386,
|
| 1701 |
+
826,
|
| 1702 |
+
556
|
| 1703 |
+
],
|
| 1704 |
+
"page_idx": 14
|
| 1705 |
+
},
|
| 1706 |
+
{
|
| 1707 |
+
"type": "table",
|
| 1708 |
+
"img_path": "images/5b5a07b9c9cab83eb872e043cd5ad3a55161c99ffbb3117a65890deeaf9ad88b.jpg",
|
| 1709 |
+
"table_caption": [
|
| 1710 |
+
"Table 4: Hyperparameters of TranSpeech."
|
| 1711 |
+
],
|
| 1712 |
+
"table_footnote": [],
|
| 1713 |
+
"table_body": "<table><tr><td>Acoustic</td><td>Pretrained</td><td>BiP-Tuned</td></tr><tr><td>Reference</td><td>0.0</td><td>0.0</td></tr><tr><td>Rhythmˆr</td><td>22.7</td><td>10.2</td></tr><tr><td>Pitchˆp</td><td>16.3</td><td>4.3</td></tr><tr><td>Energyˆe</td><td>10.5</td><td>1.8</td></tr></table>",
|
| 1714 |
+
"bbox": [
|
| 1715 |
+
361,
|
| 1716 |
+
571,
|
| 1717 |
+
635,
|
| 1718 |
+
671
|
| 1719 |
+
],
|
| 1720 |
+
"page_idx": 14
|
| 1721 |
+
},
|
| 1722 |
+
{
|
| 1723 |
+
"type": "text",
|
| 1724 |
+
"text": "Table 5: We calculate UER between units derived from original and perturbed speeches respectively using the pre-trained and fine-tuned SSL model, which is calculated averaged over the dataset. It measures the ability of the SSL model to generate acoustic-agnostic representations referring to linguistic content.",
|
| 1725 |
+
"bbox": [
|
| 1726 |
+
169,
|
| 1727 |
+
681,
|
| 1728 |
+
826,
|
| 1729 |
+
738
|
| 1730 |
+
],
|
| 1731 |
+
"page_idx": 14
|
| 1732 |
+
},
|
| 1733 |
+
{
|
| 1734 |
+
"type": "text",
|
| 1735 |
+
"text": "D EVALUATION ON SPEECH QUALITY",
|
| 1736 |
+
"text_level": 1,
|
| 1737 |
+
"bbox": [
|
| 1738 |
+
171,
|
| 1739 |
+
771,
|
| 1740 |
+
500,
|
| 1741 |
+
787
|
| 1742 |
+
],
|
| 1743 |
+
"page_idx": 14
|
| 1744 |
+
},
|
| 1745 |
+
{
|
| 1746 |
+
"type": "text",
|
| 1747 |
+
"text": "Following the publicly-available implementation fairseq (Ott et al., 2019), we include the SNR as an evaluation matrix to measure the speech quality across the test set. We approximate the noise by subtracting the output of the enhancement model from the input-noisy speech and then compute the SNR between the two. Further, we conduct crowd-sourced human evaluations with MOS, rated from 1 to 5 and reported with $95\\%$ confidence intervals (CI). For easy comparison, the results are compiled and presented in the following table:",
|
| 1748 |
+
"bbox": [
|
| 1749 |
+
169,
|
| 1750 |
+
804,
|
| 1751 |
+
823,
|
| 1752 |
+
888
|
| 1753 |
+
],
|
| 1754 |
+
"page_idx": 14
|
| 1755 |
+
},
|
| 1756 |
+
{
|
| 1757 |
+
"type": "text",
|
| 1758 |
+
"text": "As illustrated in Table 6, TranSpeech has achieved the SNR and MOS with scores of 46.56 and 4.03 competitive with the baseline systems. Since we apply the publicly-available pre-trained unit",
|
| 1759 |
+
"bbox": [
|
| 1760 |
+
169,
|
| 1761 |
+
895,
|
| 1762 |
+
823,
|
| 1763 |
+
925
|
| 1764 |
+
],
|
| 1765 |
+
"page_idx": 14
|
| 1766 |
+
},
|
| 1767 |
+
{
|
| 1768 |
+
"type": "header",
|
| 1769 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1770 |
+
"bbox": [
|
| 1771 |
+
173,
|
| 1772 |
+
32,
|
| 1773 |
+
478,
|
| 1774 |
+
47
|
| 1775 |
+
],
|
| 1776 |
+
"page_idx": 14
|
| 1777 |
+
},
|
| 1778 |
+
{
|
| 1779 |
+
"type": "page_number",
|
| 1780 |
+
"text": "15",
|
| 1781 |
+
"bbox": [
|
| 1782 |
+
490,
|
| 1783 |
+
946,
|
| 1784 |
+
506,
|
| 1785 |
+
959
|
| 1786 |
+
],
|
| 1787 |
+
"page_idx": 14
|
| 1788 |
+
},
|
| 1789 |
+
{
|
| 1790 |
+
"type": "table",
|
| 1791 |
+
"img_path": "images/8a1a9c37fad9aca8d57cbe0fab2c988075e9c2f5bc1a71d275e488ed82d20d39.jpg",
|
| 1792 |
+
"table_caption": [],
|
| 1793 |
+
"table_footnote": [],
|
| 1794 |
+
"table_body": "<table><tr><td>Method</td><td>SNR (↑)</td><td>MOS (↑)</td></tr><tr><td>Translation GT</td><td>/</td><td>4.22±0.06</td></tr><tr><td>DirectS2ST</td><td>46.45</td><td>4.01±0.07</td></tr><tr><td>TextlessS2ST</td><td>47.22</td><td>4.05±0.06</td></tr><tr><td>TranSpeech</td><td>46.56</td><td>4.03±0.06</td></tr></table>",
|
| 1795 |
+
"bbox": [
|
| 1796 |
+
354,
|
| 1797 |
+
101,
|
| 1798 |
+
643,
|
| 1799 |
+
200
|
| 1800 |
+
],
|
| 1801 |
+
"page_idx": 15
|
| 1802 |
+
},
|
| 1803 |
+
{
|
| 1804 |
+
"type": "text",
|
| 1805 |
+
"text": "Table 6: Speech quality (SNR(↑) and MOS(↑)) comparison with baseline systems.",
|
| 1806 |
+
"bbox": [
|
| 1807 |
+
223,
|
| 1808 |
+
210,
|
| 1809 |
+
767,
|
| 1810 |
+
226
|
| 1811 |
+
],
|
| 1812 |
+
"page_idx": 15
|
| 1813 |
+
},
|
| 1814 |
+
{
|
| 1815 |
+
"type": "text",
|
| 1816 |
+
"text": "vocoder and leave it unchanged for unit-to-speech, we expect our model to exhibit high-quality speech generation as baseline models while achieving a significant improvement in translation accuracy.",
|
| 1817 |
+
"bbox": [
|
| 1818 |
+
169,
|
| 1819 |
+
251,
|
| 1820 |
+
823,
|
| 1821 |
+
280
|
| 1822 |
+
],
|
| 1823 |
+
"page_idx": 15
|
| 1824 |
+
},
|
| 1825 |
+
{
|
| 1826 |
+
"type": "text",
|
| 1827 |
+
"text": "E INFORMATION ENHANCEMENT",
|
| 1828 |
+
"text_level": 1,
|
| 1829 |
+
"bbox": [
|
| 1830 |
+
171,
|
| 1831 |
+
300,
|
| 1832 |
+
467,
|
| 1833 |
+
316
|
| 1834 |
+
],
|
| 1835 |
+
"page_idx": 15
|
| 1836 |
+
},
|
| 1837 |
+
{
|
| 1838 |
+
"type": "text",
|
| 1839 |
+
"text": "We apply the following functions (Qian et al., 2020; Choi et al., 2021) on acoustic features (e.g., rhythm, pitch, and energy) to create acoustic-perturbed speech samples $\\hat{S}$ , while the linguistic content remains unchanged, including 1) formant shifting $fs$ , 2) pitch randomization $pr$ , 3) random frequency shaping using a parametric equalizer $peq$ , and 4) random resampling $RR$ . As shown in Figure 4, we further illustrate the mel-spectrogram of the single-perturbed utterance in bilateral perturbation.",
|
| 1840 |
+
"bbox": [
|
| 1841 |
+
169,
|
| 1842 |
+
330,
|
| 1843 |
+
826,
|
| 1844 |
+
404
|
| 1845 |
+
],
|
| 1846 |
+
"page_idx": 15
|
| 1847 |
+
},
|
| 1848 |
+
{
|
| 1849 |
+
"type": "list",
|
| 1850 |
+
"sub_type": "text",
|
| 1851 |
+
"list_items": [
|
| 1852 |
+
"- For $fs$ , a formant shifting ratio is sampled uniformly from $\\mathrm{Unif}(1, 1.4)$ . After sampling the ratio, we again randomly decided whether to take the reciprocal of the sampled ratio or not.",
|
| 1853 |
+
"- In $pr$ , a pitch shift ratio and pitch range ratio are sampled uniformly from Unif(1,2) and Unif(1,1.5), respectively. Again, we randomly decide whether to take the reciprocal of the sampled ratios or not. For more details for formant shifting and pitch randomization, please refer to Parselmouth https://github.com/YannickJadoul/Parselmouth.",
|
| 1854 |
+
"- peq represents a serial composition of low-shelving, peaking, and high-shelving filters. We use one low-shelving HLS, one high-shelving HHS, and eight peaking filters HPeak.",
|
| 1855 |
+
"- $RR$ denotes a random resampling to modify the rhythm. The input signal is divided into segments, whose length is randomly uniformly drawn from 19 frames to 32 frames (Polyak & Wolf, 2019). Each segment is resampled using linear interpolation with a resampling factor randomly drawn from 0.5 to 1.5."
|
| 1856 |
+
],
|
| 1857 |
+
"bbox": [
|
| 1858 |
+
171,
|
| 1859 |
+
415,
|
| 1860 |
+
826,
|
| 1861 |
+
597
|
| 1862 |
+
],
|
| 1863 |
+
"page_idx": 15
|
| 1864 |
+
},
|
| 1865 |
+
{
|
| 1866 |
+
"type": "text",
|
| 1867 |
+
"text": "F VISUALIZATION OF ACOUSTIC-PERTURBED SPEECH SAMPLES",
|
| 1868 |
+
"text_level": 1,
|
| 1869 |
+
"bbox": [
|
| 1870 |
+
171,
|
| 1871 |
+
622,
|
| 1872 |
+
718,
|
| 1873 |
+
638
|
| 1874 |
+
],
|
| 1875 |
+
"page_idx": 15
|
| 1876 |
+
},
|
| 1877 |
+
{
|
| 1878 |
+
"type": "header",
|
| 1879 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 1880 |
+
"bbox": [
|
| 1881 |
+
171,
|
| 1882 |
+
32,
|
| 1883 |
+
478,
|
| 1884 |
+
47
|
| 1885 |
+
],
|
| 1886 |
+
"page_idx": 15
|
| 1887 |
+
},
|
| 1888 |
+
{
|
| 1889 |
+
"type": "page_number",
|
| 1890 |
+
"text": "16",
|
| 1891 |
+
"bbox": [
|
| 1892 |
+
488,
|
| 1893 |
+
946,
|
| 1894 |
+
508,
|
| 1895 |
+
960
|
| 1896 |
+
],
|
| 1897 |
+
"page_idx": 15
|
| 1898 |
+
},
|
| 1899 |
+
{
|
| 1900 |
+
"type": "image",
|
| 1901 |
+
"img_path": "images/69666a79505d661721df1773c65b245c42ea336990d62b9c20169542dad1e09b.jpg",
|
| 1902 |
+
"image_caption": [
|
| 1903 |
+
"Pitch Mean"
|
| 1904 |
+
],
|
| 1905 |
+
"image_footnote": [],
|
| 1906 |
+
"bbox": [
|
| 1907 |
+
174,
|
| 1908 |
+
152,
|
| 1909 |
+
331,
|
| 1910 |
+
244
|
| 1911 |
+
],
|
| 1912 |
+
"page_idx": 16
|
| 1913 |
+
},
|
| 1914 |
+
{
|
| 1915 |
+
"type": "image",
|
| 1916 |
+
"img_path": "images/4034b952ce7adc674a101878aa6b23596aff9c087be86a4c1aae97eaba61e19e.jpg",
|
| 1917 |
+
"image_caption": [],
|
| 1918 |
+
"image_footnote": [],
|
| 1919 |
+
"bbox": [
|
| 1920 |
+
334,
|
| 1921 |
+
156,
|
| 1922 |
+
490,
|
| 1923 |
+
244
|
| 1924 |
+
],
|
| 1925 |
+
"page_idx": 16
|
| 1926 |
+
},
|
| 1927 |
+
{
|
| 1928 |
+
"type": "image",
|
| 1929 |
+
"img_path": "images/edcf2b439d69d16b96c320ef72295a3b8c71652742eae069973dd07d3ccce2b0.jpg",
|
| 1930 |
+
"image_caption": [
|
| 1931 |
+
"Energy Norm"
|
| 1932 |
+
],
|
| 1933 |
+
"image_footnote": [],
|
| 1934 |
+
"bbox": [
|
| 1935 |
+
506,
|
| 1936 |
+
156,
|
| 1937 |
+
663,
|
| 1938 |
+
246
|
| 1939 |
+
],
|
| 1940 |
+
"page_idx": 16
|
| 1941 |
+
},
|
| 1942 |
+
{
|
| 1943 |
+
"type": "image",
|
| 1944 |
+
"img_path": "images/41a3350c7a6b795cd139d1d9230ba1a6bc6e4815b17dc596d34c08d0377a9293.jpg",
|
| 1945 |
+
"image_caption": [],
|
| 1946 |
+
"image_footnote": [],
|
| 1947 |
+
"bbox": [
|
| 1948 |
+
666,
|
| 1949 |
+
157,
|
| 1950 |
+
823,
|
| 1951 |
+
247
|
| 1952 |
+
],
|
| 1953 |
+
"page_idx": 16
|
| 1954 |
+
},
|
| 1955 |
+
{
|
| 1956 |
+
"type": "image",
|
| 1957 |
+
"img_path": "images/e790f68efd35325727aeb2a42b19d2c1f210868b9214991a0c7592aedd726ea2.jpg",
|
| 1958 |
+
"image_caption": [
|
| 1959 |
+
"F"
|
| 1960 |
+
],
|
| 1961 |
+
"image_footnote": [],
|
| 1962 |
+
"bbox": [
|
| 1963 |
+
178,
|
| 1964 |
+
268,
|
| 1965 |
+
331,
|
| 1966 |
+
358
|
| 1967 |
+
],
|
| 1968 |
+
"page_idx": 16
|
| 1969 |
+
},
|
| 1970 |
+
{
|
| 1971 |
+
"type": "image",
|
| 1972 |
+
"img_path": "images/702425bc84e4bd0726742da17f04562fa566116e66b148c430d0416dc72d3aa1.jpg",
|
| 1973 |
+
"image_caption": [],
|
| 1974 |
+
"image_footnote": [],
|
| 1975 |
+
"bbox": [
|
| 1976 |
+
334,
|
| 1977 |
+
268,
|
| 1978 |
+
490,
|
| 1979 |
+
357
|
| 1980 |
+
],
|
| 1981 |
+
"page_idx": 16
|
| 1982 |
+
},
|
| 1983 |
+
{
|
| 1984 |
+
"type": "image",
|
| 1985 |
+
"img_path": "images/769a8ef965b1ff16ff86052648145527ccd792ae7d57eb6d5767660b94b9e4a1.jpg",
|
| 1986 |
+
"image_caption": [
|
| 1987 |
+
"RR"
|
| 1988 |
+
],
|
| 1989 |
+
"image_footnote": [],
|
| 1990 |
+
"bbox": [
|
| 1991 |
+
506,
|
| 1992 |
+
268,
|
| 1993 |
+
661,
|
| 1994 |
+
358
|
| 1995 |
+
],
|
| 1996 |
+
"page_idx": 16
|
| 1997 |
+
},
|
| 1998 |
+
{
|
| 1999 |
+
"type": "image",
|
| 2000 |
+
"img_path": "images/bb8ac6cf61419638fafdfe8a4d71c13d3fa2ea6bb6fe02fe3c5c7ed0364a815a.jpg",
|
| 2001 |
+
"image_caption": [],
|
| 2002 |
+
"image_footnote": [],
|
| 2003 |
+
"bbox": [
|
| 2004 |
+
666,
|
| 2005 |
+
268,
|
| 2006 |
+
821,
|
| 2007 |
+
357
|
| 2008 |
+
],
|
| 2009 |
+
"page_idx": 16
|
| 2010 |
+
},
|
| 2011 |
+
{
|
| 2012 |
+
"type": "image",
|
| 2013 |
+
"img_path": "images/45a27e5af0e3fd0a816d7c31938adaaa6b8cf3987c6b3947ae798d0143182723.jpg",
|
| 2014 |
+
"image_caption": [
|
| 2015 |
+
"Un-Perturbed Source Speech"
|
| 2016 |
+
],
|
| 2017 |
+
"image_footnote": [],
|
| 2018 |
+
"bbox": [
|
| 2019 |
+
348,
|
| 2020 |
+
382,
|
| 2021 |
+
504,
|
| 2022 |
+
472
|
| 2023 |
+
],
|
| 2024 |
+
"page_idx": 16
|
| 2025 |
+
},
|
| 2026 |
+
{
|
| 2027 |
+
"type": "image",
|
| 2028 |
+
"img_path": "images/000f1fcc3c55602688cace4b6c1ccda510c4da7de2d6be9345038421a3759701.jpg",
|
| 2029 |
+
"image_caption": [
|
| 2030 |
+
"Figure 4: Spectrogram and pitch contours of the utterance with the single-perturbed acoustic condition, remaining the linguistic content (\"really interesting work will finally be undertaken on that topic\") unchanged. RR: random resampling. F: a chain function $F = f s(pr(peq(x)))$ for random pitch shifting."
|
| 2031 |
+
],
|
| 2032 |
+
"image_footnote": [],
|
| 2033 |
+
"bbox": [
|
| 2034 |
+
509,
|
| 2035 |
+
383,
|
| 2036 |
+
665,
|
| 2037 |
+
473
|
| 2038 |
+
],
|
| 2039 |
+
"page_idx": 16
|
| 2040 |
+
},
|
| 2041 |
+
{
|
| 2042 |
+
"type": "image",
|
| 2043 |
+
"img_path": "images/116ff469e139b762bb2dbaef29a640c0d01e4d417409f3a49a220955d839b4d4.jpg",
|
| 2044 |
+
"image_caption": [
|
| 2045 |
+
"Reference (Un-Perturbed)"
|
| 2046 |
+
],
|
| 2047 |
+
"image_footnote": [],
|
| 2048 |
+
"bbox": [
|
| 2049 |
+
155,
|
| 2050 |
+
676,
|
| 2051 |
+
323,
|
| 2052 |
+
792
|
| 2053 |
+
],
|
| 2054 |
+
"page_idx": 16
|
| 2055 |
+
},
|
| 2056 |
+
{
|
| 2057 |
+
"type": "image",
|
| 2058 |
+
"img_path": "images/b26a49c88ceefdf033712e4ff4b33d84b2541c559cc4f5c67c35c8864a85858c.jpg",
|
| 2059 |
+
"image_caption": [
|
| 2060 |
+
"Energy-Perturbed"
|
| 2061 |
+
],
|
| 2062 |
+
"image_footnote": [],
|
| 2063 |
+
"bbox": [
|
| 2064 |
+
326,
|
| 2065 |
+
676,
|
| 2066 |
+
493,
|
| 2067 |
+
792
|
| 2068 |
+
],
|
| 2069 |
+
"page_idx": 16
|
| 2070 |
+
},
|
| 2071 |
+
{
|
| 2072 |
+
"type": "image",
|
| 2073 |
+
"img_path": "images/7da5e4ebb6f0938103f72d764ced35564468daebeb5a80ce0020b8418e55b6ca.jpg",
|
| 2074 |
+
"image_caption": [
|
| 2075 |
+
"Pitch-Perturbed",
|
| 2076 |
+
"Figure 5: Spectrogram and pitch contours of speech sample with the perturbed acoustic condition, remaining the linguistic content (\"really interesting work.\") unchanged. The altered units are printed in red upside the spectrogram."
|
| 2077 |
+
],
|
| 2078 |
+
"image_footnote": [],
|
| 2079 |
+
"bbox": [
|
| 2080 |
+
500,
|
| 2081 |
+
676,
|
| 2082 |
+
666,
|
| 2083 |
+
792
|
| 2084 |
+
],
|
| 2085 |
+
"page_idx": 16
|
| 2086 |
+
},
|
| 2087 |
+
{
|
| 2088 |
+
"type": "image",
|
| 2089 |
+
"img_path": "images/8ad43dd309243c97fbd8b0dfe18ee442d4f7cac8c3faba2ecb7ef559ca13c3dc.jpg",
|
| 2090 |
+
"image_caption": [
|
| 2091 |
+
"Rhythm-Perturbed"
|
| 2092 |
+
],
|
| 2093 |
+
"image_footnote": [],
|
| 2094 |
+
"bbox": [
|
| 2095 |
+
674,
|
| 2096 |
+
676,
|
| 2097 |
+
841,
|
| 2098 |
+
792
|
| 2099 |
+
],
|
| 2100 |
+
"page_idx": 16
|
| 2101 |
+
},
|
| 2102 |
+
{
|
| 2103 |
+
"type": "header",
|
| 2104 |
+
"text": "Published as a conference paper at ICLR 2023",
|
| 2105 |
+
"bbox": [
|
| 2106 |
+
173,
|
| 2107 |
+
32,
|
| 2108 |
+
478,
|
| 2109 |
+
47
|
| 2110 |
+
],
|
| 2111 |
+
"page_idx": 16
|
| 2112 |
+
},
|
| 2113 |
+
{
|
| 2114 |
+
"type": "page_number",
|
| 2115 |
+
"text": "17",
|
| 2116 |
+
"bbox": [
|
| 2117 |
+
488,
|
| 2118 |
+
946,
|
| 2119 |
+
506,
|
| 2120 |
+
959
|
| 2121 |
+
],
|
| 2122 |
+
"page_idx": 16
|
| 2123 |
+
}
|
| 2124 |
+
]
|
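For readers who want to consume the content-list file reconstructed above, here is a minimal sketch; it assumes only what the entries themselves show (a JSON array of blocks with "type", "text", "bbox", and "page_idx" fields), and the helper name is ours, not part of the dataset tooling:

```python
import json

def page_text(path: str, page: int) -> str:
    """Return the body text of one page in top-to-bottom reading order."""
    with open(path, encoding="utf-8") as f:
        blocks = json.load(f)
    # Keep prose blocks only; headers, page numbers, tables, and images are skipped.
    keep = [b for b in blocks
            if b.get("page_idx") == page and b.get("type") in ("text", "ref_text")]
    # The second bbox coordinate is the top edge in page coordinates.
    keep.sort(key=lambda b: b.get("bbox", [0, 0, 0, 0])[1])
    return "\n\n".join(b["text"] for b in keep)

print(page_text(
    "2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/"
    "8496ecb1-f6c2-4cfc-afce-97312b8c1375_content_list.json", 8))
```

The same filter can be extended to "list" blocks (which carry `list_items` instead of `text`) if the reference entries are needed as well.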
2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/8496ecb1-f6c2-4cfc-afce-97312b8c1375_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2fee1a1a124f252184be55f6bb6f10ee3cd95782a1e5ede2bf1cd5478a31263
+size 1820662
2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/full.md
ADDED
@@ -0,0 +1,369 @@
# TRANSPEECH: SPEECH-TO-SPEECH TRANSLATION WITH BILATERAL PERTURBATION

Rongjie Huang$^{1*}$, Jinglin Liu$^{1*}$, Huadai Liu$^{1*}$, Yi Ren$^{2}$, Lichao Zhang$^{1}$, Jinzheng He$^{1}$, Zhou Zhao$^{1\dagger}$

<sup>1</sup>Zhejiang University {rongjiehuang, jinglinliu, huadailiu, zhaozhou}@zju.edu.cn

<sup>2</sup>ByteDance ren.yi@bytedance.com

# ABSTRACT

Direct speech-to-speech translation (S2ST) with discrete units leverages recent progress in speech representation learning. Specifically, a sequence of discrete representations derived in a self-supervised manner is predicted by the model and passed to a vocoder for speech reconstruction, while two challenges remain: 1) acoustic multimodality: the discrete units derived from speech with the same content can be indeterministic due to acoustic properties (e.g., rhythm, pitch, and energy), which causes a deterioration of translation accuracy; 2) high latency: current S2ST systems use autoregressive models that predict each unit conditioned on the sequence generated so far, failing to take full advantage of parallelism. In this work, we propose TranSpeech, a speech-to-speech translation model with bilateral perturbation. To alleviate the acoustic multimodal problem, we propose bilateral perturbation (BiP), which consists of a style normalization stage and an information enhancement stage, to learn only the linguistic information from speech samples and generate more deterministic representations. With reduced multimodality, we step forward and become the first to establish a non-autoregressive S2ST technique, which repeatedly masks and predicts unit choices and produces high-accuracy results in just a few cycles. Experimental results on three language pairs demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with a baseline textless S2ST model. Moreover, our parallel decoding shows a significant reduction of inference latency, enabling a speedup of up to $21.4\mathrm{x}$ over the autoregressive technique.$^{1}$
# 1 INTRODUCTION

Speech-to-speech translation (S2ST) aims at converting speech in one language into speech in another, significantly breaking down communication barriers between people who do not share a common language. Among conventional methods (Lavie et al., 1997; Nakamura et al., 2006; Wahlster, 2013), cascaded systems of automatic speech recognition (ASR), machine translation (MT), or speech-to-text translation (S2T) followed by text-to-speech synthesis (TTS) have demonstrated reasonable results yet suffer from expensive computational costs. Compared to these cascaded systems, the recently proposed direct S2ST literature (Jia et al., 2019; Zhang et al., 2020; Jia et al., 2021; Lee et al., 2021a;b) demonstrates the benefit of lower latency, as fewer decoding stages are needed.

Among them, Lee et al. (2021a;b) leverage recent progress on self-supervised discrete units learned from unlabeled speech to build textless S2ST systems, further supporting translation between unwritten languages. As illustrated in Figure 1(a), the unit-based textless S2ST system consists of a speech-to-unit translation (S2UT) model followed by a unit-based vocoder that converts discrete units to speech, leading to a significant improvement over previous literature.

(a) Direct speech-to-speech translation (S2ST) system (b) Multimodality challenges

Figure 1: 1) Acoustic multimodality: speech with the same content "Vielen dank" can differ due to a variety of acoustic conditions; 2) Linguistic multimodality (Gu et al., 2017; Wang et al., 2019): there are multiple correct target translations ("Danke schon" and "Vielen dank") for the same source word/phrase/sentence ("Thank you").

In modern textless speech-to-speech translation (S2ST), our goals are mainly two-fold: 1) high quality: direct S2ST is challenging, especially without using transcriptions; and 2) low latency: high inference speed is essential for real-time translation. However, the current development of the unit-based textless S2ST system is hampered by two major challenges. 1) It is challenging to achieve high translation accuracy due to acoustic multimodality (illustrated in the orange dotted box in Figure 1(b)): unlike the language tokens (e.g., BPE) used in text translation, the self-supervised representations derived from speech with the same content can differ under a variety of acoustic conditions (e.g., speaker identity, rhythm, pitch, and energy), since they encode both linguistic content and acoustic information. As such, the indeterministic training target for speech-to-unit translation fails to yield good results. 2) Building a parallel model upon multimodal S2ST systems with reasonable accuracy is challenging, as it introduces further indeterminacy: a non-autoregressive (NAR) S2ST system generates all tokens in parallel without the constraint of sequential dependency, making it a poor approximation to the actual target distribution. With the acoustic multimodality unsettled, parallel decoding approaches increasingly burden S2ST in capturing the distribution of target translations.
In this work, we propose TranSpeech, a fast speech-to-speech translation model with bilateral perturbation. To tackle the acoustic multimodal challenge, we propose a Bilateral Perturbation (BiP) technique that finetunes a self-supervised speech representation learning model with a CTC loss to generate deterministic representations agnostic to acoustic variation. Based on a preliminary speech analysis that decomposes a signal into linguistic and acoustic information, bilateral perturbation consists of 1) a style normalization stage, which eliminates the acoustic-style information in speech and creates the style-agnostic "pseudo text" for finetuning; and 2) an information enhancement stage, which applies an information bottleneck to create speech samples that vary in acoustic conditions (i.e., rhythm, pitch, and energy) while preserving the linguistic information. The proposed bilateral perturbation guarantees that the speech encoder learns only the linguistic information from acoustic-variant speech samples, significantly reducing the acoustic multimodality in unit-based S2ST.

The proposed bilateral perturbation eases acoustic multimodality and makes NAR generation possible. As such, we further step forward and become the first to establish a NAR S2ST technique, which repeatedly masks and predicts unit choices and produces high-accuracy results in just a few cycles. Experimental results on three language pairs demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with baseline textless S2ST models. The parallel decoding algorithm requires as few as 2 iterations to generate samples that outperform competing systems, enabling a speedup of up to $21.4\mathrm{x}$ compared to the autoregressive baseline. TranSpeech further enjoys a speed-performance trade-off with advanced decoding choices, including multiple iterations, length beam, and noisy parallel decoding, trading up to 3 BLEU points in translation results. The main contributions of this work include:

- Through preliminary speech analysis, we propose bilateral perturbation, which assists in generating deterministic representations agnostic to acoustic variation. This novel technique alleviates the acoustic multimodal challenge and leads to a significant improvement in S2ST.

- We step forward and become the first to establish a non-autoregressive S2ST technique with a mask-predict algorithm to speed up the inference procedure (a decoding sketch follows this list). To further reduce the linguistic multimodality in NAR translation, we apply the knowledge distillation technique and construct a less noisy and more deterministic corpus.

- Experimental results on three language pairs demonstrate that BiP yields an improvement of 2.9 BLEU on average compared with baseline textless S2ST models. In terms of inference speed, our parallel decoding enables a speedup of up to $21.4\mathrm{x}$ compared to the autoregressive baseline.
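The mask-predict loop referenced in the second contribution is only named in this section, so the following is a minimal decoding sketch in the spirit of Ghazvininejad et al. (2019); `model` (a hypothetical conditional masked unit decoder returning per-position logits), `mask_id`, and the linear re-masking schedule are illustrative assumptions, not the exact TranSpeech configuration:

```python
import torch

@torch.no_grad()
def mask_predict(model, src_feats, tgt_len, mask_id, n_iter=2):
    # Start from a fully masked target of the predicted length.
    tokens = torch.full((1, tgt_len), mask_id, dtype=torch.long)
    probs = torch.zeros(1, tgt_len)
    for t in range(n_iter):
        logits = model(src_feats, tokens)            # (1, tgt_len, n_units)
        new_probs, new_tokens = logits.softmax(-1).max(-1)
        masked = tokens.eq(mask_id)                  # only fill masked slots
        tokens[masked] = new_tokens[masked]
        probs[masked] = new_probs[masked]
        if t == n_iter - 1:
            break
        # Linear schedule: re-mask the least confident positions for refinement.
        n_mask = int(tgt_len * (1.0 - (t + 1) / n_iter))
        worst = probs[0].topk(n_mask, largest=False).indices
        tokens[0, worst] = mask_id
        probs[0, worst] = 0.0
    return tokens
```

With `n_iter=1` this degenerates to one-shot fully parallel decoding; the paper reports that as few as 2 iterations already outperform the autoregressive baseline at a fraction of the latency.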
# 2 BACKGROUND: DIRECT SPEECH-TO-SPEECH TRANSLATION

Direct speech-to-speech translation has made huge progress to date. Translatotron (Jia et al., 2019) is the first direct S2ST model and shows reasonable translation accuracy and speech naturalness. Translatotron 2 (Jia et al., 2021) utilizes an auxiliary target phoneme decoder to promote translation quality but still needs phoneme data during training. UWSpeech (Zhang et al., 2020) builds a VQ-VAE model and discards transcripts in the target language, while paired speech and phoneme corpora of a written language are still required.

Most recently, a direct S2ST system (Lee et al., 2021a) takes advantage of self-supervised learning (SSL) and demonstrates results without using text data. However, the majority of SSL models are trained by reconstructing (Chorowski et al., 2019) or predicting unseen speech signals (Chung et al., 2019), which inevitably includes factors unrelated to the linguistic content (i.e., the acoustic condition). As such, the indeterministic training target for speech-to-unit translation fails to yield good results. The textless S2ST system (Lee et al., 2021b) further demonstrates that speaker-invariant representations can be obtained by finetuning the SSL model to disentangle the speaker-dependent information. However, this system only constrains speaker identity, and the remaining aspects (i.e., content, rhythm, pitch, and energy) are still lumped together.

At the same time, various approaches that perturb information flow to fine-tune acoustic models have demonstrated their efficiency in promoting downstream performance. A line of works (Yang et al., 2021; Gao et al., 2022) utilizes pre-trained encoders and introduces approaches that reprogram acoustic models for downstream tasks. For multi-lingual tuning, Yen et al. (2021) propose a novel adversarial reprogramming approach for low-resource spoken command recognition (SCR). Sharing a common insight, we tune a pre-trained acoustic model with the bilateral perturbation technique and generate more deterministic units agnostic to acoustic conditions, including rhythm, pitch, and energy. Following the common textless setup in Figure 1(a), we design a challenging NAR S2ST technique, especially for applications requiring low latency. More details are attached in Appendix A.
| 47 |
+
# 3 SPEECH ANALYSIS AND BILATERAL PERTURBATION
# 3.1 ACOUSTIC MULTIMODALITY
As reported in the previous textless S2ST system (Lee et al., 2021b), speech representations predicted by a self-supervised pre-trained model include both linguistic and acoustic information. As such, the representations derived from speech samples with the same content can differ due to acoustic variation, and the indeterministic training target for speech-to-unit translation (as illustrated in Figure 1(a)) fails to yield good results. To address this multimodality issue, we conduct a preliminary speech analysis and introduce the bilateral perturbation technique. More details on how indeterministic units influence S2ST are provided in Appendix C.
# 3.2 SPEECH ANALYSIS
In this part, we decompose speech variation (Cui et al., 2022; Huang et al., 2021; Yang et al., 2022a) into linguistic content and acoustic conditions (e.g., speaker identity, rhythm, pitch, and energy) and provide a brief primer on each of these components.

Linguistic Content represents the meaning of speech signals. To translate a speech sample into another language, learning the linguistic information from the speech signal is crucial; Speaker Identity is perceived as the voice characteristics of a speaker; Rhythm characterizes how fast the speaker utters each syllable, and duration plays a vital role in acoustic variation; Pitch is an essential component of intonation, which results from a constant attempt to hit the pitch targets of each syllable; Energy affects the volume of speech, where stress and tone correspond to different energy values.

(a) Speech Analysis and Bilateral Perturbation (b) TranSpeech

Figure 2: In subfigure (a), we use $RR$ and $F$ to denote random resampling and a chain function for random pitch shifting, respectively. In subfigure (b), the sinusoidal-like symbol denotes the positional encoding, and we stack $N_{b}$ encoder and decoder blocks. During training, we randomly select the masked positions and compute the cross-entropy loss (denoted as "CE").
# 3.3 BILATERAL PERTURBATION
To alleviate the multimodality problem and increase translation accuracy in the S2ST system, we propose bilateral perturbation, which disentangles the acoustic variation and generates deterministic speech representations according to the linguistic content. Specifically, we leverage the success of connectionist temporal classification (CTC) fine-tuning (Baevski et al., 2019) with a pre-trained speech encoder, using perturbed input speech and a normalized target. Since obtaining speaker-invariant representations has been well studied (Lee et al., 2021b; Hsu et al., 2020), we focus on the more challenging acoustic conditions in a single-speaker scenario, including rhythm, pitch, and energy variations.
# 3.3.1 OVERVIEW
Denote the domain of speech samples by $S \subset \mathbb{R}$, and the speech perturbed by style normalization and information enhancement by $\overline{S}$ and $\hat{S}$, respectively. The source speech is a sequence of samples $X = \{x_{1},\dots,x_{N^{\prime}}\}$, where $N^{\prime}$ is the number of frames in the source speech. The SSL model is composed of a multi-layer convolutional feature encoder $f$, which takes raw audio $S$ as input and outputs discrete latent speech representations. In the end, the audio in the target language is represented as discrete units $Y = \{y_{1},\ldots,y_{N}\}$, where $N$ is the number of units. The overview of the information flow is shown in Figure 2(a). We tackle the multimodality on both sides of CTC fine-tuning, including 1) a style normalization stage that eliminates the acoustic information in the CTC target and creates acoustic-agnostic "pseudo text"; and 2) an information enhancement stage that applies bottlenecks to acoustic features to create speech samples variant in acoustic conditions (e.g., rhythm, pitch, and energy) while preserving the linguistic content. Finally, we train an ASR model using the perturbed speech $\hat{S}$ as input and the "pseudo text" as the target. As a result, given speech samples with acoustic variation, the ASR model with CTC decoding is encouraged to learn the "average" information referring to linguistic content and to generate deterministic representations, significantly reducing multimodality and promoting speech-to-unit translation. In the following subsections, we present the bilateral perturbation technique in detail.
# 3.3.2 STYLE NORMALIZATION
To create the acoustic-agnostic "pseudo text" for CTC fine-tuning, the acoustic-style information should be eliminated and disentangled: 1) we first compute the average pitch fundamental frequency $\bar{p}$ and energy $\bar{e}$ values in the original dataset $S$; 2) for each sample in $S$, we conduct pitch shifting to $\overline{p}$ and normalize its energy to $\overline{e}$, resulting in a new dataset $\overline{S}$ with the averaged acoustic condition, where the style-specific information has been eliminated; and finally, 3) the self-supervised learning (SSL) model encodes $\overline{S}$ and creates the normalized targets for CTC fine-tuning.
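As a concrete illustration, the following is a minimal sketch of the style-normalization stage, assuming `librosa` for F0 estimation and pitch shifting and plain RMS scaling for energy; the helper names and the semitone-based shift are our own simplifications, not the paper's exact implementation.

```python
import numpy as np
import librosa

def mean_f0(wav, sr):
    # Estimate the average voiced F0 (Hz) of an utterance with pYIN.
    f0, voiced, _ = librosa.pyin(wav, fmin=65.0, fmax=400.0, sr=sr)
    return float(np.nanmean(f0[voiced])) if np.any(voiced) else 0.0

def style_normalize(wav, sr, p_bar, e_bar):
    """Shift a sample's pitch to the corpus mean F0 (p_bar) and
    rescale its energy to the corpus mean RMS (e_bar)."""
    f0 = mean_f0(wav, sr)
    if f0 > 0:
        n_steps = 12.0 * np.log2(p_bar / f0)  # semitone offset to the mean pitch
        wav = librosa.effects.pitch_shift(wav, sr=sr, n_steps=n_steps)
    rms = np.sqrt(np.mean(wav ** 2)) + 1e-8
    return wav * (e_bar / rms)                # energy normalization

# The corpus statistics p_bar / e_bar would be averaged over the dataset S;
# the normalized set S_bar is then encoded by the SSL model into CTC targets.
```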
# 3.3.3 INFORMATION ENHANCEMENT
Given speech samples with different acoustic conditions, the ASR model is expected to learn a deterministic representation referring to the linguistic content. As such, we apply the following functions as information bottlenecks on acoustic features (e.g., rhythm, pitch, and energy) to create highly acoustic-variant speech samples $\hat{S}$ while the linguistic content remains unchanged, including 1) formant shifting $fs$, 2) pitch randomization $pr$, 3) random frequency shaping using a parametric equalizer $peq$, and 4) random resampling $RR$:

- For rhythm information, random resampling $RR$ divides the input into segments of random lengths, and we randomly stretch or squeeze each segment along the time dimension (see the sketch after this list).
- For pitch information, we apply the chain function $F = fs(pr(peq(S)))$ to randomly shift the pitch value of the original speech $S$.
- For energy information, we perturb the audio in the waveform domain.

The perturbed waveforms $\hat{S}$ are highly variant in acoustic features (i.e., rhythm, pitch, and energy) while preserving the linguistic information. This encourages the speech encoder to learn the "acoustic-averaged" information referring to linguistic content and to generate deterministic representations. The hyperparameters of the perturbation functions are included in Appendix E.
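As an example, here is a minimal sketch of the $RR$ rhythm perturbation on a 1-D signal, using segment lengths of 19-32 frames and resampling factors in $[0.5, 1.5]$ as specified in Appendix E; the use of NumPy linear interpolation is our own assumption.

```python
import numpy as np

def random_resample(frames, rng=np.random.default_rng()):
    """Random resampling (RR): split a frame sequence into random-length
    segments and stretch/squeeze each one by linear interpolation."""
    out, i = [], 0
    while i < len(frames):
        seg_len = int(rng.integers(19, 33))   # segment length ~ Unif{19..32} frames
        seg = frames[i:i + seg_len]
        factor = rng.uniform(0.5, 1.5)        # resampling factor ~ Unif(0.5, 1.5)
        new_len = max(1, int(round(len(seg) * factor)))
        # Linear interpolation along time rescales the segment's duration.
        src = np.linspace(0, len(seg) - 1, new_len)
        out.append(np.interp(src, np.arange(len(seg)), seg))
        i += seg_len
    return np.concatenate(out)
```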
# 4 TRANSPEECH
The S2ST pipeline is illustrated in Figure 2(a): we 1) use the SSL HuBERT model (Hsu et al., 2021) tuned by BiP to derive discrete units of the target speech; 2) build the sequence-to-sequence model TranSpeech for speech-to-unit translation (S2UT); and 3) apply a separately trained unit-based vocoder to convert the translated units into a waveform.

In this section, we first give an overview of the encoder-decoder architecture of TranSpeech, after which we introduce the knowledge distillation procedure that alleviates the linguistic multimodality challenge. Finally, we present the mask-predict algorithm for both training and decoding and include more advanced decoding choices.
# 4.1 ARCHITECTURE
The overall architecture is illustrated in Figure 2(b), and we provide more details on the encoder and decoder blocks in Appendix B.

Conformer Encoder. Different from previous textless S2ST literature (Lee et al., 2021b), we use conformer blocks (Gulati et al., 2020) in place of transformer blocks (Vaswani et al., 2017). The conformer model (Guo et al., 2021; Chen et al., 2021) has demonstrated its efficiency in combining convolutional neural networks and transformers to model both local and global dependencies of audio in a parameter-efficient way, achieving state-of-the-art results on various downstream tasks. Furthermore, we employ multi-head self-attention with the relative sinusoidal positional encoding scheme from Transformer-XL (Dai et al., 2019), which improves the robustness of the self-attention module and generalizes better to different utterance lengths.

Non-autoregressive Unit Decoder. Current S2ST systems utilize autoregressive S2UT models and suffer from high inference latency. Given the $N'$-frame source speech $X = \{x_{1},\ldots,x_{N'}\}$, an autoregressive model $\theta$ factors the distribution over possible outputs $Y = \{y_{1},\dots,y_{N}\}$ as $p(Y\mid X;\theta) = \prod_{i = 1}^{N + 1}p(y_i\mid y_{0:i - 1},x_{1:N'};\theta)$, where the special tokens $y_0$ ($\langle bos\rangle$) and $y_{N + 1}$ ($\langle eos\rangle$) represent the beginning and end of the target units.

Unlike the relatively well-studied non-autoregressive (NAR) MT (Gu et al., 2017; Wang et al., 2019; Gu et al., 2019; Ghazvininejad et al., 2019; Yin et al., 2023), building NAR S2UT models that generate units in parallel is much more challenging due to the joint linguistic and acoustic multimodality. Yet the proposed bilateral perturbation eases this acoustic multimodality and makes NAR modeling feasible. As such, we take a further step and become the first to establish a NAR S2ST model $\theta$.

The NAR model assumes that the target sequence length $N$ can be modeled with a separate conditional distribution $p_L$, and the distribution becomes $p(Y \mid X; \theta) = p_L(N \mid x_{1:N'}; \theta) \cdot \prod_{i=1}^{N} p(y_i \mid x_{1:N'}; \theta)$. The target units are conditionally independent of each other, and the individual probability $p$ is predicted for each token in $Y$. Since the length of the target units $N$ must be given in advance, TranSpeech predicts it by pooling the encoder outputs into a length predictor.
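To make the factorization concrete, here is a minimal PyTorch sketch of a NAR unit head with a pooled length predictor; the 1000-unit vocabulary follows the paper's setup, while the class name, dimensions, and mean pooling are our own assumptions.

```python
import torch
import torch.nn as nn

class NARUnitHead(nn.Module):
    """Predicts the target length from pooled encoder states, then emits
    per-position unit distributions conditioned only on the source."""
    def __init__(self, d_model=512, n_units=1000, max_len=1024):
        super().__init__()
        self.length_predictor = nn.Linear(d_model, max_len)  # models p_L(N | x)
        self.unit_classifier = nn.Linear(d_model, n_units)   # models p(y_i | x)

    def forward(self, enc_out, dec_states):
        # enc_out: (B, N', d_model) encoder states; dec_states: (B, N, d_model).
        pooled = enc_out.mean(dim=1)                   # pool over source frames
        length_logits = self.length_predictor(pooled)  # distribution over lengths
        unit_logits = self.unit_classifier(dec_states) # conditionally independent units
        return length_logits, unit_logits
```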
# 4.2 LINGUISTIC MULTIMODALITY
As illustrated in Figure 1(b), there may be multiple valid translations for the same source utterance, and this linguistic multimodality degrades the ability of NAR models to properly capture the target distribution. To alleviate it, we apply knowledge distillation and construct a sampled translation corpus from an autoregressive teacher, which is less noisy and more deterministic than the original one. The knowledge of the AR model is thus distilled into the NAR model, helping it capture the target distribution with better accuracy.
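A minimal sketch of this sequence-level distillation step follows; `teacher` stands for a trained autoregressive S2UT model with a beam-search `translate` method, which is a hypothetical interface rather than a specific fairseq API.

```python
def build_distilled_corpus(teacher, train_set, beam=5):
    """Sequence-level knowledge distillation: replace each reference unit
    sequence with the AR teacher's beam-search output, yielding a less
    noisy, more deterministic target distribution for the NAR student."""
    distilled = []
    for source_speech, _reference_units in train_set:
        teacher_units = teacher.translate(source_speech, beam=beam)  # hypothetical API
        distilled.append((source_speech, teacher_units))
    return distilled
```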
# 4.3 MASK-PREDICT
The NAR unit decoder applies the mask-predict algorithm (Ghazvininejad et al., 2019) to repeatedly reconsider unit choices and produce high-accuracy translation results in just a few cycles.

Training. During training, the target units are predicted conditioned on the source speech sample $X$ and the unmasked target units $Y_{obs}$. As illustrated in Figure 2(b), given the length $N$ of the target sequence, we first sample the number of masked units from a uniform distribution $n \sim \mathrm{Unif}(\{1,\dots,N\})$ and then randomly choose the masked positions. For the learning objective, we compute the cross-entropy (CE) loss with label smoothing between the generated and target units at the masked positions, and the CE loss for target length prediction is added on top.
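A minimal PyTorch sketch of this masking scheme, assuming a reserved `MASK` unit id and a `model(x, y_obs)` interface returning per-position logits; label smoothing and the length loss are omitted for brevity.

```python
import torch
import torch.nn.functional as F

MASK = 1000  # assumed id of a reserved <mask> unit outside the 1000-unit vocabulary

def mask_predict_training_step(model, x, y):
    """Sample n ~ Unif{1..N}, mask n random target positions, and compute
    the CE loss only on the masked positions."""
    B, N = y.shape
    losses = []
    for b in range(B):
        n = torch.randint(1, N + 1, ()).item()          # number of masked units
        pos = torch.randperm(N)[:n]                     # random masked positions
        y_obs = y[b].clone()
        y_obs[pos] = MASK
        logits = model(x[b:b + 1], y_obs.unsqueeze(0))  # predict all positions in parallel
        losses.append(F.cross_entropy(logits[0, pos], y[b, pos]))
    return torch.stack(losses).mean()
```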
Decoding. In inference, the algorithm runs for pre-determined $T$ times of iterative refinement, and we perform a mask operation at each iteration, followed by predict.
|
| 121 |
+
|
| 122 |
+
In the first iteration $t = 0$ , we predict the length $N$ of target sequence and mask all units $Y = \{y_{1},\ldots ,y_{N}\}$ . In the following iterations, we mask $n$ units with the lowest probability scores $p$ :
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
Y _ {m a s k} ^ {t} = \arg \min _ {i} (p _ {i}, n) \quad Y _ {o b s} ^ {t} = Y \backslash Y _ {m a s k} ^ {t}, \tag {1}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
where $n$ is a function of the iteration $t$ , and we use linear decay $n = N \cdot \frac{T - t}{T}$ in this work.
|
| 129 |
+
|
| 130 |
+
After masking, TranSpeech predicts the masked units $Y_{mask}^{t}$ conditioned on the source speech $X$ and unmasked units $Y_{obs}$ . We select the prediction with the highest probability $p$ for each $y_{i} \in Y_{mask}^{t}$ and update its probability score accordingly:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
y _ {i} ^ {t} = \arg \max _ {w} P \left(y _ {i} = w \mid X, Y _ {o b s} ^ {t}; \theta\right) \quad p _ {i} ^ {t} = \max _ {w} P \left(y _ {i} = w \mid X, Y _ {o b s} ^ {t}; \theta\right) \tag {2}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
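Putting Eq. (1) and Eq. (2) together, a minimal sketch of the decoding loop might look as follows; `model(x, y)` is assumed to return per-position log-probabilities over the unit vocabulary.

```python
import torch

def mask_predict_decode(model, x, N, T=5, MASK=1000):
    """Iterative mask-predict decoding: start fully masked, then for T steps
    re-mask the n least-confident units (linear decay) and re-predict them."""
    y = torch.full((N,), MASK)
    p = torch.zeros(N)
    for t in range(T):
        if t > 0:
            n = int(N * (T - t) / T)                # linear decay of the masked count
            mask_pos = p.argsort()[:n]              # Eq. (1): lowest-confidence units
            y[mask_pos] = MASK
        log_probs = model(x, y)                     # (N, vocab) log-probabilities
        probs, preds = log_probs.exp().max(dim=-1)  # Eq. (2): argmax and its score
        masked = y == MASK
        y[masked], p[masked] = preds[masked], probs[masked]
    return y
```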
# 4.4 ADVANCED DECODING CHOICES
Target Length Beam. It has been reported (Ghazvininejad et al., 2019) that translating multiple candidate sequences of different lengths can improve performance. As such, we select the top $K$ length candidates with the highest probabilities and decode the same example with the different lengths in parallel. We then pick the sequence with the highest average log-probability as our result. This avoids a distinct increase in decoding time, since the computation can be batched.
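A minimal, unbatched sketch of the length beam on top of the decoding loop above; scoring the final prediction by its average per-unit log-probability is our reading of the selection rule, and the batching optimization is omitted.

```python
def decode_with_length_beam(model, x, length_logits, K=5, T=5):
    """Decode the same source with the top-K predicted lengths and keep the
    candidate whose units have the highest average log-probability."""
    top_lengths = length_logits.topk(K).indices.tolist()
    best, best_score = None, float("-inf")
    for N in top_lengths:
        N = max(1, N)                                 # guard against a zero-length candidate
        y = mask_predict_decode(model, x, N, T=T)
        score = model(x, y).max(dim=-1).values.mean().item()  # average log-probability
        if score > best_score:
            best, best_score = y, score
    return best
```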
Noisy Parallel Decoding. The absence of an AR decoding procedure makes it more difficult to capture the target distribution in S2ST. To reach a more accurate optimum of the target distribution and compute the best translation among the candidate sequences, we use the autoregressive teacher to identify the best overall translation.
# 5 EXPERIMENTS
# 5.1 EXPERIMENTAL SETUP
Following common practice in the direct S2ST pipeline, we apply the publicly available pre-trained multilingual HuBERT (mHuBERT) model and the unit-based HiFi-GAN vocoder (Polyak et al., 2021; Kong et al., 2020) and leave them unchanged.

Dataset. For a fair comparison, we use the benchmark CVSS-C dataset (Jia et al., 2022), which is derived from the CoVoST 2 (Wang et al., 2020b) speech-to-text translation corpus by synthesizing the translation text into speech with a single-speaker TTS system. To evaluate the proposed model, we conduct experiments on three language pairs: French-English (Fr-En), English-Spanish (En-Es), and English-French (En-Fr).

Model Configurations and Training. For bilateral perturbation, we fine-tune the publicly available mHuBERT model for each language separately with the CTC loss for 25k updates using the Adam optimizer $(\beta_{1} = 0.9, \beta_{2} = 0.98, \epsilon = 10^{-8})$. Following the practice in textless S2ST (Lee et al., 2021b), we use the k-means algorithm to cluster the representations given by the tuned mHuBERT into a vocabulary of 1000 units. TranSpeech computes 80-dimensional mel-filterbank features every 10 ms for the source speech as input, and we set $N_{b}$ to 6 for the encoder and decoder blocks. When training TranSpeech, we remove the auxiliary tasks for simplicity and follow the unwritten-language scenario. TranSpeech is trained until convergence for 200k steps on 1 Tesla V100 GPU. A comprehensive table of hyperparameters is available in Appendix B.
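The unit-extraction step can be sketched as follows, assuming the mHuBERT frame-level features are already dumped as a NumPy array; the use of scikit-learn's MiniBatchKMeans is our own assumption.

```python
from sklearn.cluster import MiniBatchKMeans

def learn_units(features, n_units=1000):
    """Cluster frame-level SSL features into a discrete unit vocabulary."""
    km = MiniBatchKMeans(n_clusters=n_units, batch_size=10000)
    km.fit(features)  # features: (n_frames, feature_dim)
    return km

def speech_to_units(km, utterance_features):
    # Each frame maps to its nearest centroid id; collapsing consecutive
    # repeats yields the reduced unit sequence used as the translation target.
    ids = km.predict(utterance_features)
    return [int(u) for i, u in enumerate(ids) if i == 0 or u != ids[i - 1]]
```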
Evaluation and Baseline Models. For translation accuracy, we pre-train an ASR model to generate the corresponding text of the translated speech and then calculate the BLEU score (Papineni et al., 2002) between the generated and the reference text. For decoding speed, latency is computed as the time to decode a single n-frame speech sample, averaged over the test set on 1 V100 GPU.

We compare TranSpeech with other systems built on the publicly available fairseq framework (Ott et al., 2019): 1) Direct ASR, where we transcribe the S2ST data with an open-sourced ASR model as the reference and compute BLEU; 2) Direct TTS, where we synthesize speech samples from the target units, then transcribe the speech to text and compute BLEU; 3) an S2T+TTS cascaded system, where we train the S2T basic transformer model (Wang et al., 2020a) and then apply a TTS model (Ren et al., 2020; Kong et al., 2020) for speech generation; 4) the basic transformer (Lee et al., 2021a) without using text; and 5) the basic norm transformer (Lee et al., 2021b) with speaker normalization.
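For reference, computing corpus-level BLEU over ASR transcripts could look like the following; `sacrebleu` and the `asr.transcribe` interface are our own choices, since the paper only specifies BLEU computed after ASR.

```python
import sacrebleu

def asr_bleu(asr, translated_wavs, reference_texts):
    """Transcribe translated speech with a pre-trained ASR model and score
    the transcripts against the references with corpus BLEU."""
    hypotheses = [asr.transcribe(wav) for wav in translated_wavs]  # assumed ASR interface
    return sacrebleu.corpus_bleu(hypotheses, [reference_texts]).score
```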
# 5.2 TRANSLATION ACCURACY AND SPEECH NATURALNESS
Table 1 summarizes the translation accuracy and inference latency of all systems, and we make the following observations: 1) Bilateral perturbation (3 vs. 4) improves S2ST performance by a large margin of 2.9 BLEU points. The proposed technique addresses acoustic multimodality by disentangling the acoustic information and learning linguistic representations of speech samples, which produces more deterministic targets for speech-to-unit translation. 2) The conformer architecture (2 vs. 3) brings a 2.2 BLEU gain in translation accuracy. It combines convolutional neural networks and transformers in a joint architecture, demonstrating a superior ability to learn local and global dependencies of audio. 3) Knowledge distillation (6 vs. 7) is shown to alleviate the linguistic multimodality: training on the distilled corpus provides a distinct gain of around 1 BLEU point. For speech quality, we attach the evaluation in Appendix D. Considering the speed-performance trade-off of the NAR unit decoder, we find that more iterative cycles (7 vs. 8) or advanced decoding methods (e.g., length beam (8 vs. 9) and noisy parallel decoding (9 vs. 10)) further improve translation accuracy, trading up to 1.5 BLEU points during decoding. Compared with baseline systems, TranSpeech achieves higher BLEU scores than the best publicly available direct S2ST baselines (2 vs. 6) by a considerable margin; in fact, only 2 mask-predict iterations (see Figure 3(b)) are necessary to achieve a new SOTA on textless S2ST.
# 5.3 DECODING SPEED
We visualize the relationship between the translation latency and the length of the input speech in Figure 3(a). As can be seen, the autoregressive baselines have a latency linear in the decoding length, while NAR TranSpeech stays nearly constant for typical lengths, even with multiple cycles of mask-predict iterative refinement. We further illustrate the versatile speed-performance trade-off of NAR decoding in Figure 3(b). TranSpeech enables a speedup of up to $21.4\mathrm{x}$ compared to the autoregressive baseline. Alternatively, it can retain the highest quality, with a BLEU of 18.39, while still gaining a $253\%$ speedup.

Table 1: Translation quality (BLEU scores $(\uparrow)$) and inference speed (frames/second $(\uparrow)$) compared with baseline systems. We set the beam size to 5 in autoregressive decoding and apply 5 iterative cycles in NAR naive decoding. $\dagger$: In this work, we remove the auxiliary tasks (e.g., source and target CTC, auto-encoding) when training the S2ST system for simplicity. Though the S2ST system could be further improved with auxiliary tasks, this is beyond our focus. BiP: bilateral perturbation; NPD: noisy parallel decoding; b: length beam in NAR decoding.

<table><tr><td>ID</td><td>Model</td><td>BiP</td><td>Fr-En</td><td>En-Fr</td><td>En-Es</td><td>Speed</td><td>Speedup</td></tr><tr><td colspan="8">Autoregressive models</td></tr><tr><td>1</td><td>Basic Transformer (Lee et al., 2021a)†</td><td>×</td><td>15.44</td><td>15.28</td><td>10.07</td><td rowspan="2">870</td><td rowspan="2">1.00×</td></tr><tr><td>2</td><td>Basic Norm Transformer (Lee et al., 2021b)†</td><td>×</td><td>15.81</td><td>15.93</td><td>12.98</td></tr><tr><td>3</td><td>Basic Conformer</td><td>×</td><td>18.02</td><td>17.07</td><td>13.75</td><td rowspan="2">895</td><td rowspan="2">1.02×</td></tr><tr><td>4</td><td>Basic Conformer</td><td>✓</td><td>22.39</td><td>19.65</td><td>14.94</td></tr><tr><td colspan="8">Non-autoregressive models with naive decoding</td></tr><tr><td>5</td><td>TranSpeech - Distill</td><td>×</td><td>14.86</td><td>14.12</td><td>10.27</td><td rowspan="3">9610</td><td rowspan="3">11.04×</td></tr><tr><td>6</td><td>TranSpeech - Distill</td><td>✓</td><td>16.23</td><td>15.90</td><td>10.94</td></tr><tr><td>7</td><td>TranSpeech</td><td>✓</td><td>17.24</td><td>16.30</td><td>11.79</td></tr><tr><td colspan="8">Non-autoregressive models with advanced decoding</td></tr><tr><td>8</td><td>TranSpeech (iter=15)</td><td>✓</td><td>18.03</td><td>16.97</td><td>12.62</td><td>4651</td><td>5.34×</td></tr><tr><td>9</td><td>TranSpeech (iter=15 + b=15)</td><td>✓</td><td>18.10</td><td>17.05</td><td>12.70</td><td>2394</td><td>2.75×</td></tr><tr><td>10</td><td>TranSpeech (iter=15 + b=15 + NPD)</td><td>✓</td><td>18.39</td><td>17.50</td><td>12.77</td><td>2208</td><td>2.53×</td></tr><tr><td colspan="8">Cascaded systems</td></tr><tr><td>11</td><td>S2T + TTS</td><td>/</td><td>27.17</td><td>34.85</td><td>32.86</td><td>/</td><td>/</td></tr><tr><td>12</td><td>Direct ASR</td><td>/</td><td>71.61</td><td>50.92</td><td>68.75</td><td>/</td><td>/</td></tr><tr><td>13</td><td>Direct TTS</td><td>/</td><td>82.41</td><td>76.87</td><td>83.69</td><td>/</td><td>/</td></tr></table>

(a) Translation latency (b) Performance-speed trade-off.

Figure 3: The translation latency is computed as the time to decode an n-frame speech sample, averaged over the test set using 1 NVIDIA V100 GPU. b: length beam. NPD: noisy parallel decoding.
# 5.4 CASE STUDY
We present several translation examples sampled from the Fr-En language pair in Table 2 and make the following findings: 1) models trained on the original units suffer severely from noisy and incomplete translation due to the indeterministic training targets, while with bilateral perturbation this multimodality issue is largely alleviated; 2) the advanced decoding methods lead to a distinct improvement in translation accuracy. As can be seen, the results produced by TranSpeech with advanced decoding (more iterations and NPD), while of a similar quality to those produced by the autoregressive basic conformer, are noticeably more literal.

Table 2: Two examples comparing translations produced by TranSpeech and baseline models. We use bold font to indicate instances of noisy and incomplete translation.

<table><tr><td>Source:</td><td>l'origine de la rue est liée à la construction de la place rihour.</td></tr><tr><td>Target:</td><td>the origin of the street is linked to the construction of rihour square.</td></tr><tr><td>Basic Conformer:</td><td>the origin of the street is linked to the construction of the.</td></tr><tr><td>TranSpeech:</td><td>th origin of the seti is linked to the construction of the rear.</td></tr><tr><td>TranSpeech+BiP:</td><td>the origin of the street is linked to the construction of the ark.</td></tr><tr><td>TranSpeech+BiP+Advanced:</td><td>the origin of the street is linked to the construction of the work.</td></tr><tr><td>Source:</td><td>il participe aux activités du patronage laïquè et des pionniers de saint-ouen.</td></tr><tr><td>Target:</td><td>he participates in the secular patronage and pioneer activities of saint ouen.</td></tr><tr><td>Basic Conformer:</td><td>he participated in the activities of the late patronage a d see.</td></tr><tr><td>TranSpeech:</td><td>he takes in the patronage activities in of saint.</td></tr><tr><td>TranSpeech+BiP:</td><td>he participated in the activities of the lake patronage and say pointing</td></tr><tr><td>TranSpeech+BiP+Advanced:</td><td>he participated in the activities of the wake patronage and saint pioneers</td></tr></table>
# 5.5 ABLATION STUDY
We conduct ablation studies to demonstrate the effectiveness of several designs in this work, including the bilateral perturbation and the conformer architecture in TranSpeech. The results are presented in Table 3, and we make the following observations: 1) style normalization and information enhancement in bilateral perturbation both bring a performance gain, and they work jointly to learn deterministic representations, leading to improvements in translation accuracy; 2) replacing the relative positional encoding in the self-attention layer with the vanilla one (Vaswani et al., 2017) causes a distinct degradation in translation accuracy, demonstrating the superior capability of modeling both local and global audio dependencies brought by our architectural design.

Table 3: Ablation study results. SN: style normalization; IE: information enhancement; PE: positional encoding.

<table><tr><td>ID</td><td>Model</td><td>PE</td><td>Fr-En</td><td>En-Fr</td><td>En-Es</td></tr><tr><td>1</td><td>Basic Conformer</td><td>Relative</td><td>18.02</td><td>17.07</td><td>13.75</td></tr><tr><td>2</td><td>Basic Conformer + IE</td><td>Relative</td><td>21.98</td><td>19.60</td><td>14.91</td></tr><tr><td>3</td><td>Basic Conformer + SN</td><td>Relative</td><td>21.54</td><td>18.53</td><td>13.97</td></tr><tr><td>4</td><td>Basic Conformer</td><td>Absolute</td><td>17.23</td><td>16.19</td><td>13.06</td></tr></table>
# 6 CONCLUSION
In this work, we proposed TranSpeech, a speech-to-speech translation model with bilateral perturbation. To tackle the acoustic multimodality issue in S2ST, bilateral perturbation, consisting of style normalization and information enhancement, was proposed to learn only the linguistic information from acoustic-variant speech samples. It assists in generating deterministic representations agnostic to acoustic conditions, significantly reducing the acoustic multimodality and making non-autoregressive (NAR) generation feasible. As such, we took a further step and became the first to establish a NAR S2ST technique. TranSpeech takes full advantage of parallelism and leverages the mask-predict algorithm to generate results in a constant number of iterations. To address linguistic multimodality, we applied knowledge distillation and constructed a less noisy sampled translation corpus. Experimental results demonstrated that BiP yields an improvement of 2.9 BLEU on average compared with baseline textless S2ST models. Moreover, TranSpeech shows a significant improvement in inference latency: it requires as few as 2 iterations to generate samples that outperform the baselines, enabling decoding up to $21.4\mathrm{x}$ faster than the autoregressive baseline. We envisage that our work will serve as a basis for future textless S2ST studies.
# ACKNOWLEDGEMENTS
This work was supported in part by the National Natural Science Foundation of China under Grant No. 62222211, the National Key R&D Program of China under Grant No. 2020YFC0832505, Zhejiang Electric Power Co., Ltd. Science and Technology Project No. 5211YF22006, and Yiwise.
# REFERENCES
Alexei Baevski, Michael Auli, and Abdelrahman Mohamed. Effectiveness of self-supervised pre-training for speech recognition. arXiv preprint arXiv:1911.03912, 2019.

Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460, 2020.

Nanxin Chen, Shinji Watanabe, Jesús Villalba, Piotr Želasko, and Najim Dehak. Non-autoregressive transformer for speech recognition. IEEE Signal Processing Letters, 28:121-125, 2020.

Sanyuan Chen, Yu Wu, Zhuo Chen, Jian Wu, Jinyu Li, Takuya Yoshioka, Chengyi Wang, Shujie Liu, and Ming Zhou. Continuous speech separation with conformer. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5749-5753. IEEE, 2021.

Hyeong-Seok Choi, Juheon Lee, Wansoo Kim, Jie Lee, Hoon Heo, and Kyogu Lee. Neural analysis and synthesis: Reconstructing speech from self-supervised representations. Advances in Neural Information Processing Systems, 34, 2021.

Jan Chorowski, Ron J Weiss, Samy Bengio, and Aaron Van Den Oord. Unsupervised speech representation learning using wavenet autoencoders. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 27(12):2041-2053, 2019.

Yu-An Chung, Wei-Ning Hsu, Hao Tang, and James Glass. An unsupervised autoregressive model for speech representation learning. arXiv preprint arXiv:1904.03240, 2019.

Chenye Cui, Yi Ren, Jinglin Liu, Feiyang Chen, Rongjie Huang, Ming Lei, and Zhou Zhao. Emovie: A mandarin emotion speech dataset with a simple emotional text-to-speech model. arXiv preprint arXiv:2106.09317, 2021.

Chenye Cui, Yi Ren, Jinglin Liu, Rongjie Huang, and Zhou Zhao. Varietysound: Timbre-controllable video to sound generation via unsupervised information disentanglement. arXiv preprint arXiv:2211.10666, 2022.

Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.

Heting Gao, Junrui Ni, Kaizhi Qian, Yang Zhang, Shiyu Chang, and Mark Hasegawa-Johnson. Wavprompt: Towards few-shot spoken language understanding with frozen language models. arXiv preprint arXiv:2203.15863, 2022.

Marjan Ghazvininejad, Omer Levy, Yinhan Liu, and Luke Zettlemoyer. Mask-predict: Parallel decoding of conditional masked language models. arXiv preprint arXiv:1904.09324, 2019.

Jiatao Gu, James Bradbury, Caiming Xiong, Victor OK Li, and Richard Socher. Non-autoregressive neural machine translation. arXiv preprint arXiv:1711.02281, 2017.

Jiatao Gu, Changhan Wang, and Junbo Zhao. Levenshtein transformer. Advances in Neural Information Processing Systems, 32, 2019.

Anmol Gulati, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, Shibo Wang, Zhengdong Zhang, Yonghui Wu, et al. Conformer: Convolution-augmented transformer for speech recognition. arXiv preprint arXiv:2005.08100, 2020.

Pengcheng Guo, Florian Boyer, Xuankai Chang, Tomoki Hayashi, Yosuke Higuchi, Hirofumi Inaguma, Naoyuki Kamo, Chenda Li, Daniel Garcia-Romero, Jiatong Shi, et al. Recent developments on espnet toolkit boosted by conformer. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5874-5878. IEEE, 2021.

Wei-Ning Hsu, David Harwath, Christopher Song, and James Glass. Text-free image-to-speech synthesis using learned segmental units. arXiv preprint arXiv:2012.15454, 2020.

Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021.

Rongjie Huang, Feiyang Chen, Yi Ren, Jinglin Liu, Chenye Cui, and Zhou Zhao. Multi-singer: Fast multi-singer singing voice vocoder with a large-scale corpus. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 3945-3954, 2021.

Rongjie Huang, Chenye Cui, Feiyang Chen, Yi Ren, Jinglin Liu, Zhou Zhao, Baoxing Huai, and Zhefeng Wang. Singgan: Generative adversarial network for high-fidelity singing voice generation. In Proceedings of the 30th ACM International Conference on Multimedia, pp. 2525-2535, 2022a.

Rongjie Huang, Max WY Lam, Jun Wang, Dan Su, Dong Yu, Yi Ren, and Zhou Zhao. Fastdiff: A fast conditional diffusion model for high-quality speech synthesis. arXiv preprint arXiv:2204.09934, 2022b.

Rongjie Huang, Yi Ren, Jinglin Liu, Chenye Cui, and Zhou Zhao. Generspeech: Towards style transfer for generalizable out-of-domain text-to-speech synthesis. arXiv preprint arXiv:2205.07211, 2022c.

Rongjie Huang, Zhou Zhao, Huadai Liu, Jinglin Liu, Chenye Cui, and Yi Ren. Prodiff: Progressive fast diffusion model for high-quality text-to-speech. arXiv preprint arXiv:2207.06389, 2022d.

Ye Jia, Ron J Weiss, Fadi Biadsy, Wolfgang Macherey, Melvin Johnson, Zhifeng Chen, and Yonghui Wu. Direct speech-to-speech translation with a sequence-to-sequence model. arXiv preprint arXiv:1904.06037, 2019.

Ye Jia, Michelle Tadmor Ramanovich, Tal Remez, and Roi Pomerantz. Translatotron 2: Robust direct speech-to-speech translation. arXiv preprint arXiv:2107.08661, 2021.

Ye Jia, Michelle Tadmor Ramanovich, Quan Wang, and Heiga Zen. Cvss corpus and massively multilingual speech-to-speech translation. arXiv preprint arXiv:2201.03713, 2022.

Jungil Kong, Jaehyeon Kim, and Jaekyoung Bae. Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis. Advances in Neural Information Processing Systems, 33:17022-17033, 2020.

Max WY Lam, Jun Wang, Rongjie Huang, Dan Su, and Dong Yu. Bilateral denoising diffusion models. arXiv preprint arXiv:2108.11514, 2021.

Alon Lavie, Alex Waibel, Lori Levin, Michael Finke, Donna Gates, Marsal Gavalda, Torsten Zeppenfeld, and Puming Zhan. Janus-iii: Speech-to-speech translation in multiple languages. In 1997 IEEE International Conference on Acoustics, Speech, and Signal Processing, volume 1, pp. 99-102. IEEE, 1997.

Ann Lee, Peng-Jen Chen, Changhan Wang, Jiatao Gu, Xutai Ma, Adam Polyak, Yossi Adi, Qing He, Yun Tang, Juan Pino, et al. Direct speech-to-speech translation with discrete units. arXiv preprint arXiv:2107.05604, 2021a.

Ann Lee, Hongyu Gong, Paul-Ambroise Duquenne, Holger Schwenk, Peng-Jen Chen, Changhan Wang, Sravya Popuri, Juan Pino, Jiatao Gu, and Wei-Ning Hsu. Textless speech-to-speech translation on real data. arXiv preprint arXiv:2112.08352, 2021b.

Zhijie Lin, Zhou Zhao, Haoyuan Li, Jinglin Liu, Meng Zhang, Xingshan Zeng, and Xiaofei He. Simullr: Simultaneous lip reading transducer with attention-guided adaptive memory. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 1359-1367, 2021.

Satoshi Nakamura, Konstantin Markov, Hiromi Nakaiwa, Gen-ichiro Kikui, Hisashi Kawai, Takatoshi Jitsuhiro, J-S Zhang, Hirofumi Yamamoto, Eiichiro Sumita, and Seiichi Yamamoto. The atr multilingual speech-to-speech translation system. IEEE Transactions on Audio, Speech, and Language Processing, 14(2):365-376, 2006.

Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. fairseq: A fast, extensible toolkit for sequence modeling. arXiv preprint arXiv:1904.01038, 2019.

Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311-318, 2002.

Adam Polyak and Lior Wolf. Attention-based wavenet autoencoder for universal voice conversion. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6800-6804. IEEE, 2019.

Adam Polyak, Yossi Adi, Jade Copet, Eugene Kharitonov, Kushal Lakhotia, Wei-Ning Hsu, Abdelrahman Mohamed, and Emmanuel Dupoux. Speech resynthesis from discrete disentangled self-supervised representations. arXiv preprint arXiv:2104.00355, 2021.

Kaizhi Qian, Yang Zhang, Shiyu Chang, Mark Hasegawa-Johnson, and David Cox. Unsupervised speech decomposition via triple information bottleneck. In International Conference on Machine Learning, pp. 7836-7846. PMLR, 2020.

Kaizhi Qian, Yang Zhang, Shiyu Chang, Jinjun Xiong, Chuang Gan, David Cox, and Mark Hasegawa-Johnson. Global prosody style transfer without text transcriptions. In International Conference on Machine Learning, pp. 8650-8660. PMLR, 2021.

Yi Ren, Chenxu Hu, Xu Tan, Tao Qin, Sheng Zhao, Zhou Zhao, and Tie-Yan Liu. Fastspeech 2: Fast and high-quality end-to-end text to speech. arXiv preprint arXiv:2006.04558, 2020.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems, 30, 2017.

Wolfgang Wahlster. Verbmobil: Foundations of speech-to-speech translation. Springer Science & Business Media, 2013.

Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, and Juan Pino. fairseq s2t: Fast speech-to-text modeling with fairseq. In Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations, 2020a.

Changhan Wang, Anne Wu, and Juan Pino. Covost 2 and massively multilingual speech-to-text translation. arXiv preprint arXiv:2007.10310, 2020b.

Yiren Wang, Fei Tian, Di He, Tao Qin, ChengXiang Zhai, and Tie-Yan Liu. Non-autoregressive machine translation with auxiliary regularization. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5377-5384, 2019.

Yan Xia, Zhou Zhao, Shangwei Ye, Yang Zhao, Haoyuan Li, and Yi Ren. Video-guided curriculum learning for spoken video grounding. In Proceedings of the 30th ACM International Conference on Multimedia, pp. 5191-5200, 2022.

Bang Yang, Fenglin Liu, and Yuexian Zou. Non-autoregressive video captioning with iterative refinement. 2019.

Chao-Han Huck Yang, Yun-Yun Tsai, and Pin-Yu Chen. Voice2series: Reprogramming acoustic models for time series classification. In International Conference on Machine Learning, pp. 11808-11819. PMLR, 2021.

Dongchao Yang, Songxiang Liu, Jianwei Yu, Helin Wang, Chao Weng, and Yuexian Zou. Norespeech: Knowledge distillation based conditional diffusion model for noise-robust expressive tts. arXiv preprint arXiv:2211.02448, 2022a.

Dongchao Yang, Jianwei Yu, Helin Wang, Wen Wang, Chao Weng, Yuexian Zou, and Dong Yu. Diffsound: Discrete diffusion model for text-to-sound generation. arXiv preprint arXiv:2207.09983, 2022b.

Dongchao Yang, Songxiang Liu, Rongjie Huang, Guangzhi Lei, Chao Weng, Helen Meng, and Dong Yu. Instructtts: Modelling expressive tts in discrete latent space with natural language style prompt. arXiv preprint arXiv:2301.13662, 2023.

Zhenhui Ye, Zhou Zhao, Yi Ren, and Fei Wu. Syntaspeech: Syntax-aware generative adversarial text-to-speech. arXiv preprint arXiv:2204.11792, 2022.

Zhenhui Ye, Ziyue Jiang, Yi Ren, Jinglin Liu, Jinzheng He, and Zhou Zhao. Geneface: Generalized and high-fidelity audio-driven 3d talking face synthesis. arXiv preprint arXiv:2301.13430, 2023.

Hao Yen, Pin-Jui Ku, Chao-Han Huck Yang, Hu Hu, Sabato Marco Siniscalchi, Pin-Yu Chen, and Yu Tsao. A study of low-resource speech commands recognition based on adversarial reprogramming. arXiv preprint arXiv:2110.03894, 2021.

Aoxiong Yin, Zhou Zhao, Jinglin Liu, Weike Jin, Meng Zhang, Xingshan Zeng, and Xiaofei He. Simulslt: End-to-end simultaneous sign language translation. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 4118-4127, 2021.

Aoxiong Yin, Zhou Zhao, Weike Jin, Meng Zhang, Xingshan Zeng, and Xiaofei He. Mlslt: Towards multilingual sign language translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5109-5119, 2022.

Aoxiong Yin, Tianyun Zhong, Li Tang, Weike Jin, Tao Jin, and Zhou Zhao. Gloss attention for gloss-free sign language translation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, Canada, June 17-23, 2023. IEEE, 2023.

Chen Zhang, Xu Tan, Yi Ren, Tao Qin, Kejun Zhang, and Tie-Yan Liu. Uwspeech: Speech to speech translation for unwritten languages. arXiv preprint arXiv:2006.07926, 2020.

Jie Zhang, Chen Chen, Bo Li, Lingjuan Lyu, Shuang Wu, Shouhong Ding, Chunhua Shen, and Chao Wu. Dense: Data-free one-shot federated learning. In Advances in Neural Information Processing Systems.

Jie Zhang, Bo Li, Jianghe Xu, Shuang Wu, Shouhong Ding, Lei Zhang, and Chao Wu. Towards efficient data free black-box adversarial attack. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15115-15125, 2022a.

Jie Zhang, Bo Li, Chen Chen, Lingjuan Lyu, Shuang Wu, Shouhong Ding, and Chao Wu. Delving into the adversarial robustness of federated learning. arXiv preprint arXiv:2302.09479, 2023a.

Zijian Zhang, Zhou Zhao, and Zhijie Lin. Unsupervised representation learning from pre-trained diffusion probabilistic models. In Advances in Neural Information Processing Systems, 2022b.

Zijian Zhang, Zhou Zhao, Jun Yu, and Qi Tian. Shiftddpms: Exploring conditional diffusion models by shifting diffusion trajectories. arXiv preprint arXiv:2302.02373, 2023b.
# Appendices
# TranSpeech: Speech-to-Speech Translation With Bilateral Perturbation
# A RELATED WORK

# A.1 SELF-SUPERVISED REPRESENTATION LEARNING

There has been increasing interest in self-supervised learning in the machine learning (Zhang et al., 2022a; Lam et al., 2021; Zhang et al., 2023b; 2022b) and multimodal processing communities (Xia et al., 2022; Zhang et al., 2023a; Zhang et al.; Huang et al., 2022b;a). Wav2Vec 2.0 (Baevski et al., 2020) trains a convolutional neural network to distinguish true future samples from random distractor samples using a contrastive predictive coding (CPC) loss function. HuBERT (Hsu et al., 2021) is trained with a masked prediction objective on masked continuous audio signals. The majority of self-supervised representation learning models are trained by reconstructing (Chorowski et al., 2019) or predicting unseen speech signals (Chung et al., 2019), which inevitably captures factors unrelated to the linguistic content (i.e., the acoustic condition).
# A.2 PERTURBATION-BASED SPEECH REPROGRAMMING

Various approaches that perturb the information flow in acoustic models have proven effective in promoting downstream performance: SpeechSplit (Qian et al., 2020), AutoPST (Qian et al., 2021), and NANSY (Choi et al., 2021) perturb the speech variations during the analysis stage to encourage the synthesis stage to use the supplied, more stable representations. Voice2Series (Yang et al., 2021) introduces a novel end-to-end approach that reprograms pre-trained acoustic models for time series classification through input transformation learning and output label mapping. Wavprompt (Gao et al., 2022) utilizes a pre-trained audio encoder as part of an ASR system to convert the speech in the demonstrations into embeddings digestible by the language model. For multi-lingual tuning, Yen et al. (2021) propose a novel adversarial reprogramming approach for low-resource spoken command recognition (SCR), which repurposes a pre-trained SCR model by modifying the acoustic signals. In this work, we propose the bilateral perturbation technique with style normalization and information enhancement to perturb the acoustic conditions in speech.
# A.3 NON-AUTOREGRESSIVE SEQUENCE GENERATION

An autoregressive model (Lin et al., 2021; Yin et al., 2021; 2022) takes in a source sequence and then generates the target sentence token by token with a causal structure during inference. This prevents parallelism during inference, and thus the computational power of GPUs cannot be fully exploited. To reduce the inference latency, Gu et al. (2017) introduce a non-autoregressive (NAR) transformer-based approach with explicit word fertility and identify the multimodality problem of linguistic information between the source and target languages. Ghazvininejad et al. (2019) introduce the masked language modeling objective from BERT (Devlin et al., 2018) to non-autoregressively predict and refine translations. Beyond neural machine translation, many works bring NAR models into other sequence-to-sequence tasks (Cui et al., 2021; Ye et al., 2023; Huang et al., 2022c; Yang et al., 2022b), such as video captioning (Yang et al., 2019), speech recognition (Chen et al., 2020), and speech synthesis (Ye et al., 2022; Huang et al., 2022d; Yang et al., 2023). In contrast, we focus on non-autoregressive generation in direct S2ST, which is relatively overlooked.
# B MODEL ARCHITECTURES

In this section, we list the model hyperparameters of TranSpeech in Table 4.

Table 4: Hyperparameters of TranSpeech.

<table><tr><td colspan="2">Hyperparameter</td><td>TranSpeech</td></tr><tr><td rowspan="6">Conformer Encoder</td><td>Conv1d Layers</td><td>2</td></tr><tr><td>Conv1d Kernel</td><td>(5, 5)</td></tr><tr><td>Encoder Block</td><td>6</td></tr><tr><td>Encoder Hidden</td><td>512</td></tr><tr><td>Encoder Attention Heads</td><td>8</td></tr><tr><td>Encoder Dropout</td><td>0.1</td></tr><tr><td>Length Predictor</td><td>Projection Dim</td><td>512</td></tr><tr><td rowspan="5">Unit Decoder</td><td>Unit Dictionary</td><td>1000</td></tr><tr><td>Decoder Block</td><td>6</td></tr><tr><td>Decoder Hidden</td><td>512</td></tr><tr><td>Decoder Attention Heads</td><td>8</td></tr><tr><td>Decoder Dropout</td><td>0.1</td></tr></table>
# C IMPACT OF INDETERMINISTIC TRAINING TARGET
To visualize the acoustic multimodality and demonstrate the effectiveness of the proposed bilateral perturbation, we apply the information bottlenecks to acoustic features (i.e., rhythm, pitch, and energy) to create perturbed speech samples $\hat{S}_r$, $\hat{S}_p$, and $\hat{S}_e$, respectively. We plot the spectrograms and pitch contours of the original and acoustic-perturbed samples in Figure 5 in Appendix F. The unit error rate (UER) is adopted as an evaluation metric to measure the indeterminacy and multimodality under acoustic variation, and we make the following observations: 1) with the pre-trained SSL model, the acoustic dynamics result in UERs of up to $22.7\%$ (for rhythm), indicating a distinct alteration of the derived representations. The pre-trained SSL model learns both linguistic and acoustic information from speech, and thus the units derived from speech with the same content can be indeterministic; however, 2) with the proposed bilateral perturbation (BiP), a distinct drop of UER by up to $82.8\%$ (for energy) can be observed, demonstrating the efficiency of BiP in producing deterministic representations referring to linguistic content.

Table 5: UER between units derived from the original and the perturbed speech, computed with the pre-trained and the BiP-tuned SSL model respectively and averaged over the dataset. It measures the ability of the SSL model to generate acoustic-agnostic representations referring to linguistic content.

<table><tr><td>Acoustic</td><td>Pretrained</td><td>BiP-Tuned</td></tr><tr><td>Reference</td><td>0.0</td><td>0.0</td></tr><tr><td>Rhythm ($\hat{S}_r$)</td><td>22.7</td><td>10.2</td></tr><tr><td>Pitch ($\hat{S}_p$)</td><td>16.3</td><td>4.3</td></tr><tr><td>Energy ($\hat{S}_e$)</td><td>10.5</td><td>1.8</td></tr></table>
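UER can be computed like a word error rate over unit sequences; below is a minimal sketch via Levenshtein distance, normalized by the reference length (our formulation, as the paper does not spell out the definition).

```python
def unit_error_rate(ref, hyp):
    """Levenshtein distance between two unit sequences, normalized by the
    reference length (analogous to WER over discrete units), in percent."""
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            d[i][j] = min(sub, d[i - 1][j] + 1, d[i][j - 1] + 1)
    return 100.0 * d[len(ref)][len(hyp)] / max(1, len(ref))
```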
# D EVALUATION ON SPEECH QUALITY
Following the publicly available implementation in fairseq (Ott et al., 2019), we include SNR as an evaluation metric to measure the speech quality across the test set. We approximate the noise by subtracting the output of an enhancement model from the noisy input speech and then compute the SNR between the two. Further, we conduct crowd-sourced human evaluation with MOS, rated from 1 to 5 and reported with $95\%$ confidence intervals (CI). For easy comparison, the results are compiled and presented in Table 6.
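The SNR computation described above reduces to a few lines; the enhancement model interface producing `enhanced_wav` is assumed.

```python
import numpy as np

def snr_db(noisy_wav, enhanced_wav):
    """Approximate SNR: treat the enhancement residual as noise and compare
    its power with the power of the enhanced (clean-estimate) signal."""
    noise = noisy_wav - enhanced_wav
    signal_power = np.mean(enhanced_wav ** 2)
    noise_power = np.mean(noise ** 2) + 1e-12
    return 10.0 * np.log10(signal_power / noise_power)
```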
As illustrated in Table 6, TranSpeech achieves an SNR of 46.56 and a MOS of 4.03, competitive with the baseline systems. Since we apply the publicly available pre-trained unit vocoder and leave it unchanged for unit-to-speech generation, we expect our model to exhibit speech generation of the same high quality as the baseline models while achieving a significant improvement in translation accuracy.

Table 6: Speech quality (SNR (↑) and MOS (↑)) comparison with baseline systems.

<table><tr><td>Method</td><td>SNR (↑)</td><td>MOS (↑)</td></tr><tr><td>Translation GT</td><td>/</td><td>4.22±0.06</td></tr><tr><td>DirectS2ST</td><td>46.45</td><td>4.01±0.07</td></tr><tr><td>TextlessS2ST</td><td>47.22</td><td>4.05±0.06</td></tr><tr><td>TranSpeech</td><td>46.56</td><td>4.03±0.06</td></tr></table>
# E INFORMATION ENHANCEMENT
We apply the following functions (Qian et al., 2020; Choi et al., 2021) to acoustic features (e.g., rhythm, pitch, and energy) to create acoustic-perturbed speech samples $\hat{S}$ while the linguistic content remains unchanged, including 1) formant shifting $fs$, 2) pitch randomization $pr$, 3) random frequency shaping using a parametric equalizer $peq$, and 4) random resampling $RR$. In Figure 4, we further illustrate the mel-spectrograms of single-perturbed utterances in bilateral perturbation. A minimal sampling sketch of these perturbations is given after the list below.

- For $fs$, a formant shifting ratio is sampled uniformly from $\mathrm{Unif}(1, 1.4)$. After sampling the ratio, we randomly decide whether to take the reciprocal of the sampled ratio or not.
- In $pr$, a pitch shift ratio and a pitch range ratio are sampled uniformly from $\mathrm{Unif}(1, 2)$ and $\mathrm{Unif}(1, 1.5)$, respectively. Again, we randomly decide whether to take the reciprocal of the sampled ratios or not. For more details on formant shifting and pitch randomization, please refer to Parselmouth: https://github.com/YannickJadoul/Parselmouth.
- $peq$ represents a serial composition of low-shelving, peaking, and high-shelving filters. We use one low-shelving filter $H_{LS}$, one high-shelving filter $H_{HS}$, and eight peaking filters $H_{Peak}$.
- $RR$ denotes random resampling to modify the rhythm. The input signal is divided into segments whose lengths are drawn uniformly from 19 to 32 frames (Polyak & Wolf, 2019). Each segment is resampled using linear interpolation with a resampling factor drawn from 0.5 to 1.5.
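Here is a minimal sketch of how these perturbation parameters could be sampled; the DSP itself is delegated to hypothetical `apply_peq`/`apply_pitch_shift`/`apply_formant_shift` helpers (e.g., wrapping Parselmouth), which are not part of the paper.

```python
import random

def maybe_reciprocal(ratio):
    # With probability 0.5, invert the ratio so shifts go both up and down.
    return 1.0 / ratio if random.random() < 0.5 else ratio

def sample_perturbation_params():
    """Draw the fs / pr ratios of the chain F = fs(pr(peq(S)))."""
    return {
        "formant_shift": maybe_reciprocal(random.uniform(1.0, 1.4)),  # fs ratio
        "pitch_shift": maybe_reciprocal(random.uniform(1.0, 2.0)),    # pr shift ratio
        "pitch_range": maybe_reciprocal(random.uniform(1.0, 1.5)),    # pr range ratio
    }

# params = sample_perturbation_params()
# wav = apply_peq(wav)                                     # hypothetical peq helper
# wav = apply_pitch_shift(wav, params["pitch_shift"],
#                         params["pitch_range"])           # hypothetical pr helper
# wav = apply_formant_shift(wav, params["formant_shift"])  # hypothetical fs helper
```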
# F VISUALIZATION OF ACOUSTIC-PERTURBED SPEECH SAMPLES
Figure 4: Spectrograms and pitch contours of an utterance under single-perturbed acoustic conditions, with the linguistic content ("really interesting work will finally be undertaken on that topic") unchanged. Panels: Un-Perturbed Source Speech; Pitch Mean; Energy Norm; F; RR. RR: random resampling. F: a chain function $F = fs(pr(peq(x)))$ for random pitch shifting.

Figure 5: Spectrograms and pitch contours of speech samples under perturbed acoustic conditions, with the linguistic content ("really interesting work.") unchanged. The altered units are printed in red above the spectrogram. Panels: Reference (Un-Perturbed); Energy-Perturbed; Pitch-Perturbed; Rhythm-Perturbed.
2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8c5c2ab624e50c31eb0f8166968e9a9e13d6e346e368aabb3a26a6de7316578
+size 611578

2023/TranSpeech_ Speech-to-Speech Translation With Bilateral Perturbation/layout.json ADDED

2023/Transfer Learning with Deep Tabular Models/1dc89b4f-36aa-4331-9391-4480874346bd_content_list.json ADDED

2023/Transfer Learning with Deep Tabular Models/1dc89b4f-36aa-4331-9391-4480874346bd_model.json ADDED

2023/Transfer Learning with Deep Tabular Models/1dc89b4f-36aa-4331-9391-4480874346bd_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:719465fca71241b86d92def4add08f264611d44ef537de49ce1d58184930f1a8
+size 1554993

2023/Transfer Learning with Deep Tabular Models/full.md ADDED

2023/Transfer Learning with Deep Tabular Models/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54e166d3e91dea38dfd029b873c28a4247d934e9c437b02bf351dada31714766
+size 3152620

2023/Transfer Learning with Deep Tabular Models/layout.json ADDED

2023/Transferable Unlearnable Examples/c25345b7-d8be-4162-9c61-be3dc45172d4_content_list.json ADDED