SlowGuess committed on
Commit 36fa3d5 · verified · 1 Parent(s): 318037b

Add Batch 48573fa8-d0e8-4820-b63f-2944f70f2409

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +63 -0
  2. 2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_content_list.json +1998 -0
  3. 2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_model.json +0 -0
  4. 2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_origin.pdf +3 -0
  5. 2501.03xxx/2501.03847/full.md +392 -0
  6. 2501.03xxx/2501.03847/images.zip +3 -0
  7. 2501.03xxx/2501.03847/layout.json +0 -0
  8. 2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_content_list.json +0 -0
  9. 2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_model.json +0 -0
  10. 2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_origin.pdf +3 -0
  11. 2501.03xxx/2501.03895/full.md +0 -0
  12. 2501.03xxx/2501.03895/images.zip +3 -0
  13. 2501.03xxx/2501.03895/layout.json +0 -0
  14. 2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_content_list.json +0 -0
  15. 2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_model.json +0 -0
  16. 2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_origin.pdf +3 -0
  17. 2501.03xxx/2501.03931/full.md +753 -0
  18. 2501.03xxx/2501.03931/images.zip +3 -0
  19. 2501.03xxx/2501.03931/layout.json +0 -0
  20. 2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_content_list.json +0 -0
  21. 2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_model.json +0 -0
  22. 2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_origin.pdf +3 -0
  23. 2501.03xxx/2501.03936/full.md +961 -0
  24. 2501.03xxx/2501.03936/images.zip +3 -0
  25. 2501.03xxx/2501.03936/layout.json +0 -0
  26. 2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_content_list.json +0 -0
  27. 2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_model.json +0 -0
  28. 2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_origin.pdf +3 -0
  29. 2501.03xxx/2501.03939/full.md +0 -0
  30. 2501.03xxx/2501.03939/images.zip +3 -0
  31. 2501.03xxx/2501.03939/layout.json +0 -0
  32. 2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_content_list.json +0 -0
  33. 2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_model.json +0 -0
  34. 2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_origin.pdf +3 -0
  35. 2501.04xxx/2501.04001/full.md +0 -0
  36. 2501.04xxx/2501.04001/images.zip +3 -0
  37. 2501.04xxx/2501.04001/layout.json +0 -0
  38. 2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_content_list.json +0 -0
  39. 2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_model.json +0 -0
  40. 2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_origin.pdf +3 -0
  41. 2501.04xxx/2501.04003/full.md +0 -0
  42. 2501.04xxx/2501.04003/images.zip +3 -0
  43. 2501.04xxx/2501.04003/layout.json +0 -0
  44. 2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_content_list.json +0 -0
  45. 2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_model.json +0 -0
  46. 2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_origin.pdf +3 -0
  47. 2501.04xxx/2501.04164/full.md +679 -0
  48. 2501.04xxx/2501.04164/images.zip +3 -0
  49. 2501.04xxx/2501.04164/layout.json +0 -0
  50. 2501.04xxx/2501.04167/17e4f3b6-5b7c-4e3c-9a0f-250f368ded8c_content_list.json +0 -0
.gitattributes CHANGED
@@ -5293,3 +5293,66 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
5293
  2501.07xxx/2501.07730/4658d25e-7b77-43fd-a600-d41c8581a318_origin.pdf filter=lfs diff=lfs merge=lfs -text
5294
  2501.07xxx/2501.07811/a63844d5-9070-443b-8828-a8365d0da04f_origin.pdf filter=lfs diff=lfs merge=lfs -text
5295
  2501.07xxx/2501.07834/e13ebff1-4bb1-4d3a-a1ed-9fc798a82f3d_origin.pdf filter=lfs diff=lfs merge=lfs -text
5296
+ 2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_origin.pdf filter=lfs diff=lfs merge=lfs -text
5297
+ 2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_origin.pdf filter=lfs diff=lfs merge=lfs -text
5298
+ 2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
5299
+ 2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_origin.pdf filter=lfs diff=lfs merge=lfs -text
5300
+ 2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_origin.pdf filter=lfs diff=lfs merge=lfs -text
5301
+ 2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_origin.pdf filter=lfs diff=lfs merge=lfs -text
5302
+ 2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_origin.pdf filter=lfs diff=lfs merge=lfs -text
5303
+ 2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_origin.pdf filter=lfs diff=lfs merge=lfs -text
5304
+ 2501.04xxx/2501.04167/17e4f3b6-5b7c-4e3c-9a0f-250f368ded8c_origin.pdf filter=lfs diff=lfs merge=lfs -text
5305
+ 2501.04xxx/2501.04227/492498be-9831-46fa-9936-3b16fd849212_origin.pdf filter=lfs diff=lfs merge=lfs -text
5306
+ 2501.04xxx/2501.04299/2d25b267-8154-44bb-bab9-a0d8c6fe8ce1_origin.pdf filter=lfs diff=lfs merge=lfs -text
5307
+ 2501.04xxx/2501.04306/818f6703-a050-4674-bd71-ff8928624ff8_origin.pdf filter=lfs diff=lfs merge=lfs -text
5308
+ 2501.04xxx/2501.04376/a0d324b6-083d-432b-b679-477f43646425_origin.pdf filter=lfs diff=lfs merge=lfs -text
5309
+ 2501.04xxx/2501.04377/7198452b-f35a-41f8-b565-f865e05345c5_origin.pdf filter=lfs diff=lfs merge=lfs -text
5310
+ 2501.04xxx/2501.04437/6ac7064a-8e85-4996-8020-8ed307b7464e_origin.pdf filter=lfs diff=lfs merge=lfs -text
5311
+ 2501.04xxx/2501.04440/69d23332-27ac-4c51-82ca-265017321cb7_origin.pdf filter=lfs diff=lfs merge=lfs -text
5312
+ 2501.04xxx/2501.04467/1de27825-8580-4072-8a38-f58f21087215_origin.pdf filter=lfs diff=lfs merge=lfs -text
5313
+ 2501.04xxx/2501.04486/dfd82eea-3c6f-4085-9f58-deca2a9cc8f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
5314
+ 2501.04xxx/2501.04519/dc7f1dc3-f9b4-4e23-8e58-648ab5b58552_origin.pdf filter=lfs diff=lfs merge=lfs -text
5315
+ 2501.04xxx/2501.04575/2041a619-a5ee-4f42-98a0-bc8d22a56fdc_origin.pdf filter=lfs diff=lfs merge=lfs -text
5316
+ 2501.04xxx/2501.04606/d5b75433-1244-4bab-9a17-f3b22bf9764f_origin.pdf filter=lfs diff=lfs merge=lfs -text
5317
+ 2501.04xxx/2501.04628/fcf75382-100c-422e-9d88-797baa3a1046_origin.pdf filter=lfs diff=lfs merge=lfs -text
5318
+ 2501.04xxx/2501.04648/04cc76a4-1cd4-4afb-8c39-f333b8e46638_origin.pdf filter=lfs diff=lfs merge=lfs -text
5319
+ 2501.04xxx/2501.04678/94792d61-28a3-4e84-8698-20c62e8db7ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
5320
+ 2501.04xxx/2501.04682/0233303d-3497-40e5-971f-205e6d0ddfcb_origin.pdf filter=lfs diff=lfs merge=lfs -text
5321
+ 2501.04xxx/2501.04686/67514aea-d670-4527-b690-c970605fd7d8_origin.pdf filter=lfs diff=lfs merge=lfs -text
5322
+ 2501.04xxx/2501.04689/a5a63f01-f367-4ac3-98fb-bc6f59606b91_origin.pdf filter=lfs diff=lfs merge=lfs -text
5323
+ 2501.04xxx/2501.04693/fcd55be2-5e16-41b2-9d0b-f5c61e89f178_origin.pdf filter=lfs diff=lfs merge=lfs -text
5324
+ 2501.04xxx/2501.04697/1e990cf0-6f1c-485a-846d-91567206f7fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
5325
+ 2501.04xxx/2501.04698/f1e7d6d1-68ce-4f13-8939-e5b1ddc0b742_origin.pdf filter=lfs diff=lfs merge=lfs -text
5326
+ 2501.04xxx/2501.04699/83a90e5a-a314-4843-b693-4454cd13feb9_origin.pdf filter=lfs diff=lfs merge=lfs -text
5327
+ 2501.04xxx/2501.04746/4ff45626-9d00-46bf-a3e6-515312b30a34_origin.pdf filter=lfs diff=lfs merge=lfs -text
5328
+ 2501.04xxx/2501.04855/51082728-10ee-4115-aa13-5dd2d3976554_origin.pdf filter=lfs diff=lfs merge=lfs -text
5329
+ 2501.04xxx/2501.04908/d51416bf-a47f-4c8c-8069-bcc1df4a3392_origin.pdf filter=lfs diff=lfs merge=lfs -text
5330
+ 2501.04xxx/2501.04914/273fe07a-a076-4445-bdf2-6dfdb2bdd8f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
5331
+ 2501.04xxx/2501.04931/976b4f1b-2211-4660-a5a6-01b67da97c8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
5332
+ 2501.04xxx/2501.04944/8b6bc67f-e7e8-40cb-ab7a-0c6ff86806e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
5333
+ 2501.04xxx/2501.04952/dd9cde58-efcb-4952-92b7-75f74dc7702f_origin.pdf filter=lfs diff=lfs merge=lfs -text
5334
+ 2501.04xxx/2501.04961/838d6dd9-c5a7-49f4-bf80-efb98ce736c3_origin.pdf filter=lfs diff=lfs merge=lfs -text
5335
+ 2501.04xxx/2501.04996/004970a1-9581-4b4f-874a-7e1f18fdcfa4_origin.pdf filter=lfs diff=lfs merge=lfs -text
5336
+ 2501.05xxx/2501.05014/f40eb331-4388-477c-b213-dd17ffa46900_origin.pdf filter=lfs diff=lfs merge=lfs -text
5337
+ 2501.05xxx/2501.05031/4f2e5ee0-639f-4152-81bc-65469e8c41f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
5338
+ 2501.05xxx/2501.05040/744eb72c-6ac7-4390-a02d-bfe235e1b95a_origin.pdf filter=lfs diff=lfs merge=lfs -text
5339
+ 2501.05xxx/2501.05053/4cb0112c-8983-412f-b601-d9927f4ee755_origin.pdf filter=lfs diff=lfs merge=lfs -text
5340
+ 2501.05xxx/2501.05098/a3f6a162-1e93-4d03-9a86-cb7d1815a2b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
5341
+ 2501.05xxx/2501.05171/8db03c74-f4ee-4e38-aad5-b0f7c60705a1_origin.pdf filter=lfs diff=lfs merge=lfs -text
5342
+ 2501.05xxx/2501.05204/cba6b946-2e1e-4f5c-863e-ec71f63948ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
5343
+ 2501.05xxx/2501.05232/da3b7c35-058f-4c1a-a9bc-f8f4ccc5c89b_origin.pdf filter=lfs diff=lfs merge=lfs -text
5344
+ 2501.05xxx/2501.05269/1da616e6-f677-4484-830a-44a03e7e30ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
5345
+ 2501.05xxx/2501.05272/1e4cb430-bdda-4a8a-8fec-f09da6284e1b_origin.pdf filter=lfs diff=lfs merge=lfs -text
5346
+ 2501.05xxx/2501.05366/2e3ceba3-bc35-4ba4-8f88-b948ae1fab19_origin.pdf filter=lfs diff=lfs merge=lfs -text
5347
+ 2501.05xxx/2501.05370/5695f121-cd87-42d6-b716-e1b6f15f2e2f_origin.pdf filter=lfs diff=lfs merge=lfs -text
5348
+ 2501.05xxx/2501.05398/df0f1127-9130-437f-bc9d-fe42a8ac01b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
5349
+ 2501.05xxx/2501.05435/29a8dd8e-c11d-4a58-a551-1e043baa4a2b_origin.pdf filter=lfs diff=lfs merge=lfs -text
5350
+ 2501.05xxx/2501.05441/843ae6a7-f7be-4657-9d15-9966349de67d_origin.pdf filter=lfs diff=lfs merge=lfs -text
5351
+ 2501.05xxx/2501.05444/99ffd504-7610-4885-8ba3-6f2d09ab5c44_origin.pdf filter=lfs diff=lfs merge=lfs -text
5352
+ 2501.05xxx/2501.05452/6719a611-16b9-45a6-86ea-c6e9403ae956_origin.pdf filter=lfs diff=lfs merge=lfs -text
5353
+ 2501.05xxx/2501.05453/b6f2a87e-c9b3-4e56-83f7-53bc17733397_origin.pdf filter=lfs diff=lfs merge=lfs -text
5354
+ 2501.05xxx/2501.05510/e0026d3b-c640-4aed-a578-3c5c1db63540_origin.pdf filter=lfs diff=lfs merge=lfs -text
5355
+ 2501.05xxx/2501.05580/5ec2cf1f-2e0c-4955-9d86-03c7c12dd089_origin.pdf filter=lfs diff=lfs merge=lfs -text
5356
+ 2501.06xxx/2501.06250/05d59c03-2094-4dce-913e-71176b55dd15_origin.pdf filter=lfs diff=lfs merge=lfs -text
5357
+ 2501.07xxx/2501.07593/bd9db80c-efbc-47e1-9fac-2609bd273002_origin.pdf filter=lfs diff=lfs merge=lfs -text
5358
+ 2501.09xxx/2501.09026/946a71eb-92c3-4201-a1cb-e46c36276713_origin.pdf filter=lfs diff=lfs merge=lfs -text
2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_content_list.json ADDED
@@ -0,0 +1,1998 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 78,
8
+ 95,
9
+ 844,
10
+ 142
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "ZEKAI GU, Hong Kong University of Science and Technology, China \nRUI YAN, Zhejiang University, China \nJIAHAO LU, Hong Kong University of Science and Technology, China \nPENG LI, Hong Kong University of Science and Technology, China \nZHIYANG DOU, The University of Hong Kong, China \nCHENYANG SI, Nanyang Technological University, Singapore \nZHEN DONG, Wuhan University, China \nQIFENG LIU, Hong Kong University of Science and Technology, China \nCHENG LIN, The University of Hong Kong, China \nZIWEI LIU, Nanyang Technological University, Singapore \nWENPING WANG, Texas A&M University, U.S.A \nYUAN LIU, Hong Kong University of Science and Technology, China",
17
+ "bbox": [
18
+ 78,
19
+ 151,
20
+ 553,
21
+ 359
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "image",
27
+ "img_path": "images/3cbfeacd022e1030e0f91208808d64b5327adf483519d365f7758f3f2e934c58.jpg",
28
+ "image_caption": [
29
+ "(a) Diffusion as Shader"
30
+ ],
31
+ "image_footnote": [],
32
+ "bbox": [
33
+ 86,
34
+ 376,
35
+ 183,
36
+ 611
37
+ ],
38
+ "page_idx": 0
39
+ },
40
+ {
41
+ "type": "image",
42
+ "img_path": "images/ead6ff7c8cb3cc6e24f0162ab251c800d010365132d7fe4cca26284790ece4f8.jpg",
43
+ "image_caption": [],
44
+ "image_footnote": [],
45
+ "bbox": [
46
+ 197,
47
+ 375,
48
+ 313,
49
+ 435
50
+ ],
51
+ "page_idx": 0
52
+ },
53
+ {
54
+ "type": "image",
55
+ "img_path": "images/efd136b4e3f5d44b4d772e9f04a1f4c3e24f324e7c79d4ebe30a877dbd4efc6e.jpg",
56
+ "image_caption": [],
57
+ "image_footnote": [],
58
+ "bbox": [
59
+ 197,
60
+ 436,
61
+ 315,
62
+ 497
63
+ ],
64
+ "page_idx": 0
65
+ },
66
+ {
67
+ "type": "image",
68
+ "img_path": "images/b0d22c54810587509ff087f48801917e4e71c7402c3d6c43967eded4b4960770.jpg",
69
+ "image_caption": [],
70
+ "image_footnote": [],
71
+ "bbox": [
72
+ 197,
73
+ 498,
74
+ 434,
75
+ 559
76
+ ],
77
+ "page_idx": 0
78
+ },
79
+ {
80
+ "type": "image",
81
+ "img_path": "images/73628fceb521443e49094fdfaecfc630712fb415f9573ee8798be6a3cd2c66e5.jpg",
82
+ "image_caption": [
83
+ "Fig. 1. Diffusion as Shader (DaS) is (a) a 3D-aware video diffusion method enabling versatile video control tasks including (b) animating meshes to video generation, (c) motion transfer, (d) camera control, and (e) object manipulation."
84
+ ],
85
+ "image_footnote": [],
86
+ "bbox": [
87
+ 197,
88
+ 559,
89
+ 434,
90
+ 619
91
+ ],
92
+ "page_idx": 0
93
+ },
94
+ {
95
+ "type": "image",
96
+ "img_path": "images/fd541b4a3b4563641813b3c16643cf616c9974c27691eb4ea6a6e3342ce52b74.jpg",
97
+ "image_caption": [],
98
+ "image_footnote": [],
99
+ "bbox": [
100
+ 437,
101
+ 375,
102
+ 553,
103
+ 436
104
+ ],
105
+ "page_idx": 0
106
+ },
107
+ {
108
+ "type": "image",
109
+ "img_path": "images/73f232c772f1a648cd79e4b89c9ecb23d18eaa73c9584bbc6d4dc2d33c8913d6.jpg",
110
+ "image_caption": [],
111
+ "image_footnote": [],
112
+ "bbox": [
113
+ 437,
114
+ 436,
115
+ 553,
116
+ 498
117
+ ],
118
+ "page_idx": 0
119
+ },
120
+ {
121
+ "type": "image",
122
+ "img_path": "images/db5346859b02d554b70bbe52db22f7a386e0ff4ec514402a7e4287d2b1281784.jpg",
123
+ "image_caption": [],
124
+ "image_footnote": [],
125
+ "bbox": [
126
+ 437,
127
+ 498,
128
+ 553,
129
+ 558
130
+ ],
131
+ "page_idx": 0
132
+ },
133
+ {
134
+ "type": "image",
135
+ "img_path": "images/de252c04747de383ca09971014a91c3de7d61e857e43a2b3b60c3ac5f3756337.jpg",
136
+ "image_caption": [],
137
+ "image_footnote": [],
138
+ "bbox": [
139
+ 437,
140
+ 559,
141
+ 553,
142
+ 619
143
+ ],
144
+ "page_idx": 0
145
+ },
146
+ {
147
+ "type": "image",
148
+ "img_path": "images/1bd7dcecd305989c8cc2282d87f696c48e35eabfc8185721bc4cd3fdf8157d5c.jpg",
149
+ "image_caption": [],
150
+ "image_footnote": [],
151
+ "bbox": [
152
+ 555,
153
+ 375,
154
+ 671,
155
+ 436
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "image",
161
+ "img_path": "images/175ec96d23b8e7882a7a83ff189f06ec2f516fae9cd9805c5d43371ae9d69747.jpg",
162
+ "image_caption": [],
163
+ "image_footnote": [],
164
+ "bbox": [
165
+ 555,
166
+ 436,
167
+ 671,
168
+ 498
169
+ ],
170
+ "page_idx": 0
171
+ },
172
+ {
173
+ "type": "image",
174
+ "img_path": "images/9711c00114afc9a7ceecc4622ec2e5cff79578d0811f1b0cc7290d03e1846535.jpg",
175
+ "image_caption": [],
176
+ "image_footnote": [],
177
+ "bbox": [
178
+ 555,
179
+ 498,
180
+ 671,
181
+ 558
182
+ ],
183
+ "page_idx": 0
184
+ },
185
+ {
186
+ "type": "image",
187
+ "img_path": "images/e604c615761d4adede2dacabde122138eaf625124b5185544eab9fa30ade6434.jpg",
188
+ "image_caption": [],
189
+ "image_footnote": [],
190
+ "bbox": [
191
+ 555,
192
+ 559,
193
+ 671,
194
+ 619
195
+ ],
196
+ "page_idx": 0
197
+ },
198
+ {
199
+ "type": "image",
200
+ "img_path": "images/5aab5615e26b815c99c9bc0f3cf8ff09c077aaba6142e64e2ed733db8187e25c.jpg",
201
+ "image_caption": [],
202
+ "image_footnote": [],
203
+ "bbox": [
204
+ 674,
205
+ 375,
206
+ 790,
207
+ 436
208
+ ],
209
+ "page_idx": 0
210
+ },
211
+ {
212
+ "type": "image",
213
+ "img_path": "images/28b0727e1b2f5aa8a8991e53902d24ce03af4701124f4a906b503f6c16def39b.jpg",
214
+ "image_caption": [],
215
+ "image_footnote": [],
216
+ "bbox": [
217
+ 674,
218
+ 436,
219
+ 790,
220
+ 558
221
+ ],
222
+ "page_idx": 0
223
+ },
224
+ {
225
+ "type": "image",
226
+ "img_path": "images/bdf68f5d5489b1bdb6e506e3b4735981511b433b7d282ba9c3509a3d367b8dc5.jpg",
227
+ "image_caption": [],
228
+ "image_footnote": [],
229
+ "bbox": [
230
+ 674,
231
+ 558,
232
+ 790,
233
+ 619
234
+ ],
235
+ "page_idx": 0
236
+ },
237
+ {
238
+ "type": "image",
239
+ "img_path": "images/f3f93fc2386ab1c4ac884633d8c9a66b45b7fb70a370734f5f7c0c2bf59da601.jpg",
240
+ "image_caption": [],
241
+ "image_footnote": [],
242
+ "bbox": [
243
+ 794,
244
+ 375,
245
+ 911,
246
+ 436
247
+ ],
248
+ "page_idx": 0
249
+ },
250
+ {
251
+ "type": "image",
252
+ "img_path": "images/2611f9362c0d9336e000e8cbefc916b12d4e23a4bf5fd0ade3e77727be5faca5.jpg",
253
+ "image_caption": [],
254
+ "image_footnote": [],
255
+ "bbox": [
256
+ 794,
257
+ 436,
258
+ 911,
259
+ 558
260
+ ],
261
+ "page_idx": 0
262
+ },
263
+ {
264
+ "type": "image",
265
+ "img_path": "images/ddbd9d07e0c571d3cc4964ea78c249134a0e492cbf130a8ccc34f0e217fd809d.jpg",
266
+ "image_caption": [],
267
+ "image_footnote": [],
268
+ "bbox": [
269
+ 794,
270
+ 558,
271
+ 911,
272
+ 619
273
+ ],
274
+ "page_idx": 0
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "Diffusion models have demonstrated impressive performance in generating high-quality videos from text prompts or images. However, precise control over the video generation process—such as camera manipulation or content editing—remains a significant challenge. Existing methods for controlled video generation are typically limited to a single control type, lacking the flexibility to handle diverse control demands. In this paper, we introduce Diffusion as Shader (DaS), a novel approach that supports multiple video control tasks within a unified architecture. Our key insight is that achieving versatile video control necessitates leveraging 3D control signals, as videos are fundamentally 2D renderings of dynamic 3D content. Unlike prior methods limited to 2D control signals, DaS leverages 3D tracking videos as control inputs, making the video diffusion process inherently 3D-aware. This innovation allows DaS to achieve a wide range of video controls by simply manipulating the 3D tracking videos. A further advantage of using 3D tracking videos is their ability to effectively link frames, significantly enhancing the temporal consistency of the generated videos. With just 3 days of fine-tuning on 8 H800 GPUs using less than 10k videos, DaS demonstrates",
279
+ "bbox": [
280
+ 78,
281
+ 660,
282
+ 482,
283
+ 875
284
+ ],
285
+ "page_idx": 0
286
+ },
287
+ {
288
+ "type": "text",
289
+ "text": "strong control capabilities across diverse tasks, including mesh-to-video generation, camera control, motion transfer, and object manipulation. Codes and more results are available at https://igl-hkust.github.io/das/.",
290
+ "bbox": [
291
+ 513,
292
+ 660,
293
+ 915,
294
+ 699
295
+ ],
296
+ "page_idx": 0
297
+ },
298
+ {
299
+ "type": "text",
300
+ "text": "1 INTRODUCTION",
301
+ "text_level": 1,
302
+ "bbox": [
303
+ 514,
304
+ 732,
305
+ 661,
306
+ 746
307
+ ],
308
+ "page_idx": 0
309
+ },
310
+ {
311
+ "type": "text",
312
+ "text": "The development of diffusion generative models [Blattmann et al. 2023; Brooks et al. 2024; Ho et al. 2020; Lin et al. 2024; Rombach et al. 2022; Zheng et al. 2024b] enables high-quality video generation from text prompts or a starting image. Recent emerging models, e.g. Sora [Brooks et al. 2024], CogVideo-X [Yang et al. 2024b], Keling [Kuaishou 2024], and Hunyuan [Kong et al. 2024], have shown impressive video generation ability with strong temporal consistency and appealing visual effects, which becomes a promising tool for artists to create stunning videos using just few images or text",
313
+ "bbox": [
314
+ 511,
315
+ 750,
316
+ 916,
317
+ 876
318
+ ],
319
+ "page_idx": 0
320
+ },
321
+ {
322
+ "type": "aside_text",
323
+ "text": "arXiv:2501.03847v2 [cs.CV] 9 Jan 2025",
324
+ "bbox": [
325
+ 22,
326
+ 273,
327
+ 57,
328
+ 700
329
+ ],
330
+ "page_idx": 0
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "prompts. These advancements show strong potential to revolutionize the advertising, film, robotics, and game industries, becoming fundamental elements for various generative AI-based applications.",
335
+ "bbox": [
336
+ 78,
337
+ 99,
338
+ 482,
339
+ 141
340
+ ],
341
+ "page_idx": 1
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "A major challenge in video generation lies in achieving versatile and precise control to align seamlessly with users' creative visions. While recent methods have introduced strategies to integrate control into the video generation process [Guo et al. 2024; He et al. 2024b,a; Huang et al. 2023; Ma et al. 2024b,a; Namekata et al. 2024; Polyak et al. 2024; Wang et al. 2024f,c; Yuan et al. 2024], they predominantly focus on specific control types, relying on specialized architectures that lack adaptability to emerging control requirements. Furthermore, these approaches are generally limited to high-level adjustments—such as camera movements or maintaining identity—falling short when it comes to enabling fine-grained modifications, like precisely raising an avatar's left hand.",
346
+ "bbox": [
347
+ 78,
348
+ 142,
349
+ 482,
350
+ 306
351
+ ],
352
+ "page_idx": 1
353
+ },
354
+ {
355
+ "type": "text",
356
+ "text": "We argue that achieving versatile and precise video generation control fundamentally requires 3D control signals in the diffusion model. Videos are 2D renderings of dynamic 3D content. In a traditional Computer Graphics (CG)-based video-making pipeline, we can effectively control all aspects of a video in detail by manipulating the underlying 3D representations, such as meshes or particles. However, existing video control methods solely apply 2D control signals on rendered pixels, lacking the 3D awareness in the video generation process and thus struggling to achieve versatile and fine-grained controls. Thus, to this end, we present a novel 3D-aware video diffusion method, called Diffusion as Shader (DaS) in this paper, which utilizes 3D control signals to enable diverse and precise control tasks within a unified architecture.",
357
+ "bbox": [
358
+ 78,
359
+ 308,
360
+ 482,
361
+ 487
362
+ ],
363
+ "page_idx": 1
364
+ },
365
+ {
366
+ "type": "text",
367
+ "text": "Specifically, as shown in Figure 1 (a), DaS is an image-to-video diffusion model that takes a 3D tracking video as the 3D control signals for various control tasks. The 3D tracking video contains the motion trajectories of 3D points whose colors are defined by their coordinates in the camera coordinate system of the first frame. In this way, the 3D tracking video represents the underlying 3D motion of this video. The video diffusion model acts like a shader to compute shaded appearances on the dynamic 3D points to generate the video. Thus, we call our model Diffusion as Shader.",
368
+ "bbox": [
369
+ 78,
370
+ 488,
371
+ 482,
372
+ 611
373
+ ],
374
+ "page_idx": 1
375
+ },
376
+ {
377
+ "type": "text",
378
+ "text": "Using 3D tracking videos as control signals offers a significant advantage over depth videos with enhanced temporal consistency. While a straightforward approach to incorporating 3D control into video diffusion models involves using depth maps as control signals, depth maps only define the structural properties of the underlying 3D content without explicitly linking frames across time. In contrast, 3D tracking videos provide a consistent association between frames, as identical 3D points maintain the same colors across the video. These color anchors ensure consistent appearances for the same 3D points, thereby significantly improving temporal coherence in the generated videos. Our experiments demonstrate that even when a 3D region temporarily disappears and later reappears, DaS effectively preserves the appearance consistency of that region, thanks to the temporal consistency enabled by the tracking video.",
379
+ "bbox": [
380
+ 78,
381
+ 613,
382
+ 482,
383
+ 805
384
+ ],
385
+ "page_idx": 1
386
+ },
387
+ {
388
+ "type": "text",
389
+ "text": "By leveraging 3D tracking videos, DaS enables versatile video generation controls, encompassing but not limited to the following video control tasks.",
390
+ "bbox": [
391
+ 78,
392
+ 806,
393
+ 482,
394
+ 847
395
+ ],
396
+ "page_idx": 1
397
+ },
398
+ {
399
+ "type": "list",
400
+ "sub_type": "text",
401
+ "list_items": [
402
+ "(1) Animating meshes to videos. Using advanced 3D tools like Blender, we can design animated 3D meshes based on predefined templates. These animated meshes are transformed into 3D tracking videos to guide high-quality video generation (Figure 1 (b)).",
403
+ "(2) Motion transfer. Starting with an input video, we employ a 3D tracker [Xiao et al. 2024b] to generate a corresponding 3D tracking video. Next, the depth-to-image Flux model [Labs 2024] is used to modify the style or content of the first frame. Based on the updated first frame and the 3D tracking video, DaS generates a new video that replicates the motion patterns of the original while reflecting the new style or content (Figure 1 (c)).",
404
+ "(3) Camera control. To enable precise camera control, depth maps are estimated to extract 3D points [Bochkovskii et al. 2024]. These 3D points are then projected onto a specified camera path to create a 3D tracking video, which guides the generation of videos with customized camera movements (Figure 1 (d)).",
405
+ "(4) Object manipulation. By integrating object segmentation techniques [Kirillov et al. 2023] with a monocular depth estimator [Bochkovskii et al. 2024], the 3D points of specific objects can be extracted and manipulated. These modified 3D points are used to construct a 3D tracking video, which guides the creation of videos for object manipulation (Figure 1 (e))."
406
+ ],
407
+ "bbox": [
408
+ 514,
409
+ 99,
410
+ 916,
411
+ 405
412
+ ],
413
+ "page_idx": 1
414
+ },
415
+ {
416
+ "type": "text",
417
+ "text": "Due to the 3D awareness of DaS, DaS is data-efficient. Finetuning with less than 10k videos on 8 H800 GPUs for 3 days already gives the powerful control ability to DaS, which is demonstrated by various control tasks. We compare DaS with baseline methods on camera control [He et al. 2024b; Wang et al. 2024c] and motion transfer [Geyer et al. 2023a], which demonstrates that DaS achieves significantly improved performances in these two controlling tasks than baselines. For the remaining two tasks, i.e. mesh-to-video and object manipulation, we provide extensive qualitative results to show the superior generation quality of our method.",
418
+ "bbox": [
419
+ 513,
420
+ 409,
421
+ 916,
422
+ 547
423
+ ],
424
+ "page_idx": 1
425
+ },
426
+ {
427
+ "type": "text",
428
+ "text": "2 RELATED WORK",
429
+ "text_level": 1,
430
+ "bbox": [
431
+ 514,
432
+ 561,
433
+ 660,
434
+ 575
435
+ ],
436
+ "page_idx": 1
437
+ },
438
+ {
439
+ "type": "text",
440
+ "text": "2.1 Video diffusion",
441
+ "text_level": 1,
442
+ "bbox": [
443
+ 514,
444
+ 580,
445
+ 656,
446
+ 594
447
+ ],
448
+ "page_idx": 1
449
+ },
450
+ {
451
+ "type": "text",
452
+ "text": "In recent years, the success of diffusion models in image generation [Ho et al. 2020; Peebles and Xie 2023a; Rombach et al. 2022] has sparked interest in exploring video generation [Blattmann et al. 2023; Brooks et al. 2024; Chen et al. 2023b, 2024b; Guo et al. 2023; He et al. 2022; Ho et al. 2022; Kong et al. 2024; Kuaishou 2024; Lin et al. 2024; Xing et al. 2024; Yang et al. 2024b; Zheng et al. 2024b]. VDM [Ho et al. 2022] is the first work to explore the feasibility of diffusion in the field of video generation. SVD [Blattmann et al. 2023] introduces a unified strategy for training a robust video generation model. Sora [Brooks et al. 2024], through training on extensive video data, suggests that scaling video generation models is a promising path towards building general-purpose simulators of the physical world. CogVideo-X [Yang et al. 2024b], VideoCrafter [Chen et al. 2023b, 2024b], DynamiCrafter [Xing et al. 2024], Keling [Kuaishou 2024], and Hunyuan [Kong et al. 2024] have demonstrated impressive video generation performance with strong temporal consistency.",
453
+ "bbox": [
454
+ 511,
455
+ 598,
456
+ 916,
457
+ 819
458
+ ],
459
+ "page_idx": 1
460
+ },
461
+ {
462
+ "type": "text",
463
+ "text": "Controllable video generation. Existing works still lack an effective way to control the generation process. There are many works [Guo et al. 2024; He et al. 2024b,a; Huang et al. 2023; Ma et al. 2024b,a,a; Namekata et al. 2024; Polyak et al. 2024; Qiu et al.",
464
+ "bbox": [
465
+ 513,
466
+ 820,
467
+ 916,
468
+ 876
469
+ ],
470
+ "page_idx": 1
471
+ },
472
+ {
473
+ "type": "page_number",
474
+ "text": "2",
475
+ "bbox": [
476
+ 81,
477
+ 69,
478
+ 91,
479
+ 78
480
+ ],
481
+ "page_idx": 1
482
+ },
483
+ {
484
+ "type": "header",
485
+ "text": "Zekai Gu, et al.",
486
+ "bbox": [
487
+ 112,
488
+ 68,
489
+ 187,
490
+ 78
491
+ ],
492
+ "page_idx": 1
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "2024; Wang et al. 2024f,c; Yu et al. 2024; Yuan et al. 2024] that introduce a specific control signal in the video generation process which can only achieve one control type like identity preserving, camera control, and motion transfer. Our method is more versatile in various video control types by using a 3D-aware video generation with 3D tracking videos as conditions.",
497
+ "bbox": [
498
+ 80,
499
+ 99,
500
+ 480,
501
+ 183
502
+ ],
503
+ "page_idx": 2
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "2.2 Controlled video generation",
508
+ "text_level": 1,
509
+ "bbox": [
510
+ 81,
511
+ 207,
512
+ 307,
513
+ 220
514
+ ],
515
+ "page_idx": 2
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "We review the following 4 types of controlled video generation.",
520
+ "bbox": [
521
+ 81,
522
+ 224,
523
+ 464,
524
+ 238
525
+ ],
526
+ "page_idx": 2
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "Animating meshes to videos. Animating meshes to videos aims to texture meshes. Several works [Cai et al. 2024; Cao et al. 2023; Richardson et al. 2023; Wang et al. 2023] have demonstrated the feasibility of mesh texturization using powerful diffusion models. TexFusion [Cao et al. 2023] applies the diffusion model's denoiser on a set of 2D renders of the 3D object, optimizing an intermediate neural color field to output final RGB textures. TEXTure [Richardson et al. 2023] introduces a dynamic trimap representation and a novel diffusion sampling process, leveraging this trimap to generate seamless textures from various views. G-Rendering [Cai et al. 2024] takes a dynamic mesh as input. To preserve consistency, G-Rendering employs UV-guided noise initialization and correspondence-aware blending of both pre- and post-attention features. Following G-Rendering, our method also targets dynamic meshes, utilizing a diffusion model as a Shader to incorporate realistic texture information. Unlike G-Rendering, which preserves consistency at the noise and attention levels, our approach leverages 3D tracking videos as supplementary information, integrating them into the diffusion model to ensure both temporal and spatial consistency.",
531
+ "bbox": [
532
+ 80,
533
+ 239,
534
+ 482,
535
+ 501
536
+ ],
537
+ "page_idx": 2
538
+ },
539
+ {
540
+ "type": "text",
541
+ "text": "Camera control. Camera control [Bahmani et al. 2024; Geng et al. 2024; He et al. 2024b; Wang et al. 2024e,c; Xiao et al. 2024a; Yang et al. 2024a; Yu et al. 2024; Zheng et al. 2024a] is an important capability for enhancing the realism of generated videos and increasing user engagement by allowing customized viewpoints. Recently, many efforts have been made to introduce camera control in video generation. MotionCtrl [Wang et al. 2024c] incorporates a flexible motion controller for video generation, which can independently or jointly control camera motion and object motion in generated videos. CameraCtrl [He et al. 2024b] adopts Plücker embeddings [Sitzmann et al. 2021] as the primary form of camera parameters, enabling the ViewCrafter [Yu et al. 2024] employs a point-based representation for free-view rendering, enabling precise camera control. AC3D [Bahmani et al. 2024] optimizes pose conditioning schedules during training and testing to accelerate convergence and restricts the injection of camera conditioning to specific positions, reducing interference with other meaningful video features. CPA [Wang et al. 2024e] incorporates a Sparse Motion Encoding Module to embed the camera pose information and integrating the embedded motion information via temporal attention. Our method aims to use 3D tracking videos as an intermediary to achieve precise and consistent camera control.",
542
+ "bbox": [
543
+ 80,
544
+ 502,
545
+ 482,
546
+ 805
547
+ ],
548
+ "page_idx": 2
549
+ },
550
+ {
551
+ "type": "text",
552
+ "text": "Motion transfer. Motion transfer [Esser et al. 2023; Geng et al. 2024; Geyer et al. 2023a; Meral et al. 2024; Park et al. 2024; Pondaven et al. 2024; Wang et al. 2024d,c; Yatim et al. 2024] aims to synthesize novel videos by following the motion of the original one. Gen-1 [Esser et al. 2023] employs depth estimation results [Bochkovskii",
553
+ "bbox": [
554
+ 81,
555
+ 806,
556
+ 480,
557
+ 875
558
+ ],
559
+ "page_idx": 2
560
+ },
561
+ {
562
+ "type": "text",
563
+ "text": "et al. 2024; Lu et al. 2024; Ranftl et al. 2020] to guide the motion TokenFlow [Geyer et al. 2023a] achieves consistent motion transfer by enforcing consistency in the diffusion feature space. MotionCtrl [Wang et al. 2024c] also achieves motion transfer by incorporating a motion controller. DiTFlow [Pondaven et al. 2024] proposes Attention Motion Flow as guidance for motion transfer on DiTs [Peebles and Xie 2023a]. Motion Prompting [Geng et al. 2024] utilizes 2D motions as prompts to realize impressive motion transfer. Unlike these approaches, our method employs 3D tracking as guidance for motion transfer, enabling a more comprehensive capture of each object's motion and the relationships between them within the video. This ensures accurate and globally consistent geometric and temporal consistency.",
564
+ "bbox": [
565
+ 511,
566
+ 99,
567
+ 915,
568
+ 280
569
+ ],
570
+ "page_idx": 2
571
+ },
572
+ {
573
+ "type": "text",
574
+ "text": "Object manipulation. Object manipulation refers to versatile object movement control for image-to-video generation. Different from camera control, which focuses on changes in perspective, object manipulation emphasizes the movement of the objects themselves. Currently, mainstream methods [Chen et al. 2023a; Geng et al. 2024; Jain et al. 2024; Li et al. 2024; Ma et al. 2024b; Mou et al. 2024; Qiu et al. 2024; Teng et al. 2023; Wang et al. 2024f,c; Yang et al. 2024a; Yin et al. 2023] typically achieve object manipulation by utilizing directed trajectories or modeling the relationships between bounding boxes with specific semantic meanings. However, these methods primarily rely on 2D guidance to represent the spatial movement of target objects, which often fails to accurately capture user intent and frequently results in distorted outputs. ObjCtrl-2.5D [Wang et al. 2024a] tries to address this limitation by extending 2D trajectories with depth information, creating a single 3D trajectory as the control signal. Better than the single 3D trajectory, our method leverages 3D tracking videos, which offer greater details and more effectively represent the motion relationships between foreground and background for more precise and realistic object manipulation.",
575
+ "bbox": [
576
+ 511,
577
+ 281,
578
+ 915,
579
+ 542
580
+ ],
581
+ "page_idx": 2
582
+ },
583
+ {
584
+ "type": "text",
585
+ "text": "Concurrent works. Recently, several works [Feng et al. 2024a; Geng et al. 2024; Jeong et al. 2024; Koroglu et al. 2024; Lei et al. 2024; Niu et al. 2024; Shi et al. 2024; Zhang et al. 2024] have explored utilizing motion as control signals. These approaches can be broadly categorized into two groups: 2D motion-based and 3D motion-based methods. [Koroglu et al. 2024; Lei et al. 2024; Shi et al. 2024] leverage 2D optical flow to condition motion, while [Geng et al. 2024; Jeong et al. 2024; Niu et al. 2024] utilize 2D tracks, which are sparser than optical flow, to track or control video motion. [Zhang et al. 2024] learns to generate 3D coordinates in the video diffusion model, which 3D awareness. [Feng et al. 2024a] lifts videos into 3D space and extracts the motion of 3D points, enabling a more accurate capture of spatial relationships between objects and supporting tasks such as object manipulation and camera control. Our method, DaS, also leverages recent tracking methods [Xiao et al. 2024b; Zhang et al. 2025] to construct videos. However, we extend the applicability by unifying a broader range of control tasks, including mesh-to-video generation and motion transfer.",
586
+ "bbox": [
587
+ 511,
588
+ 544,
589
+ 915,
590
+ 792
591
+ ],
592
+ "page_idx": 2
593
+ },
594
+ {
595
+ "type": "text",
596
+ "text": "3 METHOD",
597
+ "text_level": 1,
598
+ "bbox": [
599
+ 517,
600
+ 811,
601
+ 609,
602
+ 824
603
+ ],
604
+ "page_idx": 2
605
+ },
606
+ {
607
+ "type": "text",
608
+ "text": "3.1 Overview",
609
+ "text_level": 1,
610
+ "bbox": [
611
+ 517,
612
+ 830,
613
+ 617,
614
+ 842
615
+ ],
616
+ "page_idx": 2
617
+ },
618
+ {
619
+ "type": "text",
620
+ "text": "DaS is an image-to-video (I2V) diffusion generative model, which applies both an input image and a 3D tracking video as conditions",
621
+ "bbox": [
622
+ 517,
623
+ 848,
624
+ 913,
625
+ 875
626
+ ],
627
+ "page_idx": 2
628
+ },
629
+ {
630
+ "type": "header",
631
+ "text": "Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control",
632
+ "bbox": [
633
+ 477,
634
+ 68,
635
+ 883,
636
+ 79
637
+ ],
638
+ "page_idx": 2
639
+ },
640
+ {
641
+ "type": "page_number",
642
+ "text": "3",
643
+ "bbox": [
644
+ 888,
645
+ 69,
646
+ 915,
647
+ 78
648
+ ],
649
+ "page_idx": 2
650
+ },
651
+ {
652
+ "type": "image",
653
+ "img_path": "images/6355c9439780ddbe41bdf6863467236c8bbcc179b7b76af42c52be020f5b43cc.jpg",
654
+ "image_caption": [
655
+ "Fig. 2. Architecture of DaS. (a) We colorize dynamic 3D points according to their coordinates to get (b) a 3D tracking video. (c) The input image and the 3D tracking video are processed by (d) a transformer-based latent diffusion with a variational autoencoder (VAE). The 3D tracking video is processed by a trainable copy of the denoising DiT and zero linear layers are used to inject the condition features from 3D tracking videos into the denoising process."
656
+ ],
657
+ "image_footnote": [],
658
+ "bbox": [
659
+ 88,
660
+ 97,
661
+ 916,
662
+ 308
663
+ ],
664
+ "page_idx": 3
665
+ },
666
+ {
667
+ "type": "text",
668
+ "text": "for controllable video generation. In the following, we first review the backend I2V video diffusion model in Sec. 3.2. Then, we discuss the definition of the 3D tracking video and how to inject the 3D tracking video into the generation process as a condition in Sec. 3.3. Finally, in Sec. 3.4, we discuss how to apply DaS in various types of video generation control.",
669
+ "bbox": [
670
+ 78,
671
+ 380,
672
+ 483,
673
+ 464
674
+ ],
675
+ "page_idx": 3
676
+ },
677
+ {
678
+ "type": "text",
679
+ "text": "3.2 Backend video diffusion model",
680
+ "text_level": 1,
681
+ "bbox": [
682
+ 78,
683
+ 478,
684
+ 326,
685
+ 491
686
+ ],
687
+ "page_idx": 3
688
+ },
689
+ {
690
+ "type": "text",
691
+ "text": "DaS is finetuned from the CogVideoX [Yang et al. 2024b] model that is a transformer-based video diffusion model [Peebles and Xie 2023a] operating on a latent space. Specifically, as shown in Figure 2 (d), we adopt the I2V CogVideoX model as the base model, which takes an image $\\mathbf{I} \\in \\mathbb{R}^{H \\times W \\times 3}$ as input and generate a video $\\mathbf{V} \\in \\mathbb{R}^{T \\times H \\times W \\times 3}$ . The generated video $\\mathbf{V}$ has $T$ frames with the same image size of width $W$ height $H$ as the input image. The input image $\\mathbf{I}$ is first padded with zeros to get an input condition video with the same size $T \\times H \\times W \\times 3$ as the target video. Then, a VAE encoder is applied to the padded condition video to get a latent vector of size $\\frac{T}{4} \\times \\frac{H}{8} \\times \\frac{W}{8} \\times 16$ , which is concatenated with a noise of the same size. A diffusion transformer (DiT) [Peebles and Xie 2023b] is iteratively used to denoise the noise latent for a predefined number of steps and the output denoised latent is processed by a VAE decoder to get the video $\\mathbf{V}$ . In the following, we discuss how to add a 3D tracking video as an additional condition on this base model.",
692
+ "bbox": [
693
+ 78,
694
+ 496,
695
+ 483,
696
+ 718
697
+ ],
698
+ "page_idx": 3
699
+ },
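
To make the tensor bookkeeping in the paragraph above concrete, here is a minimal NumPy sketch of the described conditioning layout: the input image is zero-padded to a T-frame video, encoded to a (T/4) x (H/8) x (W/8) x 16 latent, and concatenated channel-wise with noise of the same size. `fake_vae_encode` is a stand-in for CogVideoX's 3D VAE used only to show the shapes; it is not part of any released code.

```python
import numpy as np

T, H, W = 49, 480, 720  # 49 frames at 720x480, as used for training in this paper

def fake_vae_encode(video: np.ndarray) -> np.ndarray:
    """Stand-in for the VAE: 4x temporal and 8x spatial downsampling, 16 channels."""
    t, h, w, _ = video.shape
    return np.zeros((int(np.ceil(t / 4)), h // 8, w // 8, 16), dtype=np.float32)

image = np.random.rand(H, W, 3).astype(np.float32)

# Pad the single conditioning image with zero frames to match the target video size.
cond_video = np.zeros((T, H, W, 3), dtype=np.float32)
cond_video[0] = image

cond_latent = fake_vae_encode(cond_video)
noise = np.random.randn(*cond_latent.shape).astype(np.float32)

# The denoising DiT sees the noise latent and the condition latent concatenated
# channel-wise (16 + 16 = 32 input channels).
dit_input = np.concatenate([noise, cond_latent], axis=-1)
print(dit_input.shape)  # (13, 60, 90, 32)
```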
700
+ {
701
+ "type": "text",
702
+ "text": "3.3 Finetuning with 3D tracking videos",
703
+ "text_level": 1,
704
+ "bbox": [
705
+ 78,
706
+ 733,
707
+ 357,
708
+ 748
709
+ ],
710
+ "page_idx": 3
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "We add a 3D tracking video as an additional condition to our video diffusion model. As shown in Figure 2 (a, b), the 3D tracking video is rendered from a set of moving 3D points $\\{\\mathbf{p}_i(t) \\in \\mathbb{R}^3\\}$ , where $t = 1, \\dots, T$ means the frame index in the video. The colors of these points are determined by their coordinates in the first frame, where we normalize the coordinates into $[0,1]^3$ and convert the coordinates into RGB colors $\\{\\mathbf{c}_i\\}$ . Note we adopt the reciprocal of z-coordinate in the normalization. These colors remain the same for different timesteps $t$ . Then, to get a specific $t$ -th frame of the tracking video,",
715
+ "bbox": [
716
+ 78,
717
+ 750,
718
+ 483,
719
+ 876
720
+ ],
721
+ "page_idx": 3
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "we project these 3D points onto the $t$ -th camera to render this frame. In Sec. 3.4, we will discuss how to get these moving 3D points and the camera poses of different frames for different control tasks. Next, we first introduce the architecture to utilize the 3D tracking video as a condition for video generation.",
726
+ "bbox": [
727
+ 511,
728
+ 380,
729
+ 916,
730
+ 449
731
+ ],
732
+ "page_idx": 3
733
+ },
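
A small sketch, under illustrative assumptions, of the tracking-video construction just described: points take their colors from normalized first-frame camera coordinates (with the reciprocal of z used in the normalization), and each frame is a pinhole projection of the points through that frame's camera. The function names and intrinsics here are illustrative, not from the DaS codebase.

```python
import numpy as np

def colorize_points(p0: np.ndarray) -> np.ndarray:
    """Map first-frame camera-space points (N, 3) to RGB colors in [0, 1]."""
    inv_z = 1.0 / np.clip(p0[:, 2:3], 1e-6, None)   # reciprocal of the z-coordinate
    feat = np.concatenate([p0[:, :2], inv_z], axis=1)
    lo, hi = feat.min(axis=0), feat.max(axis=0)
    return (feat - lo) / np.maximum(hi - lo, 1e-6)  # normalize into [0, 1]^3

def render_frame(pts_t, colors, K, H=480, W=720):
    """Splat camera-space points (N, 3) through intrinsics K into an H x W image."""
    img = np.zeros((H, W, 3), dtype=np.float32)
    z = pts_t[:, 2]
    valid = z > 1e-6
    uv = (K @ (pts_t[valid] / z[valid, None]).T).T[:, :2].astype(int)
    keep = (uv[:, 0] >= 0) & (uv[:, 0] < W) & (uv[:, 1] >= 0) & (uv[:, 1] < H)
    img[uv[keep, 1], uv[keep, 0]] = colors[valid][keep]
    return img

K = np.array([[500.0, 0, 360], [0, 500.0, 240], [0, 0, 1]])   # toy pinhole camera
tracks = np.random.rand(49, 4900, 3) + np.array([0, 0, 2.0])  # (T, N, 3) toy 3D tracks
colors = colorize_points(tracks[0])                    # colors fixed by frame 1 ...
frames = [render_frame(p, colors, K) for p in tracks]  # ... and reused in every frame
```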
734
+ {
735
+ "type": "text",
736
+ "text": "Injecting 3D tracking control. We follow a similar design as the ControlNet [Chen et al. 2024a; Zhang et al. 2023] in DaS to add the 3D tracking video as the additional condition. As shown in Figure 2 (d), we apply the pretrained VAE encoder to encode the 3D tracking video to get the latent vector. Then, we make a trainable copy of the pretrained denoising DiT, called condition DiT, to process the latent vector of the 3D tracking video. The denoising DiT contains 42 blocks and we copy the first 18 blocks as the condition DiT. In the condition DiT, we extract the output feature of each DiT block, process it with a zero-initialized linear layer, and add the feature to the corresponding feature map of the denoising DiT. We finetune the condition DiT with the diffusion losses while freezing the pretrained denoising DiT.",
737
+ "bbox": [
738
+ 511,
739
+ 450,
740
+ 916,
741
+ 628
742
+ ],
743
+ "page_idx": 3
744
+ },
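
The injection scheme above can be sketched in PyTorch roughly as follows: a frozen stack of 42 blocks, a trainable copy of the first 18, and one zero-initialized linear layer per copied block whose output is added to the corresponding denoising feature map. The `Block` here is a toy placeholder, not CogVideoX's actual DiT block.

```python
import copy
import torch
import torch.nn as nn

class Block(nn.Module):
    """Toy stand-in for a DiT block."""
    def __init__(self, dim):
        super().__init__()
        self.ff = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, dim), nn.GELU())
    def forward(self, x):
        return x + self.ff(x)

dim, n_blocks, n_copied = 64, 42, 18
denoise_blocks = nn.ModuleList([Block(dim) for _ in range(n_blocks)])
cond_blocks = copy.deepcopy(denoise_blocks[:n_copied])  # trainable copy (condition DiT)
for p in denoise_blocks.parameters():
    p.requires_grad_(False)                             # freeze the pretrained denoising DiT

zero_linears = nn.ModuleList([nn.Linear(dim, dim) for _ in range(n_copied)])
for lin in zero_linears:                                # zero init: no effect at step 0
    nn.init.zeros_(lin.weight)
    nn.init.zeros_(lin.bias)

def denoise(noisy_latent, tracking_latent):
    x, c = noisy_latent, tracking_latent
    for i, blk in enumerate(denoise_blocks):
        x = blk(x)
        if i < n_copied:                                # inject condition features
            c = cond_blocks[i](c)
            x = x + zero_linears[i](c)
    return x

out = denoise(torch.randn(2, 16, dim), torch.randn(2, 16, dim))
```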
745
+ {
746
+ "type": "text",
747
+ "text": "Finetuning details. To train the DaS model, we construct a training dataset containing both real-world videos and synthetic rendered videos. The real-world videos are from MiraData [Ju et al. 2024] while we use the meshes and motion sequences from Mixamo to render synthetic videos. All videos are center-cropped and resized to $720 \\times 480$ resolution with 49 frames. We only finetune the copied condition DiT while freezing all the original denoising DiT. To construct the 3D tracking video for the rendered videos, since we have access to the ground-truth 3D meshes and camera poses for the synthetic videos, we construct our 3D tracking videos directly using these dense ground-truth 3D points, which results in dense 3D point tracking. For real-world videos, we adopt SpatialTracker [Xiao et al. 2024b] to detect 3D points and their trajectories in the 3D space. Specifically, for each real-world video, we detect 4,900 3D evenly distributed points and track their trajectories. For training, we employ a learning rate of $1 \\times 10^{-4}$ using the AdamW optimizer. We train the model for 2000 steps using the gradient accumulation",
748
+ "bbox": [
749
+ 511,
750
+ 630,
751
+ 918,
752
+ 866
753
+ ],
754
+ "page_idx": 3
755
+ },
756
+ {
757
+ "type": "page_number",
758
+ "text": "4",
759
+ "bbox": [
760
+ 81,
761
+ 69,
762
+ 91,
763
+ 78
764
+ ],
765
+ "page_idx": 3
766
+ },
767
+ {
768
+ "type": "header",
769
+ "text": "Zekai Gu, et al.",
770
+ "bbox": [
771
+ 112,
772
+ 68,
773
+ 189,
774
+ 79
775
+ ],
776
+ "page_idx": 3
777
+ },
778
+ {
779
+ "type": "image",
780
+ "img_path": "images/23c0461025a07a92eddadc619f9d94ae866e63783af0e30220f6aaeb11380fa1.jpg",
781
+ "image_caption": [],
782
+ "image_footnote": [],
783
+ "bbox": [
784
+ 83,
785
+ 98,
786
+ 908,
787
+ 227
788
+ ],
789
+ "page_idx": 4
790
+ },
791
+ {
792
+ "type": "image",
793
+ "img_path": "images/c75e1d4c2dd29108d43afdeb2b0624cb2db2aceac70cb93d2597a55a614f3278.jpg",
794
+ "image_caption": [
795
+ "Fig. 3. 3D tracking video generation in (a) object manipulation, (b) animating mesh to video generation, (c) camera control, and (d) motion transfer."
796
+ ],
797
+ "image_footnote": [],
798
+ "bbox": [
799
+ 83,
800
+ 229,
801
+ 906,
802
+ 372
803
+ ],
804
+ "page_idx": 4
805
+ },
806
+ {
807
+ "type": "text",
808
+ "text": "strategy to get an effective batch size of 64. The training takes 3 days on 8 H800 GPUs.",
809
+ "bbox": [
810
+ 78,
811
+ 421,
812
+ 480,
813
+ 448
814
+ ],
815
+ "page_idx": 4
816
+ },
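
As a schematic only, the stated recipe (AdamW at 1e-4, 2,000 optimizer steps, gradient accumulation to an effective batch of 64) corresponds to a loop like the one below; `condition_dit` and `diffusion_loss` are hypothetical placeholders for the trainable condition DiT and the denoising objective, and the per-GPU batch split is an assumption.

```python
import torch

condition_dit = torch.nn.Linear(16, 16)  # stand-in for the trainable condition DiT
opt = torch.optim.AdamW(condition_dit.parameters(), lr=1e-4)
accum, steps = 8, 2000  # e.g. 8 GPUs x batch 1 x accum 8 -> effective batch 64

def diffusion_loss(model):
    """Placeholder denoising loss on random latents."""
    x = torch.randn(4, 16)
    return (model(x) - x).pow(2).mean()

for step in range(steps):
    opt.zero_grad()
    for _ in range(accum):  # gradient accumulation
        (diffusion_loss(condition_dit) / accum).backward()
    opt.step()
```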
817
+ {
818
+ "type": "text",
819
+ "text": "3.4 Video generation control",
820
+ "text_level": 1,
821
+ "bbox": [
822
+ 78,
823
+ 463,
824
+ 284,
825
+ 477
826
+ ],
827
+ "page_idx": 4
828
+ },
829
+ {
830
+ "type": "text",
831
+ "text": "In this section, we describe how to utilize DaS for the following controllable video generation.",
832
+ "bbox": [
833
+ 78,
834
+ 482,
835
+ 480,
836
+ 510
837
+ ],
838
+ "page_idx": 4
839
+ },
840
+ {
841
+ "type": "list",
842
+ "sub_type": "text",
843
+ "list_items": [
844
+ "3.4.1 Object manipulation. DaS can generate a video to manipulate a specific object. As shown in Figure 3 (a), given an image, we estimate the depth map using Depth Pro [Bochkovskii et al. 2024] or MoGE [Wang et al. 2024b] and segment out the object using SAM [Kirillov et al. 2023]. Then, we are able to manipulate the point cloud of the object to construct a 3D tracking video for object manipulation video generation.",
845
+ "3.4.2 Animating meshes to videos. DaS enables the creation of visually appealing, high-quality videos from simple animated meshes. While many Computer Graphics (CG) software tools provide basic 3D models and motion templates to generate animated meshes, these outputs are often simplistic and lack the detailed appearance and geometry needed for high-quality animations. Starting with these simple animated meshes, as shown in Figure 3 (b), we generate an initial visually appealing frame using a depth-to-image FLUX model [Labs 2024]. We then produce 3D tracking videos from the animated meshes, which, when combined with the generated first frame, guide DaS to transform the basic meshes into visually rich and appealing videos.",
846
+ "3.4.3 Camera control. Previous approaches [He et al. 2024b; Wang et al. 2024c] rely on camera or ray embeddings as conditions to control the camera trajectory in video generation. However, these embeddings lack true 3D awareness, leaving the diffusion models to infer the scene's 3D structure and simulate camera movement."
847
+ ],
848
+ "bbox": [
849
+ 78,
850
+ 520,
851
+ 482,
852
+ 875
853
+ ],
854
+ "page_idx": 4
855
+ },
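
An illustrative sketch of the object-manipulation setup of Sec. 3.4.1 above: given per-pixel 3D points (e.g. unprojected from an estimated depth map) and a binary object mask (e.g. from SAM), only the masked points are translated over time to form the moving point set behind the 3D tracking video. All names and the drift path are hypothetical; the depth and segmentation models themselves are not shown.

```python
import numpy as np

def manipulate_object(points, mask, offset_per_frame, T=49):
    """points: (H, W, 3) camera-space points; mask: (H, W) bool object mask.
    Returns (T, H*W, 3) tracks where masked points drift by offset_per_frame."""
    flat, m = points.reshape(-1, 3), mask.reshape(-1)
    tracks = np.repeat(flat[None], T, axis=0)
    for t in range(T):
        tracks[t, m] += t * np.asarray(offset_per_frame)  # background stays put
    return tracks

H, W = 48, 72  # small grid for the demo
pts = np.random.rand(H, W, 3) + np.array([0, 0, 2.0])
obj = np.zeros((H, W), dtype=bool)
obj[20:30, 30:42] = True  # toy object mask
tracks = manipulate_object(pts, obj, offset_per_frame=[0.01, 0.0, 0.0])
```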
856
+ {
857
+ "type": "text",
858
+ "text": "In contrast, DaS significantly enhances 3D awareness by incorporating 3D tracking videos for precise camera control. To generate videos with a specific camera trajectory, as shown in Figure 3 (c), we first estimate the depth map of the initial frame using Depth Pro [Bochkovskii et al. 2024] and convert it into colored 3D points. These points are then projected onto the given camera trajectory, constructing a 3D tracking video that enables DaS to control camera movements with high 3D accuracy.",
859
+ "bbox": [
860
+ 511,
861
+ 421,
862
+ 916,
863
+ 532
864
+ ],
865
+ "page_idx": 4
866
+ },
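
For the camera-control path, here is a minimal sketch (assuming world-to-camera extrinsics and a static scene) of re-expressing the first-frame points in each camera of a user-given trajectory before rasterizing them into the tracking video; the sideways path is only an example.

```python
import numpy as np

def make_tracks(points_cam0: np.ndarray, traj) -> np.ndarray:
    """points_cam0: (N, 3) points in the first camera's frame.
    traj: list of (R, t) world-to-camera poses, frame 0 included.
    Returns (T, N, 3) camera-space tracks for a static scene."""
    R0, t0 = traj[0]
    world = (np.linalg.inv(R0) @ (points_cam0 - t0).T).T  # lift back to world frame
    return np.stack([(R @ world.T).T + t for R, t in traj])

N, T = 4900, 49
pts0 = np.random.rand(N, 3) + np.array([0, 0, 2.0])  # e.g. unprojected depth map
# Camera slides right along +x, so the scene drifts left in the rendered frames.
traj = [(np.eye(3), np.array([-0.02 * i, 0.0, 0.0])) for i in range(T)]
tracks = make_tracks(pts0, traj)  # rasterize as in the tracking-video sketch above
```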
867
+ {
868
+ "type": "text",
869
+ "text": "3.4.4 Motion transfer. As shown in Figure 3 (d), DaS also facilitates creating a new video by transferring motion from an existing source video. First, we estimate the depth map of the source video's first frame and apply the depth-to-image FLUX model [Labs 2024] to repaint the frame into a target appearance guided by text prompts. Then, using SpatialTracker [Xiao et al. 2024b], we generate a 3D tracking video from the source video to serve as control signals. Finally, the DaS model generates the target video by combining the edited first frame with the 3D tracking video.",
870
+ "bbox": [
871
+ 511,
872
+ 539,
873
+ 916,
874
+ 665
875
+ ],
876
+ "page_idx": 4
877
+ },
878
+ {
879
+ "type": "text",
880
+ "text": "4 EXPERIMENTS",
881
+ "text_level": 1,
882
+ "bbox": [
883
+ 513,
884
+ 676,
885
+ 645,
886
+ 689
887
+ ],
888
+ "page_idx": 4
889
+ },
890
+ {
891
+ "type": "text",
892
+ "text": "We conduct experiments on five tasks, including camera control, motion transfer, mesh-to-video generation, and object manipulation to demonstrate the versatility of DaS in controlling the video generation process.",
893
+ "bbox": [
894
+ 511,
895
+ 694,
896
+ 916,
897
+ 750
898
+ ],
899
+ "page_idx": 4
900
+ },
901
+ {
902
+ "type": "text",
903
+ "text": "4.1 Camera control",
904
+ "text_level": 1,
905
+ "bbox": [
906
+ 513,
907
+ 761,
908
+ 658,
909
+ 773
910
+ ],
911
+ "page_idx": 4
912
+ },
913
+ {
914
+ "type": "text",
915
+ "text": "Baseline methods. To evaluate the ability to control camera motions of generated videos, we select two representative methodologies, MotionCtrl [Wang et al. 2024c] and CameraCtrl [He et al. 2024b] as baseline methods, both of which allow camera trajectories as input and use camera or ray embeddings for camera control.",
916
+ "bbox": [
917
+ 511,
918
+ 777,
919
+ 916,
920
+ 847
921
+ ],
922
+ "page_idx": 4
923
+ },
924
+ {
925
+ "type": "text",
926
+ "text": "Metrics. To measure the accuracy of the camera trajectories of generated videos, we evaluate the consistency between the estimated",
927
+ "bbox": [
928
+ 511,
929
+ 849,
930
+ 916,
931
+ 875
932
+ ],
933
+ "page_idx": 4
934
+ },
935
+ {
936
+ "type": "header",
937
+ "text": "Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control",
938
+ "bbox": [
939
+ 475,
940
+ 68,
941
+ 882,
942
+ 78
943
+ ],
944
+ "page_idx": 4
945
+ },
946
+ {
947
+ "type": "page_number",
948
+ "text": "1",
949
+ "bbox": [
950
+ 901,
951
+ 68,
952
+ 911,
953
+ 75
954
+ ],
955
+ "page_idx": 4
956
+ },
957
+ {
958
+ "type": "image",
959
+ "img_path": "images/dddf88953eabdf82f30156ea7361c7e3b43f00a7332434b1f9530e93236f3e76.jpg",
960
+ "image_caption": [
961
+ "Fig. 4. Qualitative results of DaS on the camera control task. We show 4 trajectories (left, right, up, down) with large movements."
962
+ ],
963
+ "image_footnote": [],
964
+ "bbox": [
965
+ 84,
966
+ 99,
967
+ 911,
968
+ 406
969
+ ],
970
+ "page_idx": 5
971
+ },
972
+ {
973
+ "type": "text",
974
+ "text": "camera poses from the generated videos and the input ground-truth camera poses using rotation errors and translation errors. Specifically, for each frame of a generated video, we reconstruct its relative pose given the first frame using SIFT [Ng and Henikoff 2003]. Then, we get the normalized quaternion and translation vectors for the rotation and translation. Finally, we calculate the cosine similarity between the estimated camera poses with the given camera poses.",
975
+ "bbox": [
976
+ 78,
977
+ 452,
978
+ 482,
979
+ 551
980
+ ],
981
+ "page_idx": 5
982
+ },
983
+ {
984
+ "type": "equation",
985
+ "text": "\n$$\n\\mathbf {R o t E r r} = \\operatorname {a r c c o s} \\left(\\frac {1}{T - 1} \\sum_ {i = 2} ^ {T} \\langle \\mathbf {q} _ {\\mathrm {g e n}} ^ {i}, \\mathbf {q} _ {\\mathrm {g t}} ^ {i} \\rangle\\right),\n$$\n",
986
+ "text_format": "latex",
987
+ "bbox": [
988
+ 153,
989
+ 561,
990
+ 406,
991
+ 599
992
+ ],
993
+ "page_idx": 5
994
+ },
995
+ {
996
+ "type": "equation",
997
+ "text": "\n$$\n\\mathbf {T r a n s E r r} = \\arccos \\left(\\frac {1}{T - 1} \\sum_ {i = 2} ^ {T} \\langle \\mathbf {t} _ {\\mathrm {g e n}} ^ {i}, \\mathbf {t} _ {\\mathrm {g t}} ^ {i} \\rangle\\right),\n$$\n",
998
+ "text_format": "latex",
999
+ "bbox": [
1000
+ 151,
1001
+ 619,
1002
+ 410,
1003
+ 657
1004
+ ],
1005
+ "page_idx": 5
1006
+ },
1007
+ {
1008
+ "type": "text",
1009
+ "text": "where $T$ is the number of frames, $\\mathbf{q}^i$ and $\\mathbf{t}^i$ are the normalized quaternion and translation vector of the $i$ -th frame, and $\\langle \\cdot, \\cdot \\rangle$ means the dot product between two vectors.",
1010
+ "bbox": [
1011
+ 78,
1012
+ 667,
1013
+ 482,
1014
+ 709
1015
+ ],
1016
+ "page_idx": 5
1017
+ },
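As a concrete reading of the two formulas above, here is a minimal NumPy sketch; the per-frame quaternions and translation vectors are assumed to be already estimated and normalized, and the `np.clip` guard on the arccos domain is our addition, not part of the paper's definition.

```python
import numpy as np

def angular_err_deg(gen: np.ndarray, gt: np.ndarray) -> float:
    """gen, gt: (T-1, D) arrays of normalized vectors (D=4 for quaternions,
    giving RotErr; D=3 for translation vectors, giving TransErr)."""
    cos_sim = np.sum(gen * gt, axis=1)             # <v_gen^i, v_gt^i> per frame
    mean_cos = np.clip(cos_sim.mean(), -1.0, 1.0)  # keep arccos well-defined
    return float(np.degrees(np.arccos(mean_cos)))

# rot_err   = angular_err_deg(q_gen, q_gt)  # quaternions of frames 2..T
# trans_err = angular_err_deg(t_gen, t_gt)  # translations of frames 2..T
```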
1018
+ {
1019
+ "type": "text",
1020
+ "text": "Results. We compare against baseline methods on 100 random trajectories from RealEstate10K [Zhou et al. 2018]. But since most of the random trajectories only contain small movements, we further test the models on larger fixed movements (moving left, right, up, down, spiral) as shown in Figure 4. As shown in Table 1, our method outperforms the baseline methods, which demonstrates that our method achieves stable and accurate control of the camera poses of the generated videos. The main reason is that due to the utilization of the 3D tracking videos, our method is fully 3D-aware to enable accurate spatial inference in the video generation process. In comparison, baseline methods [He et al. 2024b; Wang et al. 2024c] only adopt implicit camera or ray embeddings for camera control.",
1021
+ "bbox": [
1022
+ 78,
1023
+ 709,
1024
+ 482,
1025
+ 876
1026
+ ],
1027
+ "page_idx": 5
1028
+ },
1029
+ {
1030
+ "type": "table",
1031
+ "img_path": "images/965b2cca14f798247824176bb1c432adbf6fc9ee211698c6c816ea9047739f62.jpg",
1032
+ "table_caption": [],
1033
+ "table_footnote": [
1034
+ "Table 1. Quantitative results on camera control of MotionCtrl [Wang et al. 2024c], CameraCtrl [He et al. 2024b], and our method. \"TransErr\" and \"RotErr\" are the angle differences between the estimated translation and rotation and the ground-truth ones in degree."
1035
+ ],
1036
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">Small Movement</td><td colspan=\"2\">Large Movement</td></tr><tr><td>TransErr ↓</td><td>RotErr ↓</td><td>TransErr ↓</td><td>RotErr ↓</td></tr><tr><td>MotionCtrl</td><td>44.23</td><td>8.92</td><td>67.05</td><td>39.86</td></tr><tr><td>CameraCtrl</td><td>42.31</td><td>7.82</td><td>66.76</td><td>29.70</td></tr><tr><td>Ours</td><td>27.85</td><td>5.97</td><td>37.17</td><td>10.40</td></tr></table>",
1037
+ "bbox": [
1038
+ 524,
1039
+ 449,
1040
+ 908,
1041
+ 523
1042
+ ],
1043
+ "page_idx": 5
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "text": "4.2 Motion transfer",
1048
+ "text_level": 1,
1049
+ "bbox": [
1050
+ 514,
1051
+ 622,
1052
+ 661,
1053
+ 635
1054
+ ],
1055
+ "page_idx": 5
1056
+ },
1057
+ {
1058
+ "type": "text",
1059
+ "text": "Baseline methods. We compare DaS with two famous motion transfer methods, TokenFlow [Geyer et al. 2023b] and CCEdit [Feng et al. 2024b]. TokenFlow represents video motions with the feature consistency across different timesteps extracted by a diffusion model. Then, the feature consistency is propagated to several keyframes generated by a text prompt for video generation. For TokenFlow, we adopt the Stable Diffusion 2.1 [Rombach et al. 2022] model for the motion transfer task. CCEdit adopts depth maps as conditions to control the video motion and transfers the motion using a new repainted frame to generate a video.",
1060
+ "bbox": [
1061
+ 511,
1062
+ 640,
1063
+ 916,
1064
+ 777
1065
+ ],
1066
+ "page_idx": 5
1067
+ },
1068
+ {
1069
+ "type": "text",
1070
+ "text": "Metrics. Since all methods generate the transferred videos based on text prompts, we aim to evaluate the alignment between the generated videos and the text prompts, as well as the video coherence, using the CLIP [Radford et al. 2021]. Specifically, for video-text alignment, we extract multiple frames from the video and compare them with the corresponding text prompts by calculating the CLIP score [Hessel et al. 2022] for each frame. This score reflects",
1071
+ "bbox": [
1072
+ 511,
1073
+ 777,
1074
+ 916,
1075
+ 876
1076
+ ],
1077
+ "page_idx": 5
1078
+ },
1079
+ {
1080
+ "type": "page_number",
1081
+ "text": "6",
1082
+ "bbox": [
1083
+ 81,
1084
+ 69,
1085
+ 91,
1086
+ 78
1087
+ ],
1088
+ "page_idx": 5
1089
+ },
1090
+ {
1091
+ "type": "header",
1092
+ "text": "Zekai Gu, et al.",
1093
+ "bbox": [
1094
+ 112,
1095
+ 68,
1096
+ 189,
1097
+ 78
1098
+ ],
1099
+ "page_idx": 5
1100
+ },
1101
+ {
1102
+ "type": "image",
1103
+ "img_path": "images/f18c7a0fa736625d9d8187bb8141b7325e4c0c994c814d32a921b0c994b03fad.jpg",
1104
+ "image_caption": [
1105
+ "Fig. 5. Qualitative comparison on motion transfer between our method, CCEdit [Feng et al. 2024b], and TokenFlow [Geyer et al. 2023b]."
1106
+ ],
1107
+ "image_footnote": [],
1108
+ "bbox": [
1109
+ 84,
1110
+ 98,
1111
+ 503,
1112
+ 415
1113
+ ],
1114
+ "page_idx": 6
1115
+ },
1116
+ {
1117
+ "type": "image",
1118
+ "img_path": "images/f0d3ce417e5da9dc9611d267137512d83d85820506deb20e40964da8f50f4943.jpg",
1119
+ "image_caption": [],
1120
+ "image_footnote": [],
1121
+ "bbox": [
1122
+ 513,
1123
+ 98,
1124
+ 911,
1125
+ 415
1126
+ ],
1127
+ "page_idx": 6
1128
+ },
1129
+ {
1130
+ "type": "table",
1131
+ "img_path": "images/d177aaee587dc8cf00897ced6a553c354f15023b550862b9a693a7d945d23085.jpg",
1132
+ "table_caption": [],
1133
+ "table_footnote": [],
1134
+ "table_body": "<table><tr><td>Method</td><td>Tex-Ali</td><td>↑</td><td>Tem-Con</td><td>↑</td></tr><tr><td>CCEdit</td><td>16.9</td><td></td><td>0.932</td><td></td></tr><tr><td>Tokenflow</td><td>31.9</td><td></td><td>0.956</td><td></td></tr><tr><td>Ours</td><td>32.6</td><td></td><td>0.971</td><td></td></tr></table>",
1135
+ "bbox": [
1136
+ 163,
1137
+ 462,
1138
+ 400,
1139
+ 521
1140
+ ],
1141
+ "page_idx": 6
1142
+ },
1143
+ {
1144
+ "type": "text",
1145
+ "text": "the alignment between image content and textual descriptions. For temporal consistency, we extract normalized CLIP features from adjacent video frames and compute the cosine similarity between the adjacent features.",
1146
+ "bbox": [
1147
+ 78,
1148
+ 595,
1149
+ 482,
1150
+ 651
1151
+ ],
1152
+ "page_idx": 6
1153
+ },
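A simplified sketch of both CLIP metrics is given below, using the Hugging Face transformers CLIP implementation. It returns raw cosine similarities rather than the rescaled CLIPScore of Hessel et al. [2022], and the frame-sampling strategy and model choice (`openai/clip-vit-base-patch32`) are our assumptions, not stated in the paper.

```python
import torch
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

@torch.no_grad()
def clip_metrics(frames, prompt):
    """frames: list of PIL images sampled from one generated video."""
    inputs = processor(text=[prompt], images=frames,
                       return_tensors="pt", padding=True)
    img = model.get_image_features(pixel_values=inputs["pixel_values"])
    txt = model.get_text_features(input_ids=inputs["input_ids"],
                                  attention_mask=inputs["attention_mask"])
    img = img / img.norm(dim=-1, keepdim=True)   # normalize CLIP features
    txt = txt / txt.norm(dim=-1, keepdim=True)
    text_align = (img @ txt.T).mean().item()               # video-text alignment
    tem_con = (img[:-1] * img[1:]).sum(-1).mean().item()   # adjacent-frame consistency
    return text_align, tem_con
```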
1154
+ {
1155
+ "type": "text",
1156
+ "text": "Results. As shown in Table 2, our method demonstrates outstanding performance in both text alignment and frame consistency, surpassing two baseline methods. Furthermore, Figure 5 presents the qualitative comparison of our method, CCEdit, and TokenFlow. It shows that CCEdit produces frames of low quality and struggles to maintain temporal coherence. TokenFlow produces semantically consistent frames but has difficulty producing coherent videos. In contrast, our method accurately transfers the video motion with strong temporal coherence as shown in Figure 6.",
1157
+ "bbox": [
1158
+ 78,
1159
+ 651,
1160
+ 482,
1161
+ 776
1162
+ ],
1163
+ "page_idx": 6
1164
+ },
1165
+ {
1166
+ "type": "text",
1167
+ "text": "4.3 Animating meshes to videos",
1168
+ "text_level": 1,
1169
+ "bbox": [
1170
+ 78,
1171
+ 787,
1172
+ 308,
1173
+ 803
1174
+ ],
1175
+ "page_idx": 6
1176
+ },
1177
+ {
1178
+ "type": "text",
1179
+ "text": "Qualitative comparison. We compare our method against a state-of-the-art human image animation method CHAMP [Zhu et al. 2024] on the mesh-to-video task. Champ takes a human image and a motion sequence as input and generates a corresponding human video. The motion sequence is represented by an animated SMPL [Loper",
1180
+ "bbox": [
1181
+ 78,
1182
+ 806,
1183
+ 483,
1184
+ 876
1185
+ ],
1186
+ "page_idx": 6
1187
+ },
1188
+ {
1189
+ "type": "table",
1190
+ "img_path": "images/6f8fc05927dcc791cc90176ab6c25ac7f01423f541d53c2a486e5966110f7578.jpg",
1191
+ "table_caption": [
1192
+ "Table 2. CLIP scores for motion transfer of CCEdit [Feng et al. 2024b], TokenFlow [Geyer et al. 2023b], and our method. \"Text-Ali\" is the semantic CLIP consistency between generated videos and the given text prompts. \"Tem-Con\" is the temporal CLIP consistency between neighboring frames."
1193
+ ],
1194
+ "table_footnote": [],
1195
+ "table_body": "<table><tr><td>Depth</td><td>Tracking</td><td>#Tracks</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td><td>FVD ↓</td></tr><tr><td>✓</td><td></td><td>-</td><td>18.08</td><td>0.573</td><td>0.312</td><td>645.1</td></tr><tr><td></td><td>✓</td><td>900</td><td>18.52</td><td>0.586</td><td>0.337</td><td>765.3</td></tr><tr><td></td><td>✓</td><td>2500</td><td>19.17</td><td>0.632</td><td>0.263</td><td>566.4</td></tr><tr><td></td><td>✓</td><td>4900</td><td>19.27</td><td>0.658</td><td>0.261</td><td>551.3</td></tr><tr><td></td><td>✓</td><td>8100</td><td>19.11</td><td>0.649</td><td>0.262</td><td>599.0</td></tr></table>",
1196
+ "bbox": [
1197
+ 526,
1198
+ 462,
1199
+ 906,
1200
+ 549
1201
+ ],
1202
+ "page_idx": 6
1203
+ },
1204
+ {
1205
+ "type": "text",
1206
+ "text": "Table 3. Analysis of applying different 3D control signals for image to video generation. We evaluate PSNR, SSIM, LPIPS, and FVD of generated videos on the validation set of the DAVIS and MiraData datasets. \"Depth\" means using depth maps as the 3D control signals. \"Tracking\" means using 3D tracking videos as the control signals. #Tracks means the number of 3D points used in the 3D tracking video.",
1207
+ "bbox": [
1208
+ 513,
1209
+ 550,
1210
+ 916,
1211
+ 626
1212
+ ],
1213
+ "page_idx": 6
1214
+ },
1215
+ {
1216
+ "type": "text",
1217
+ "text": "et al. 2023] mesh. We use the same input image but the SMPL mesh for CHAMP and generate the corresponding animation videos for qualitative comparison as shown in Figure 8. We also generate different styles of videos from the same animated 3D meshes as shown in Figure 8. Compared to CHAMP, our method demonstrates better consistency in the 3D structure and texture details of the avatar on different motion sequences and across different styles.",
1218
+ "bbox": [
1219
+ 511,
1220
+ 650,
1221
+ 916,
1222
+ 748
1223
+ ],
1224
+ "page_idx": 6
1225
+ },
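One plausible way to turn a fixed-topology mesh animation (e.g. an animated SMPL sequence) into the 3D point trajectories behind a tracking video is to sample surface points once on the first frame and carry them through the animation via barycentric coordinates. The sketch below assumes the trimesh package and per-frame vertex arrays; it illustrates the idea and is not the authors' exact implementation.

```python
import numpy as np
import trimesh

def mesh_tracks(faces: np.ndarray, vertex_frames: list, n_points: int = 4900):
    """faces: (F, 3) vertex indices; vertex_frames: list of (V, 3) arrays
    sharing the same topology. Returns (T, N, 3) point trajectories."""
    mesh0 = trimesh.Trimesh(vertices=vertex_frames[0], faces=faces,
                            process=False)
    pts, face_idx = trimesh.sample.sample_surface(mesh0, n_points)
    bary = trimesh.triangles.points_to_barycentric(
        mesh0.triangles[face_idx], pts)        # (N, 3) barycentric weights
    tracks = []
    for verts in vertex_frames:                # re-express samples per frame
        tris = verts[faces[face_idx]]          # (N, 3, 3) triangle vertices
        tracks.append(np.einsum("nij,ni->nj", tris, bary))
    return np.stack(tracks)
```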
1226
+ {
1227
+ "type": "text",
1228
+ "text": "4.4 Object manipulation",
1229
+ "text_level": 1,
1230
+ "bbox": [
1231
+ 513,
1232
+ 761,
1233
+ 692,
1234
+ 775
1235
+ ],
1236
+ "page_idx": 6
1237
+ },
1238
+ {
1239
+ "type": "text",
1240
+ "text": "Qualitative results. For the object manipulation, we adopt the SAM [Kirillov et al. 2023] and depth estimation models [Bochkovskii et al. 2024; Wang et al. 2024b] to get the object points. Then, we evaluate two kinds of manipulation, i.e. translation and rotation. The results are shown in Figure 9, which demonstrate that DaS achieves accurate object manipulation to produce photorealistic videos with strong multiview consistency for these objects.",
1241
+ "bbox": [
1242
+ 511,
1243
+ 777,
1244
+ 916,
1245
+ 876
1246
+ ],
1247
+ "page_idx": 6
1248
+ },
1249
+ {
1250
+ "type": "header",
1251
+ "text": "Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control",
1252
+ "bbox": [
1253
+ 477,
1254
+ 68,
1255
+ 882,
1256
+ 78
1257
+ ],
1258
+ "page_idx": 6
1259
+ },
1260
+ {
1261
+ "type": "page_number",
1262
+ "text": "·7",
1263
+ "bbox": [
1264
+ 888,
1265
+ 69,
1266
+ 915,
1267
+ 77
1268
+ ],
1269
+ "page_idx": 6
1270
+ },
1271
+ {
1272
+ "type": "image",
1273
+ "img_path": "images/4821eb61b8ce9a3e7d30626bc033a45df4a5c167b0de25e984eeb539e278adfb.jpg",
1274
+ "image_caption": [
1275
+ "Transferred Source"
1276
+ ],
1277
+ "image_footnote": [],
1278
+ "bbox": [
1279
+ 112,
1280
+ 99,
1281
+ 243,
1282
+ 167
1283
+ ],
1284
+ "page_idx": 7
1285
+ },
1286
+ {
1287
+ "type": "image",
1288
+ "img_path": "images/0eb5a3d9f567a7001ebe31d3730e0e3103cb9fdbca3df09653d5ba8c0aec26b1.jpg",
1289
+ "image_caption": [],
1290
+ "image_footnote": [],
1291
+ "bbox": [
1292
+ 243,
1293
+ 99,
1294
+ 372,
1295
+ 167
1296
+ ],
1297
+ "page_idx": 7
1298
+ },
1299
+ {
1300
+ "type": "image",
1301
+ "img_path": "images/298e3176a284957f7acae2c59ba9fea385a4121270c507f2a93a3770f13329ee.jpg",
1302
+ "image_caption": [],
1303
+ "image_footnote": [],
1304
+ "bbox": [
1305
+ 374,
1306
+ 99,
1307
+ 504,
1308
+ 167
1309
+ ],
1310
+ "page_idx": 7
1311
+ },
1312
+ {
1313
+ "type": "image",
1314
+ "img_path": "images/c47839aeac28899685d78c33fc3dde14c3faa4cb3dd41d9ad1d30cd9f478a768.jpg",
1315
+ "image_caption": [
1316
+ "Transferred"
1317
+ ],
1318
+ "image_footnote": [],
1319
+ "bbox": [
1320
+ 112,
1321
+ 170,
1322
+ 241,
1323
+ 238
1324
+ ],
1325
+ "page_idx": 7
1326
+ },
1327
+ {
1328
+ "type": "image",
1329
+ "img_path": "images/08009dc059a460546760a9bb83a95efac241ff8cd494fc8ffc6b985673c88a1c.jpg",
1330
+ "image_caption": [
1331
+ "\"An animated red car moves from left to right, with a deserted city in the background.\""
1332
+ ],
1333
+ "image_footnote": [],
1334
+ "bbox": [
1335
+ 243,
1336
+ 170,
1337
+ 372,
1338
+ 238
1339
+ ],
1340
+ "page_idx": 7
1341
+ },
1342
+ {
1343
+ "type": "image",
1344
+ "img_path": "images/c8a828a5adfdfd6b8481b15689b4877128ea03505b072fca85662482417b0f67.jpg",
1345
+ "image_caption": [],
1346
+ "image_footnote": [],
1347
+ "bbox": [
1348
+ 374,
1349
+ 170,
1350
+ 504,
1351
+ 238
1352
+ ],
1353
+ "page_idx": 7
1354
+ },
1355
+ {
1356
+ "type": "image",
1357
+ "img_path": "images/97a90adbef36d2f62c4f4563c1945a3cf7029a8970b7120bd5e9d56d2f8eaf83.jpg",
1358
+ "image_caption": [],
1359
+ "image_footnote": [],
1360
+ "bbox": [
1361
+ 519,
1362
+ 99,
1363
+ 648,
1364
+ 167
1365
+ ],
1366
+ "page_idx": 7
1367
+ },
1368
+ {
1369
+ "type": "image",
1370
+ "img_path": "images/2e1d692fe55d62c9739125f7b179709c1759110b33beda6243f8a549a0935f49.jpg",
1371
+ "image_caption": [],
1372
+ "image_footnote": [],
1373
+ "bbox": [
1374
+ 650,
1375
+ 99,
1376
+ 777,
1377
+ 167
1378
+ ],
1379
+ "page_idx": 7
1380
+ },
1381
+ {
1382
+ "type": "image",
1383
+ "img_path": "images/1f03952b31c0b126d476aaec1e044b934fcefc3435ad7033bdccd6cc197218a1.jpg",
1384
+ "image_caption": [],
1385
+ "image_footnote": [],
1386
+ "bbox": [
1387
+ 779,
1388
+ 99,
1389
+ 911,
1390
+ 167
1391
+ ],
1392
+ "page_idx": 7
1393
+ },
1394
+ {
1395
+ "type": "image",
1396
+ "img_path": "images/1bdebb62c9f3190c4eabc2e08a5a801dde36962e2fec79b775c07795537a595c.jpg",
1397
+ "image_caption": [
1398
+ "\"A herd of bird-deer in a towering, wooded forest.\""
1399
+ ],
1400
+ "image_footnote": [],
1401
+ "bbox": [
1402
+ 519,
1403
+ 170,
1404
+ 648,
1405
+ 237
1406
+ ],
1407
+ "page_idx": 7
1408
+ },
1409
+ {
1410
+ "type": "image",
1411
+ "img_path": "images/d0a2ba7a43cdad083473c12faab0af9667e66ecb52129ae5c82dba847f7afe50.jpg",
1412
+ "image_caption": [],
1413
+ "image_footnote": [],
1414
+ "bbox": [
1415
+ 650,
1416
+ 170,
1417
+ 777,
1418
+ 237
1419
+ ],
1420
+ "page_idx": 7
1421
+ },
1422
+ {
1423
+ "type": "image",
1424
+ "img_path": "images/2c1d953412d792e35ef89728d47396d5110002c86e9b84d61e88c2f990d76c1c.jpg",
1425
+ "image_caption": [],
1426
+ "image_footnote": [],
1427
+ "bbox": [
1428
+ 779,
1429
+ 170,
1430
+ 911,
1431
+ 237
1432
+ ],
1433
+ "page_idx": 7
1434
+ },
1435
+ {
1436
+ "type": "image",
1437
+ "img_path": "images/bf12a9414692318a0ccf5a1432ae2e7c4be9554a72671d85d4056442835e61a6.jpg",
1438
+ "image_caption": [
1439
+ "Transferred"
1440
+ ],
1441
+ "image_footnote": [],
1442
+ "bbox": [
1443
+ 112,
1444
+ 271,
1445
+ 241,
1446
+ 340
1447
+ ],
1448
+ "page_idx": 7
1449
+ },
1450
+ {
1451
+ "type": "image",
1452
+ "img_path": "images/352465d10bf0786d58455e84d925c25e221a82e21a1be8765d8cf1e008ca551f.jpg",
1453
+ "image_caption": [],
1454
+ "image_footnote": [],
1455
+ "bbox": [
1456
+ 243,
1457
+ 271,
1458
+ 372,
1459
+ 340
1460
+ ],
1461
+ "page_idx": 7
1462
+ },
1463
+ {
1464
+ "type": "image",
1465
+ "img_path": "images/8c294ef157e3ba417a8f65c3ed2bdf8ba4a8c855cd777a3379095db8f42a54c8.jpg",
1466
+ "image_caption": [],
1467
+ "image_footnote": [],
1468
+ "bbox": [
1469
+ 374,
1470
+ 271,
1471
+ 503,
1472
+ 340
1473
+ ],
1474
+ "page_idx": 7
1475
+ },
1476
+ {
1477
+ "type": "image",
1478
+ "img_path": "images/a4f2457bf2bb8d7d30fdec3b5ee58df8d5cea2b6f448788b03d6acca2deda213.jpg",
1479
+ "image_caption": [
1480
+ "Transferred Source"
1481
+ ],
1482
+ "image_footnote": [],
1483
+ "bbox": [
1484
+ 112,
1485
+ 342,
1486
+ 241,
1487
+ 407
1488
+ ],
1489
+ "page_idx": 7
1490
+ },
1491
+ {
1492
+ "type": "image",
1493
+ "img_path": "images/df9a839873720bcaa079a28c52b03a8472f7a9f762031efe204ab370131d9508.jpg",
1494
+ "image_caption": [
1495
+ "\"A green alien is generating ancient cityscapes displayed on a computer screen.\""
1496
+ ],
1497
+ "image_footnote": [],
1498
+ "bbox": [
1499
+ 243,
1500
+ 342,
1501
+ 372,
1502
+ 407
1503
+ ],
1504
+ "page_idx": 7
1505
+ },
1506
+ {
1507
+ "type": "image",
1508
+ "img_path": "images/ec4bcd646ada9cb6b383b05cc08d562de0478113df067530ab3f1605ea3a41be.jpg",
1509
+ "image_caption": [],
1510
+ "image_footnote": [],
1511
+ "bbox": [
1512
+ 374,
1513
+ 342,
1514
+ 503,
1515
+ 407
1516
+ ],
1517
+ "page_idx": 7
1518
+ },
1519
+ {
1520
+ "type": "image",
1521
+ "img_path": "images/2130ff0f3de97f4c9b9d4e41aa75f366ec3c8d82122c341e6373f446ab3ec2b6.jpg",
1522
+ "image_caption": [],
1523
+ "image_footnote": [],
1524
+ "bbox": [
1525
+ 519,
1526
+ 271,
1527
+ 648,
1528
+ 340
1529
+ ],
1530
+ "page_idx": 7
1531
+ },
1532
+ {
1533
+ "type": "image",
1534
+ "img_path": "images/b2a5df0f12b8a2da4bac305c4a45bc4503f6353fa97f26b0bfb2040b85455951.jpg",
1535
+ "image_caption": [],
1536
+ "image_footnote": [],
1537
+ "bbox": [
1538
+ 650,
1539
+ 271,
1540
+ 777,
1541
+ 340
1542
+ ],
1543
+ "page_idx": 7
1544
+ },
1545
+ {
1546
+ "type": "image",
1547
+ "img_path": "images/e774ac37fe2cdef64d2767fd2447fe4c6ec455b0b699a70f46ebeddbe7422cdb.jpg",
1548
+ "image_caption": [],
1549
+ "image_footnote": [],
1550
+ "bbox": [
1551
+ 779,
1552
+ 271,
1553
+ 911,
1554
+ 340
1555
+ ],
1556
+ "page_idx": 7
1557
+ },
1558
+ {
1559
+ "type": "image",
1560
+ "img_path": "images/717e2087de75bc586fad3d0a28f553f6ecee5718aca6963e6aacdaf89f7c8cfc.jpg",
1561
+ "image_caption": [
1562
+ "\"An anime girl with a white hat and tanned skin sits by the edge of a tranquil mountain lake.\""
1563
+ ],
1564
+ "image_footnote": [],
1565
+ "bbox": [
1566
+ 519,
1567
+ 340,
1568
+ 648,
1569
+ 407
1570
+ ],
1571
+ "page_idx": 7
1572
+ },
1573
+ {
1574
+ "type": "image",
1575
+ "img_path": "images/b5d9a23a917da8d8046a5630064a7312e3b0a0236180d48c992d3a7e10563dbb.jpg",
1576
+ "image_caption": [],
1577
+ "image_footnote": [],
1578
+ "bbox": [
1579
+ 650,
1580
+ 340,
1581
+ 777,
1582
+ 407
1583
+ ],
1584
+ "page_idx": 7
1585
+ },
1586
+ {
1587
+ "type": "image",
1588
+ "img_path": "images/a95465ef21636cf570827a47d4b2c7586ee8689a6a4fcdcf10fa607348e87db8.jpg",
1589
+ "image_caption": [],
1590
+ "image_footnote": [],
1591
+ "bbox": [
1592
+ 779,
1593
+ 340,
1594
+ 911,
1595
+ 407
1596
+ ],
1597
+ "page_idx": 7
1598
+ },
1599
+ {
1600
+ "type": "image",
1601
+ "img_path": "images/9f1c73a820cad5fa38538c07fa5a48261ed5017bf7b7d66265a8ac9060fd82b2.jpg",
1602
+ "image_caption": [
1603
+ "Fig. 6. Qualitative results on motion transfer of our method.",
1604
+ "Fig. 7. More results of the animating mesh to video generation task. Our method enables the generation of different styles from the same mesh."
1605
+ ],
1606
+ "image_footnote": [],
1607
+ "bbox": [
1608
+ 86,
1609
+ 478,
1610
+ 913,
1611
+ 839
1612
+ ],
1613
+ "page_idx": 7
1614
+ },
1615
+ {
1616
+ "type": "page_number",
1617
+ "text": "8",
1618
+ "bbox": [
1619
+ 81,
1620
+ 69,
1621
+ 91,
1622
+ 78
1623
+ ],
1624
+ "page_idx": 7
1625
+ },
1626
+ {
1627
+ "type": "header",
1628
+ "text": "Zekai Gu, et al.",
1629
+ "bbox": [
1630
+ 112,
1631
+ 68,
1632
+ 189,
1633
+ 78
1634
+ ],
1635
+ "page_idx": 7
1636
+ },
1637
+ {
1638
+ "type": "image",
1639
+ "img_path": "images/b015635a124a6b65433c67f58456287662d3934905e05aec3f1e514dc6b7792b.jpg",
1640
+ "image_caption": [
1641
+ "Fig. 8. Qualitative comparison on the animating mesh to video task between our method and CHAMP [Zhu et al. 2024]."
1642
+ ],
1643
+ "image_footnote": [],
1644
+ "bbox": [
1645
+ 98,
1646
+ 98,
1647
+ 898,
1648
+ 301
1649
+ ],
1650
+ "page_idx": 8
1651
+ },
1652
+ {
1653
+ "type": "image",
1654
+ "img_path": "images/9f3675d45ab9387fa973bbd5c73acb7528a5fcca23e872bb39464eec004f8c2c.jpg",
1655
+ "image_caption": [
1656
+ "Fig. 9. Qualitative results of our method on the object manipulation task. The top part shows the results of translation while the bottom part shows the results of rotating the object."
1657
+ ],
1658
+ "image_footnote": [],
1659
+ "bbox": [
1660
+ 86,
1661
+ 339,
1662
+ 911,
1663
+ 773
1664
+ ],
1665
+ "page_idx": 8
1666
+ },
1667
+ {
1668
+ "type": "header",
1669
+ "text": "Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control",
1670
+ "bbox": [
1671
+ 477,
1672
+ 68,
1673
+ 882,
1674
+ 78
1675
+ ],
1676
+ "page_idx": 8
1677
+ },
1678
+ {
1679
+ "type": "page_number",
1680
+ "text": "9",
1681
+ "bbox": [
1682
+ 908,
1683
+ 69,
1684
+ 915,
1685
+ 78
1686
+ ],
1687
+ "page_idx": 8
1688
+ },
1689
+ {
1690
+ "type": "image",
1691
+ "img_path": "images/bbdd6af3b0ecb4cd2d1f5b4716ca4cdea6ef1ec46548d4467e40231c6476f85a.jpg",
1692
+ "image_caption": [
1693
+ "Fig. 10. Generated videos using depth maps or 3D tracking videos as control signals. Our 3D tracking videos provide better quality on the cross-frame consistency for video generation than depth maps."
1694
+ ],
1695
+ "image_footnote": [],
1696
+ "bbox": [
1697
+ 84,
1698
+ 99,
1699
+ 478,
1700
+ 247
1701
+ ],
1702
+ "page_idx": 9
1703
+ },
1704
+ {
1705
+ "type": "text",
1706
+ "text": "4.5 Analysis",
1707
+ "text_level": 1,
1708
+ "bbox": [
1709
+ 80,
1710
+ 306,
1711
+ 176,
1712
+ 321
1713
+ ],
1714
+ "page_idx": 9
1715
+ },
1716
+ {
1717
+ "type": "list",
1718
+ "sub_type": "text",
1719
+ "list_items": [
1720
+ "We conduct analysis on the choice of 3D control signals, i.e. depth maps or 3D tracking videos, and the number of 3D tracking points. To achieve this, we randomly selected 50 videos from the validation split of the DAVIS [Pont-Tuset et al. 2017] and MiraData [Ju et al. 2024] video dataset. We extract the first-frame images as the input image and apply different models to re-generate these videos. To evaluate the quality of the generated videos, we compute PSNR, SSIM [Wang et al. 2004], LPIPS [Zhang et al. 2018], and FVD [Unterthiner et al. 2019] between the generated videos and the ground-truth videos.",
1721
+ "4.5.1 Depth maps vs. 3D tracking videos. To illustrate the effectiveness of our 3D tracking videos, we compare DaS with a baseline using depth maps as conditions instead of 3D tracking videos. Specifically, the baseline adopts the same architecture as DaS but replaces the 3D tracking video with a depth map video. We adopt the Depth Pro [Bochkovskii et al. 2024] to generate the video depth video for this baseline method. As shown in Table 3, our model outperforms this baseline in all metrics, demonstrating that the 3D tracking videos provide a better signal for the diffusion model to recover ground-truth videos than the depth map conditions. Figure 10 shows the generated videos, which demonstrate that our method produces more consistent videos with the ground truth. The main reason is that the 3D tracking videos effectively associate different frames of a video while the depth maps only provide some cues of the scene structures without constraining the motion of the video.",
1722
+ "4.5.2 Point density. In Table 3, we further present an ablation study with varying numbers of 3D tracking points as control signals. The number of 3D tracking points ranges from 900 $(30\\times 30)$ to 8100 $(90\\times 90)$ . Though the generated videos with 4900 tracking points perform slightly better than the other ones, the visual qualities of 2500, 4900, and 8100 tracking points are very similar to each other. Since tracking too many points with SpatialTracker [Xiao et al. 2024b] would be slow, we choose 4900 as our default setting in all our other experiments using 3D point tracking.",
1723
+ "4.5.3 Runtime. In the inference stage, we employ the DDIM [Song et al. 2020] sampler with 50 steps, classifier-free guidance of magnitude 7.0, which costs about 2.5 minutes to generate 49 frames on a H800 GPU at a resolution of $480 \\times 720$ ."
1724
+ ],
1725
+ "bbox": [
1726
+ 78,
1727
+ 323,
1728
+ 483,
1729
+ 875
1730
+ ],
1731
+ "page_idx": 9
1732
+ },
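The frame-level metrics in this analysis can be sketched as follows with scikit-image and the lpips package; FVD additionally requires a pretrained video feature network and is omitted. The `(T, H, W, 3)` uint8 layout of `gen` and `gt` is an assumption.

```python
import numpy as np
import torch
import lpips
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

lpips_fn = lpips.LPIPS(net="alex")  # perceptual metric of Zhang et al. [2018]

def video_metrics(gen: np.ndarray, gt: np.ndarray):
    """Average per-frame PSNR, SSIM, and LPIPS over one video pair."""
    psnr = np.mean([peak_signal_noise_ratio(a, b) for a, b in zip(gt, gen)])
    ssim = np.mean([structural_similarity(a, b, channel_axis=-1)
                    for a, b in zip(gt, gen)])
    to_t = lambda v: (torch.from_numpy(v).permute(0, 3, 1, 2).float()
                      / 127.5 - 1.0)          # uint8 -> [-1, 1] tensors
    with torch.no_grad():
        lp = lpips_fn(to_t(gen), to_t(gt)).mean().item()
    return psnr, ssim, lp
```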
1733
+ {
1734
+ "type": "image",
1735
+ "img_path": "images/2d4d25646022fad28faf390587257adabcf5b8713cefd0803e76382238165379.jpg",
1736
+ "image_caption": [
1737
+ "Fig. 11. Failure cases. (Top) Incompatible tracking video. When a tracking video that does not correspond to the structures of the input image is provided, DaS will generate a video with a scene transition to a compatible new scene. (Bottom) Out of tracking range. For regions without 3D tracking points, the tracking video fails to constrain these regions and DaS may generate some uncontrolled content."
1738
+ ],
1739
+ "image_footnote": [],
1740
+ "bbox": [
1741
+ 521,
1742
+ 98,
1743
+ 915,
1744
+ 421
1745
+ ],
1746
+ "page_idx": 9
1747
+ },
1748
+ {
1749
+ "type": "text",
1750
+ "text": "5 LIMITATIONS AND CONCLUSIONS",
1751
+ "text_level": 1,
1752
+ "bbox": [
1753
+ 514,
1754
+ 539,
1755
+ 792,
1756
+ 551
1757
+ ],
1758
+ "page_idx": 9
1759
+ },
1760
+ {
1761
+ "type": "text",
1762
+ "text": "Limitations and future works. Though DaS achieves control over the video generation process in most cases, it still suffers from multiple failure cases mainly caused by incorrect 3Dtracking videos. The first failure case is that the input image should be compatible with the 3D tracking videos. Otherwise, the generated videos would be implausible as shown in Figure 11 (top). Another failure case is that for regions without 3D tracking points, the generated contents may be out-of-control and produce some unnatural results (Figure 11 (bottom)). For future works, we currently rely on provided animated meshes or existing videos to get high-quality 3D tracking videos and a promising direction is to learn to generate these 3D tracking videos with a new diffusion model.",
1763
+ "bbox": [
1764
+ 511,
1765
+ 556,
1766
+ 916,
1767
+ 720
1768
+ ],
1769
+ "page_idx": 9
1770
+ },
1771
+ {
1772
+ "type": "text",
1773
+ "text": "Conclusions. In this paper, we introduce Diffusion as Shader (DaS) for controllable video generation. The key idea of DaS is to adopt the 3D tracking videos as 3D control signals for video generation. The 3D tracking videos are constructed from colored dynamic 3D points which represent the underlying 3D motion of the video. Then, diffusion models are applied to generate a video following the motion of the 3D tracking video. We demonstrate that the 3D tracking videos not only improve the temporal consistency of the generated videos but also enable versatile control of the video content, including mesh-to-video generation, camera control, motion transfer, and object manipulation.",
1774
+ "bbox": [
1775
+ 511,
1776
+ 723,
1777
+ 916,
1778
+ 876
1779
+ ],
1780
+ "page_idx": 9
1781
+ },
1782
+ {
1783
+ "type": "page_number",
1784
+ "text": "10",
1785
+ "bbox": [
1786
+ 83,
1787
+ 69,
1788
+ 94,
1789
+ 78
1790
+ ],
1791
+ "page_idx": 9
1792
+ },
1793
+ {
1794
+ "type": "header",
1795
+ "text": "Zekai Gu, et al.",
1796
+ "bbox": [
1797
+ 112,
1798
+ 68,
1799
+ 192,
1800
+ 78
1801
+ ],
1802
+ "page_idx": 9
1803
+ },
1804
+ {
1805
+ "type": "text",
1806
+ "text": "REFERENCES",
1807
+ "text_level": 1,
1808
+ "bbox": [
1809
+ 81,
1810
+ 99,
1811
+ 178,
1812
+ 112
1813
+ ],
1814
+ "page_idx": 10
1815
+ },
1816
+ {
1817
+ "type": "list",
1818
+ "sub_type": "ref_text",
1819
+ "list_items": [
1820
+ "Sherwin Bahmani, Ivan Skorokhodov, Guocheng Qian, Aliaksandr Siarohin, Willi Menapace, Andrea Tagliasacchi, David B Lindell, and Sergey Tulyakov. 2024. AC3D: Analyzing and Improving 3D Camera Control in Video Diffusion Transformers. arXiv preprint arXiv:2411.18673 (2024).",
1821
+ "Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. 2023. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127 (2023).",
1822
+ "Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R Richter, and Vladlen Koltun. 2024. Depth pro: Sharp monocular metric depth in less than a second. arXiv preprint arXiv:2410.02073 (2024).",
1823
+ "Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. 2024. Video generation models as world simulators. https://openai.com/research/video-generation-models-as-world-simulators",
1824
+ "Shengqu Cai, Duygu Ceylan, Matheus Gadelha, Chun-Hao Paul Huang, Tuanfeng Yang Wang, and Gordon Wetzstein. 2024. Generative rendering: Controllable 4d-guided video generation with 2d diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 7611-7620.",
1825
+ "Tianshi Cao, Karsten Kreis, Sanja Fidler, Nicholas Sharp, and Kangxue Yin. 2023. Texfusion: Synthesizing 3d textures with text-guided image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 4169-4181.",
1826
+ "Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. 2023b. Videocrafter1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512 (2023).",
1827
+ "Haoxin Chen, Yong Zhang, Xiaodong Cun, Menghan Xia, Xintao Wang, Chao Weng, and Ying Shan. 2024b. Videocrafter2: Overcoming data limitations for high-quality video diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 7310-7320.",
1828
+ "Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li. 2024a. PIXART-Sigma: Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation. In European Conference on Computer Vision. Springer, 74-91.",
1829
+ "Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. 2023a. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404 (2023).",
1830
+ "Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasiis Germanidis. 2023. Structure and content-guided video synthesis with diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 7346-7356.",
1831
+ "Ruoyu Feng, Wenming Weng, Yanhui Wang, Yuhui Yuan, Jianmin Bao, Chong Luo, Zhibo Chen, and Baining Guo. 2024b. CCEdit: Creative and Controllable Video Editing via Diffusion Models. arXiv:2309.16496 [cs.CV] https://arxiv.org/abs/2309.16496",
1832
+ "Wanquan Feng, Tianhao Qi, Jiawei Liu, Mingzhen Sun, Pengqi Tu, Tianxiang Ma, Fei Dai, Songtao Zhao, Siyu Zhou, and Qian He. 2024a. I2VControl: Disentangled and Unified Video Motion Synthesis Control. arXiv preprint arXiv:2411.17765 (2024).",
1833
+ "Daniel Geng, Charles Herrmann, Junhwa Hur, Forrester Cole, Serena Zhang, Tobias Pfaff, Tatiana Lopez-Guevara, Carl Doersch, Yusuf Aytar, Michael Rubinstein, et al. 2024. Motion Prompting: Controlling Video Generation with Motion Trajectories. arXiv preprint arXiv:2412.02700 (2024).",
1834
+ "Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. 2023a. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373 (2023).",
1835
+ "Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. 2023b. TokenFlow: Consistent Diffusion Features for Consistent Video Editing. arXiv:2307.10373 [cs.CV] https://arxiv.org/abs/2307.10373",
1836
+ "Yuwei Guo, Ceyuan Yang, Anyi Rao, Maneesh Agrawala, Dahua Lin, and Bo Dai. 2024. Sparsectrl: Adding sparse controls to text-to-video diffusion models. In ECCV. 330-348.",
1837
+ "Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. 2023. Animatediff: Imagine your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725 (2023).",
1838
+ "Hao Ye, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. 2024b. Cameractrl: Enabling camera control for text-to-video generation. arXiv preprint arXiv:2404.02101 (2024).",
1839
+ "Xuanhua He, Quande Liu, Shengju Qian, Xin Wang, Tao Hu, Ke Cao, Keyu Yan, and Jie Zhang. 2024a. Id-animator: Zero-shot identity-preserving human video generation. arXiv preprint arXiv:2404.15275 (2024).",
1840
+ "Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. 2022. Latent video diffusion models for high-fidelity long video generation. arXiv preprint arXiv:2211.13221 (2022)."
1841
+ ],
1842
+ "bbox": [
1843
+ 81,
1844
+ 116,
1845
+ 482,
1846
+ 862
1847
+ ],
1848
+ "page_idx": 10
1849
+ },
1850
+ {
1851
+ "type": "list",
1852
+ "sub_type": "ref_text",
1853
+ "list_items": [
1854
+ "Jack Hessel, Ari Holtzman, Maxwell Forbes, Ronan Le Bras, and Yejin Choi. 2022. CLIPScore: A Reference-free Evaluation Metric for Image Captioning. arXiv:2104.08718 [cs.CV] https://arxiv.org/abs/2104.08718",
1855
+ "Jonathan Ho, Ajay Jain, and Pieter Abbeel. 2020. Denoising diffusion probabilistic models. NeurIPS (2020).",
1856
+ "Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. 2022. Video diffusion models. Advances in Neural Information Processing Systems 35 (2022), 8633-8646.",
1857
+ "Hsin-Ping Huang, Yu-Chuan Su, Deqing Sun, Lu Jiang, Xuhui Jia, Yukun Zhu, and Ming-Hsuan Yang. 2023. Fine-grained controllable video generation via object appearance and context. arXiv preprint arXiv:2312.02919 (2023).",
1858
+ "Yash Jain, Anshul Nasery, Vibhav Vineet, and Harkirat Behl. 2024. Peekaboo: Interactive video generation via masked-diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8079-8088.",
1859
+ "Hyeonho Jeong, Chun-Hao Paul Huang, Jong Chul Ye, Niloy Mitra, and Duygu Ceylan. 2024. Track4Gen: Teaching Video Diffusion Models to Track Points Improves Video Generation. arXiv preprint arXiv:2412.06016 (2024).",
1860
+ "Xuan Ju, Yiming Gao, Zhaoyang Zhang, Ziyang Yuan, Xintao Wang, Ailing Zeng, Yu Xiong, Qiang Xu, and Ying Shan. 2024. MiraData: A Large-Scale Video Dataset with Long Durations and Structured Captions. arXiv:2407.06358 [cs.CV] https://arxiv.org/abs/2407.06358",
1861
+ "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. 2023. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 4015-4026.",
1862
+ "Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, et al. 2024. HunyuanVideo: A Systematic Framework For Large Video Generative Models. arXiv preprint arXiv:2412.03603 (2024).",
1863
+ "Mathis Koroglu, Hugo Caselles-Dupré, Guillaume Jeanneret Sanmiguel, and Matthieu Cord. 2024. OnlyFlow: Optical Flow based Motion Conditioning for Video Diffusion Models. arXiv preprint arXiv:2411.10501 (2024).",
1864
+ "Kuaishou. 2024. Keling. https://kling.kuaishou.com/",
1865
+ "Black Forest Labs. 2024. FLUX. https://github.com/black-forest-labs/flux",
1866
+ "Guojun Lei, Chi Wang, Hong Li, Rong Zhang, Yikai Wang, and Weiwei Xu. 2024. **AnimateAnything: Consistent and Controllable Animation for Video Generation. arXiv preprint arXiv:2411.10836 (2024).",
1867
+ "Yaowei Li, Xintao Wang, Zhaoyang Zhang, Zhouxia Wang, Ziyang Yuan, Liangbin Xie, Yuexian Zou, and Ying Shan. 2024. Image conductor: Precision control for interactive video synthesis. arXiv preprint arXiv:2406.15339 (2024).",
1868
+ "Bin Lin, Yunyang Ge, Xinhua Cheng, Zongjian Li, Bin Zhu, Shaodong Wang, Xianyi He, Yang Ye, Shenghai Yuan, Liuhan Chen, et al. 2024. Open-Sora Plan: Open-Source Large Video Generation Model. arXiv preprint arXiv:2412.00131 (2024).",
1869
+ "Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. 2023. SMPL: A skinned multi-person linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2. 851-866.",
1870
+ "Jiahao Lu, Tianyu Huang, Peng Li, Zhiyang Dou, Cheng Lin, Zhiming Cui, Zhen Dong, Sai-Kit Yeung, Wenping Wang, and Yuan Liu. 2024. Align3R: Aligned Monocular Depth Estimation for Dynamic Videos. arXiv preprint arXiv:2412.03079 (2024).",
1871
+ "Wan-Duo Kurt Ma, John P Lewis, and W Bastiaan Kleijn. 2024b. Trailblazer: Trajectory control for diffusion-based video generation. In SIGGRAPH Asia.",
1872
+ "Yue Ma, Yingqing He, Hongfa Wang, Andong Wang, Chenyang Qi, Chengfei Cai, Xiu Li, Zhifeng Li, Heung-Yeung Shum, Wei Liu, et al. 2024a. Follow-your-click: Open-domain regional image animation via short prompts. arXiv preprint arXiv:2403.08268 (2024).",
1873
+ "Tuna Han Salih Meral, Hidir Yesiltepe, Connor Dunlop, and Pinar Yanardag. 2024. MotionFlow: Attention-Driven Motion Transfer in Video Diffusion Models. arXiv preprint arXiv:2412.05275 (2024).",
1874
+ "Chong Mou, Mingdeng Cao, Xintao Wang, Zhaoyang Zhang, Ying Shan, and Jian Zhang. 2024. ReVideo: Remake a Video with Motion and Content Control. arXiv preprint arXiv:2405.13865 (2024).",
1875
+ "Koichi Namekata, Sherwin Bahmani, Ziyi Wu, Yash Kant, Igor Gilitschenski, and David B Lindell. 2024. Sg-i2v: Self-guided trajectory control in image-to-video generation. arXiv preprint arXiv:2411.04989 (2024).",
1876
+ "Pauline C Ng and Steven Henikoff. 2003. SIFT: Predicting amino acid changes that affect protein function. *Nucleic acids research* 31, 13 (2003), 3812-3814.",
1877
+ "Muyao Niu, Xiaodong Cun, Xintao Wang, Yong Zhang, Ying Shan, and Yinqiang Zheng. 2024. Mofa-video: Controllable image animation via generative motion field adoptions in frozen image-to-video diffusion model. in ECCV.",
1878
+ "Geon Yeong Park, Hyeonho Jeong, Sang Wan Lee, and Jong Chul Ye. 2024. Spectral motion alignment for video motion transfer using diffusion models. arXiv preprint arXiv:2403.15249 (2024).",
1879
+ "William Peebles and Saining Xie. 2023a. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 4195-4205.",
1880
+ "William Peebles and Saining Xie. 2023b. Scalable Diffusion Models with Transformers. arXiv:2212.09748 [cs.CV] https://arxiv.org/abs/2212.09748"
1881
+ ],
1882
+ "bbox": [
1883
+ 516,
1884
+ 103,
1885
+ 915,
1886
+ 859
1887
+ ],
1888
+ "page_idx": 10
1889
+ },
1890
+ {
1891
+ "type": "header",
1892
+ "text": "Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control",
1893
+ "bbox": [
1894
+ 472,
1895
+ 68,
1896
+ 877,
1897
+ 78
1898
+ ],
1899
+ "page_idx": 10
1900
+ },
1901
+ {
1902
+ "type": "page_number",
1903
+ "text": "11",
1904
+ "bbox": [
1905
+ 885,
1906
+ 69,
1907
+ 915,
1908
+ 78
1909
+ ],
1910
+ "page_idx": 10
1911
+ },
1912
+ {
1913
+ "type": "list",
1914
+ "sub_type": "ref_text",
1915
+ "list_items": [
1916
+ "Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, et al. 2024. Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720 (2024).",
1917
+ "Alexander Pondaven, Aliaksandr Siarohin, Sergey Tulyakov, Philip Torr, and Fabio Pizzati. 2024. Video Motion Transfer with Diffusion Transformers. arXiv preprint arXiv:2412.07776 (2024).",
1918
+ "Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alexander Sorkine-Hornung, and Luc Van Gool. 2017. The 2017 DAVIS Challenge on Video Object Segmentation. arXiv:1704.0675 (2017).",
1919
+ "Haonan Qiu, Zhaoxi Chen, Zhouxia Wang, Yingqing He, Menghan Xia, and Ziwei Liu. 2024. Freetraj: Tuning-free trajectory control in video diffusion models. arXiv preprint arXiv:2406.16863 (2024).",
1920
+ "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. arXiv:2103.00020 [cs.CV] https://arxiv.org/abs/2103.00020",
1921
+ "René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. 2020. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE transactions on pattern analysis and machine intelligence 44, 3 (2020), 1623-1637.",
1922
+ "Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, and Daniel Cohen-Or. 2023. Texture: Text-guided texturing of 3d shapes. In ACM SIGGRAPH 2023 conference proceedings. 1-11.",
1923
+ "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-resolution image synthesis with latent diffusion models. In CVPR.",
1924
+ "Xiaoyu Shi, Zhaoyang Huang, Fu-Yun Wang, Weikang Bian, Dasong Li, Yi Zhang, Manyuan Zhang, Ka Chun Cheung, Simon See, Hongwei Qin, et al. 2024. Motion-2v: Consistent and controllable image-to-video generation with explicit motion modeling. In SIGGRAPH.",
1925
+ "Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. 2021. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems 34 (2021), 19313-19325.",
1926
+ "Jiaming Song, Chenlin Meng, and Stefano Ermon. 2020. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020).",
1927
+ "Yao Teng, Enze Xie, Yue Wu, Haoyu Han, Zhenguo Li, and Xihui Liu. 2023. Drag-a-video: Non-rigid video editing with point-based interaction. arXiv preprint arXiv:2312.02936 (2023).",
1928
+ "Thomas Unterthiner, Sjoerd van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. 2019. Towards Accurate Generative Models of Video: A New Metric & Challenges. arXiv:1812.01717 [cs.CV] https://arxiv.org/abs/1812.01717",
1929
+ "Jiawei Wang, Yuchen Zhang, Jiaxin Zou, Yan Zeng, Guoqiang Wei, Liping Yuan, and Hang Li. 2024f. Boximator: Generating rich and controllable motions for video synthesis. arXiv preprint arXiv:2402.01566 (2024).",
1930
+ "Ruicheng Wang, Sicheng Xu, Cassie Dai, Jianfeng Xiang, Yu Deng, Xin Tong, and Jiao-long Yang. 2024b. MoGe: Unlocking Accurate Monocular Geometry Estimation for Open-Domain Images with Optimal Training Supervision. arXiv:2410.19115 [cs.CV] https://arxiv.org/abs/2410.19115",
1931
+ "Tianfu Wang, Menelaoos Kanakis, Konrad Schindler, Luc Van Gool, and Anton Obukhov. 2023. Breathing new life into 3d assets with generative repainting. arXiv preprint arXiv:2309.08523 (2023).",
1932
+ "Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. 2024d. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems 36 (2024).",
1933
+ "Yuelei Wang, Jian Zhang, Pengtao Jiang, Hao Zhang, Jinwei Chen, and Bo Li. 2024e. CPA: Camera-pose-awareness Diffusion Transformer for Video Generation. arXiv preprint arXiv:2412.01429 (2024).",
1934
+ "Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing 13, 4 (2004), 600-612. https://doi.org/10.1109/TIP.2003.819861",
1935
+ "Zhouxia Wang, Yushi Lan, Shangchen Zhou, and Chen Change Loy. 2024a. ObjCtrl-2.5 D: Training-free Object Control with Camera Poses. arXiv preprint arXiv:2412.07721 (2024).",
1936
+ "Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. 2024c. Motionctrl: A unified and flexible motion controller for video generation. In SIGGRAPH.",
1937
+ "Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. 2024b. SpatialTracker: Tracking Any 2D Pixels in 3D Space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 20406-20417.",
1938
+ "Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. 2024a. Trajectory Attention for Fine-grained Video Motion Control. arXiv preprint arXiv:2411.19324 (2024)."
1939
+ ],
1940
+ "bbox": [
1941
+ 81,
1942
+ 102,
1943
+ 483,
1944
+ 859
1945
+ ],
1946
+ "page_idx": 11
1947
+ },
1948
+ {
1949
+ "type": "list",
1950
+ "sub_type": "ref_text",
1951
+ "list_items": [
1952
+ "Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Wangbo Yu, Hanyuan Liu, Gongye Liu, Xintao Wang, Ying Shan, and Tien-Tsin Wong. 2024. Dynamicafter: Animating open-domain images with video diffusion priors. In European Conference on Computer Vision. Springer, 399-417.",
1953
+ "Shiyuan Yang, Liang Hou, Haibin Huang, Chongyang Ma, Pengfei Wan, Di Zhang, Xiaodong Chen, and Jing Liao. 2024a. Direct-a-video: Customized video generation with user-directed camera movement and object motion. In ACM SIGGRAPH 2024 Conference Papers. 1-12.",
1954
+ "Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024b. Cogvideo: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024).",
1955
+ "Danah Yatim, Ralfid Fridman, Omer Bar-Tal, Yoni Kasten, and Tali Dekel. 2024. Spacetime diffusion features for zero-shot text-driven motion transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8466-8476.",
1956
+ "Shengming Yin, Chenfei Wu, Jian Liang, Jie Shi, Houqiang Li, Gong Ming, and Nan Duan. 2023. Dragnuwa: Fine-grained control in video generation by integrating text, image, and trajectory. arXiv preprint arXiv:2308.08089 (2023).",
1957
+ "Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. 2024. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048 (2024).",
1958
+ "Shenghai Yuan, Jina Fuang, Xianyi He, Yunyuan Ge, Yujun Shi, Liuhan Chen, Jiebo Luo, and Li Yuan. 2024. Identity-Preserving Text-to-Video Generation by Frequency Decomposition. arXiv preprint arXiv:2411.17440 (2024).",
1959
+ "Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. 2023. Adding conditional control to text-to-image diffusion models. In ICCV. 3836-3847.",
1960
+ "Qihang Zhang, Shuangfei Zhai, Miguel Angel Bautista, Kevin Miao, Alexander Toshev, Joshua Susskind, and Jiaotao Gu. 2024. World-consistent Video Diffusion with Explicit 3D Modeling. arXiv preprint arXiv:2412.01821 (2024).",
1961
+ "Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. 2018. The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. arXiv:1801.03924 [cs.CV] https://arxiv.org/abs/1801.03924",
1962
+ "Tingyang Zhang, Chen Wang, Zhiyang Dou, Jiahui Lei Qingzhe Gao, Baoquan Chen, and Lingjie Liu. 2025. ProTracker: Probabilistic Integration for Robust and Accurate Point Tracking. arXiv preprint arxiv:2501.03220 (2025).",
1963
+ "Guangcong Zheng, Teng Li, Rui Jiang, Yehao Lu, Tao Wu, and Xi Li. 2024a. Cam12V: Camera-Controlled Image-to-Video Diffusion Model. arXiv preprint arXiv:2410.15957 (2024).",
1964
+ "Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. 2024b. Open-Sora: Democratizing Efficient Video Production for All. https://github.com/hpaitech/Open-Sora",
1965
+ "Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. 2018. Stereo Magnification: Learning View Synthesis using Multiplane Images. In SIGGRAPH.",
1966
+ "Shenhao Zhu, Junming Leo Chen, Zuozhuo Dai, Yinghui Xu, Xun Cao, Yao Yao, Hao Zhu, and Siyu Zhu. 2024. Champ: Controllable and Consistent Human Image Animation with 3D Parametric Guidance. arXiv:2403.14781 [cs.CV]"
1967
+ ],
1968
+ "bbox": [
1969
+ 514,
1970
+ 102,
1971
+ 916,
1972
+ 587
1973
+ ],
1974
+ "page_idx": 11
1975
+ },
1976
+ {
1977
+ "type": "page_number",
1978
+ "text": "12",
1979
+ "bbox": [
1980
+ 83,
1981
+ 69,
1982
+ 94,
1983
+ 78
1984
+ ],
1985
+ "page_idx": 11
1986
+ },
1987
+ {
1988
+ "type": "header",
1989
+ "text": "Zekai Gu, et al.",
1990
+ "bbox": [
1991
+ 104,
1992
+ 68,
1993
+ 192,
1994
+ 78
1995
+ ],
1996
+ "page_idx": 11
1997
+ }
1998
+ ]
2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03847/73b93507-55a7-403b-876e-01ee04faaab0_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8df11c064e86918239db33e76771890691e747ceda9fa094407be83654d04bf2
3
+ size 5659180
2501.03xxx/2501.03847/full.md ADDED
@@ -0,0 +1,392 @@
1
+ # Diffusion as Shader: 3D-aware Video Diffusion for Versatile Video Generation Control
2
+
3
+ ZEKAI GU, Hong Kong University of Science and Technology, China
4
+ RUI YAN, Zhejiang University, China
5
+ JIAHAO LU, Hong Kong University of Science and Technology, China
6
+ PENG LI, Hong Kong University of Science and Technology, China
7
+ ZHIYANG DOU, The University of Hong Kong, China
8
+ CHENYANG SI, Nanyang Technological University, Singapore
9
+ ZHEN DONG, Wuhan University, China
10
+ QIFENG LIU, Hong Kong University of Science and Technology, China
11
+ CHENG LIN, The University of Hong Kong, China
12
+ ZIWEI LIU, Nanyang Technological University, Singapore
13
+ WENPING WANG, Texas A&M University, U.S.A
14
+ YUAN LIU, Hong Kong University of Science and Technology, China
15
+
16
+ ![](images/3cbfeacd022e1030e0f91208808d64b5327adf483519d365f7758f3f2e934c58.jpg)
17
+ (a) Diffusion as Shader
18
+
19
+ ![](images/ead6ff7c8cb3cc6e24f0162ab251c800d010365132d7fe4cca26284790ece4f8.jpg)
20
+
21
+ ![](images/efd136b4e3f5d44b4d772e9f04a1f4c3e24f324e7c79d4ebe30a877dbd4efc6e.jpg)
22
+
23
+ ![](images/b0d22c54810587509ff087f48801917e4e71c7402c3d6c43967eded4b4960770.jpg)
24
+
25
+ ![](images/73628fceb521443e49094fdfaecfc630712fb415f9573ee8798be6a3cd2c66e5.jpg)
26
+ Fig. 1. Diffusion as Shader (DaS) is (a) a 3D-aware video diffusion method enabling versatile video control tasks including (b) animating meshes to video generation, (c) motion transfer, (d) camera control, and (e) object manipulation.
27
+
28
+ ![](images/fd541b4a3b4563641813b3c16643cf616c9974c27691eb4ea6a6e3342ce52b74.jpg)
29
+
30
+ ![](images/73f232c772f1a648cd79e4b89c9ecb23d18eaa73c9584bbc6d4dc2d33c8913d6.jpg)
31
+
32
+ ![](images/db5346859b02d554b70bbe52db22f7a386e0ff4ec514402a7e4287d2b1281784.jpg)
33
+
34
+ ![](images/de252c04747de383ca09971014a91c3de7d61e857e43a2b3b60c3ac5f3756337.jpg)
35
+
36
+ ![](images/1bd7dcecd305989c8cc2282d87f696c48e35eabfc8185721bc4cd3fdf8157d5c.jpg)
37
+
38
+ ![](images/175ec96d23b8e7882a7a83ff189f06ec2f516fae9cd9805c5d43371ae9d69747.jpg)
39
+
40
+ ![](images/9711c00114afc9a7ceecc4622ec2e5cff79578d0811f1b0cc7290d03e1846535.jpg)
41
+
42
+ ![](images/e604c615761d4adede2dacabde122138eaf625124b5185544eab9fa30ade6434.jpg)
43
+
44
+ ![](images/5aab5615e26b815c99c9bc0f3cf8ff09c077aaba6142e64e2ed733db8187e25c.jpg)
45
+
46
+ ![](images/28b0727e1b2f5aa8a8991e53902d24ce03af4701124f4a906b503f6c16def39b.jpg)
47
+
48
+ ![](images/bdf68f5d5489b1bdb6e506e3b4735981511b433b7d282ba9c3509a3d367b8dc5.jpg)
49
+
50
+ ![](images/f3f93fc2386ab1c4ac884633d8c9a66b45b7fb70a370734f5f7c0c2bf59da601.jpg)
51
+
52
+ ![](images/2611f9362c0d9336e000e8cbefc916b12d4e23a4bf5fd0ade3e77727be5faca5.jpg)
53
+
54
+ ![](images/ddbd9d07e0c571d3cc4964ea78c249134a0e492cbf130a8ccc34f0e217fd809d.jpg)
55
+
56
+ Diffusion models have demonstrated impressive performance in generating high-quality videos from text prompts or images. However, precise control over the video generation process—such as camera manipulation or content editing—remains a significant challenge. Existing methods for controlled video generation are typically limited to a single control type, lacking the flexibility to handle diverse control demands. In this paper, we introduce Diffusion as Shader (DaS), a novel approach that supports multiple video control tasks within a unified architecture. Our key insight is that achieving versatile video control necessitates leveraging 3D control signals, as videos are fundamentally 2D renderings of dynamic 3D content. Unlike prior methods limited to 2D control signals, DaS leverages 3D tracking videos as control inputs, making the video diffusion process inherently 3D-aware. This innovation allows DaS to achieve a wide range of video controls by simply manipulating the 3D tracking videos. A further advantage of using 3D tracking videos is their ability to effectively link frames, significantly enhancing the temporal consistency of the generated videos. With just 3 days of fine-tuning on 8 H800 GPUs using less than 10k videos, DaS demonstrates
57
+
58
+ strong control capabilities across diverse tasks, including mesh-to-video generation, camera control, motion transfer, and object manipulation. Codes and more results are available at https://igl-hkust.github.io/das/.
59
+
60
+ # 1 INTRODUCTION
61
+
62
+ The development of diffusion generative models [Blattmann et al. 2023; Brooks et al. 2024; Ho et al. 2020; Lin et al. 2024; Rombach et al. 2022; Zheng et al. 2024b] enables high-quality video generation from text prompts or a starting image. Recent emerging models, e.g. Sora [Brooks et al. 2024], CogVideo-X [Yang et al. 2024b], Keling [Kuaishou 2024], and Hunyuan [Kong et al. 2024], have shown impressive video generation ability with strong temporal consistency and appealing visual effects, becoming a promising tool for artists to create stunning videos using just a few images or text
63
+
64
+ prompts. These advancements show strong potential to revolutionize the advertising, film, robotics, and game industries, becoming fundamental elements for various generative AI-based applications.
65
+
66
+ A major challenge in video generation lies in achieving versatile and precise control to align seamlessly with users' creative visions. While recent methods have introduced strategies to integrate control into the video generation process [Guo et al. 2024; He et al. 2024b,a; Huang et al. 2023; Ma et al. 2024b,a; Namekata et al. 2024; Polyak et al. 2024; Wang et al. 2024f,c; Yuan et al. 2024], they predominantly focus on specific control types, relying on specialized architectures that lack adaptability to emerging control requirements. Furthermore, these approaches are generally limited to high-level adjustments—such as camera movements or maintaining identity—falling short when it comes to enabling fine-grained modifications, like precisely raising an avatar's left hand.
67
+
68
+ We argue that achieving versatile and precise video generation control fundamentally requires 3D control signals in the diffusion model. Videos are 2D renderings of dynamic 3D content. In a traditional Computer Graphics (CG)-based video-making pipeline, we can effectively control all aspects of a video in detail by manipulating the underlying 3D representations, such as meshes or particles. However, existing video control methods solely apply 2D control signals on rendered pixels, lacking 3D awareness in the video generation process and thus struggling to achieve versatile and fine-grained controls. To this end, we present a novel 3D-aware video diffusion method in this paper, called Diffusion as Shader (DaS), which utilizes 3D control signals to enable diverse and precise control tasks within a unified architecture.
69
+
70
+ Specifically, as shown in Figure 1 (a), DaS is an image-to-video diffusion model that takes a 3D tracking video as the 3D control signal for various control tasks. The 3D tracking video contains the motion trajectories of 3D points whose colors are defined by their coordinates in the camera coordinate system of the first frame. In this way, the 3D tracking video represents the underlying 3D motion of the video. The video diffusion model acts like a shader that computes shaded appearances on the dynamic 3D points to generate the video; hence the name Diffusion as Shader.
+
+ Using 3D tracking videos as control signals offers a significant advantage over depth videos: enhanced temporal consistency. While a straightforward approach to incorporating 3D control into video diffusion models is to use depth maps as control signals, depth maps only define the structural properties of the underlying 3D content without explicitly linking frames across time. In contrast, 3D tracking videos provide a consistent association between frames, as identical 3D points maintain the same colors across the video. These color anchors ensure consistent appearances for the same 3D points, thereby significantly improving temporal coherence in the generated videos. Our experiments demonstrate that even when a 3D region temporarily disappears and later reappears, DaS effectively preserves the appearance consistency of that region, thanks to the temporal consistency enabled by the tracking video.
+
+ By leveraging 3D tracking videos, DaS enables versatile video generation controls, encompassing but not limited to the following video control tasks.
+
+ (1) Animating meshes to videos. Using advanced 3D tools like Blender, we can design animated 3D meshes based on predefined templates. These animated meshes are transformed into 3D tracking videos to guide high-quality video generation (Figure 1 (b)).
+ (2) Motion transfer. Starting with an input video, we employ a 3D tracker [Xiao et al. 2024b] to generate a corresponding 3D tracking video. Next, the depth-to-image Flux model [Labs 2024] is used to modify the style or content of the first frame. Based on the updated first frame and the 3D tracking video, DaS generates a new video that replicates the motion patterns of the original while reflecting the new style or content (Figure 1 (c)).
+ (3) Camera control. To enable precise camera control, depth maps are estimated to extract 3D points [Bochkovskii et al. 2024]. These 3D points are then projected onto a specified camera path to create a 3D tracking video, which guides the generation of videos with customized camera movements (Figure 1 (d)).
+ (4) Object manipulation. By integrating object segmentation techniques [Kirillov et al. 2023] with a monocular depth estimator [Bochkovskii et al. 2024], the 3D points of specific objects can be extracted and manipulated. These modified 3D points are used to construct a 3D tracking video, which guides the creation of videos for object manipulation (Figure 1 (e)).
+
+ Owing to its 3D awareness, DaS is data-efficient: fine-tuning on fewer than 10k videos for 3 days on 8 H800 GPUs already equips it with strong control ability, as demonstrated across various control tasks. We compare DaS with baseline methods on camera control [He et al. 2024b; Wang et al. 2024c] and motion transfer [Geyer et al. 2023a], showing that DaS achieves significantly better performance on these two control tasks than the baselines. For the remaining two tasks, i.e. mesh-to-video generation and object manipulation, we provide extensive qualitative results to show the superior generation quality of our method.
+
+ # 2 RELATED WORK
+
+ # 2.1 Video diffusion
+
+ In recent years, the success of diffusion models in image generation [Ho et al. 2020; Peebles and Xie 2023a; Rombach et al. 2022] has sparked interest in video generation [Blattmann et al. 2023; Brooks et al. 2024; Chen et al. 2023b, 2024b; Guo et al. 2023; He et al. 2022; Ho et al. 2022; Kong et al. 2024; Kuaishou 2024; Lin et al. 2024; Xing et al. 2024; Yang et al. 2024b; Zheng et al. 2024b]. VDM [Ho et al. 2022] is the first work to explore the feasibility of diffusion models for video generation. SVD [Blattmann et al. 2023] introduces a unified strategy for training a robust video generation model. Sora [Brooks et al. 2024], through training on extensive video data, suggests that scaling video generation models is a promising path towards building general-purpose simulators of the physical world. CogVideoX [Yang et al. 2024b], VideoCrafter [Chen et al. 2023b, 2024b], DynamiCrafter [Xing et al. 2024], Keling [Kuaishou 2024], and Hunyuan [Kong et al. 2024] have demonstrated impressive video generation performance with strong temporal consistency.
+
+ Controllable video generation. Existing works still lack an effective way to control the generation process. Many works [Guo et al. 2024; He et al. 2024b,a; Huang et al. 2023; Ma et al. 2024b,a; Namekata et al. 2024; Polyak et al. 2024; Qiu et al. 2024; Wang et al. 2024f,c; Yu et al. 2024; Yuan et al. 2024] introduce a specific control signal into the video generation process and can thus achieve only one control type, such as identity preservation, camera control, or motion transfer. Our method is more versatile across video control types by using 3D-aware video generation with 3D tracking videos as conditions.
+
+ # 2.2 Controlled video generation
+
+ We review the following four types of controlled video generation.
+
+ Animating meshes to videos. Animating meshes to videos is closely related to mesh texturing. Several works [Cai et al. 2024; Cao et al. 2023; Richardson et al. 2023; Wang et al. 2023] have demonstrated the feasibility of mesh texturization using powerful diffusion models. TexFusion [Cao et al. 2023] applies the diffusion model's denoiser on a set of 2D renders of the 3D object, optimizing an intermediate neural color field to output final RGB textures. TEXTure [Richardson et al. 2023] introduces a dynamic trimap representation and a novel diffusion sampling process, leveraging this trimap to generate seamless textures from various views. G-Rendering [Cai et al. 2024] takes a dynamic mesh as input; to preserve consistency, it employs UV-guided noise initialization and correspondence-aware blending of both pre- and post-attention features. Following G-Rendering, our method also targets dynamic meshes, utilizing a diffusion model as a shader to incorporate realistic texture information. Unlike G-Rendering, which preserves consistency at the noise and attention levels, our approach leverages 3D tracking videos as supplementary information, integrating them into the diffusion model to ensure both temporal and spatial consistency.
+
+ Camera control. Camera control [Bahmani et al. 2024; Geng et al. 2024; He et al. 2024b; Wang et al. 2024e,c; Xiao et al. 2024a; Yang et al. 2024a; Yu et al. 2024; Zheng et al. 2024a] is an important capability for enhancing the realism of generated videos and increasing user engagement by allowing customized viewpoints. Recently, many efforts have been made to introduce camera control in video generation. MotionCtrl [Wang et al. 2024c] incorporates a flexible motion controller for video generation, which can independently or jointly control camera motion and object motion in generated videos. CameraCtrl [He et al. 2024b] adopts Plücker embeddings [Sitzmann et al. 2021] as the primary form of camera parameters. ViewCrafter [Yu et al. 2024] employs a point-based representation for free-view rendering, enabling precise camera control. AC3D [Bahmani et al. 2024] optimizes pose conditioning schedules during training and testing to accelerate convergence and restricts the injection of camera conditioning to specific positions, reducing interference with other meaningful video features. CPA [Wang et al. 2024e] incorporates a Sparse Motion Encoding Module to embed the camera pose information and integrates the embedded motion information via temporal attention. Our method instead uses 3D tracking videos as an intermediary to achieve precise and consistent camera control.
+
+ Motion transfer. Motion transfer [Esser et al. 2023; Geng et al. 2024; Geyer et al. 2023a; Meral et al. 2024; Park et al. 2024; Pondaven et al. 2024; Wang et al. 2024d,c; Yatim et al. 2024] aims to synthesize novel videos that follow the motion of the original one. Gen-1 [Esser et al. 2023] employs depth estimation results [Bochkovskii et al. 2024; Lu et al. 2024; Ranftl et al. 2020] to guide the motion. TokenFlow [Geyer et al. 2023a] achieves consistent motion transfer by enforcing consistency in the diffusion feature space. MotionCtrl [Wang et al. 2024c] also achieves motion transfer by incorporating a motion controller. DiTFlow [Pondaven et al. 2024] proposes Attention Motion Flow as guidance for motion transfer on DiTs [Peebles and Xie 2023a]. Motion Prompting [Geng et al. 2024] utilizes 2D motions as prompts to realize impressive motion transfer. Unlike these approaches, our method employs 3D tracking as guidance for motion transfer, enabling a more comprehensive capture of each object's motion and the relationships between objects within the video, which ensures accurate and globally consistent geometric and temporal coherence.
+
+ Object manipulation. Object manipulation refers to versatile control of object movement for image-to-video generation. Different from camera control, which focuses on changes in viewpoint, object manipulation emphasizes the movement of the objects themselves. Mainstream methods [Chen et al. 2023a; Geng et al. 2024; Jain et al. 2024; Li et al. 2024; Ma et al. 2024b; Mou et al. 2024; Qiu et al. 2024; Teng et al. 2023; Wang et al. 2024f,c; Yang et al. 2024a; Yin et al. 2023] typically achieve object manipulation by utilizing directed trajectories or by modeling the relationships between bounding boxes with specific semantic meanings. However, these methods primarily rely on 2D guidance to represent the spatial movement of target objects, which often fails to accurately capture user intent and frequently results in distorted outputs. ObjCtrl-2.5D [Wang et al. 2024a] tries to address this limitation by extending 2D trajectories with depth information, creating a single 3D trajectory as the control signal. Going beyond a single 3D trajectory, our method leverages 3D tracking videos, which offer greater detail and more effectively represent the motion relationships between foreground and background for more precise and realistic object manipulation.
+
+ Concurrent works. Recently, several works [Feng et al. 2024a; Geng et al. 2024; Jeong et al. 2024; Koroglu et al. 2024; Lei et al. 2024; Niu et al. 2024; Shi et al. 2024; Zhang et al. 2024] have explored utilizing motion as control signals. These approaches can be broadly categorized into two groups: 2D motion-based and 3D motion-based methods. [Koroglu et al. 2024; Lei et al. 2024; Shi et al. 2024] leverage 2D optical flow to condition motion, while [Geng et al. 2024; Jeong et al. 2024; Niu et al. 2024] utilize 2D tracks, which are sparser than optical flow, to track or control video motion. [Zhang et al. 2024] learns to generate 3D coordinates in the video diffusion model, which provides 3D awareness. [Feng et al. 2024a] lifts videos into 3D space and extracts the motion of 3D points, enabling a more accurate capture of spatial relationships between objects and supporting tasks such as object manipulation and camera control. Our method, DaS, also leverages recent tracking methods [Xiao et al. 2024b; Zhang et al. 2025] to construct 3D tracking videos. However, we extend the applicability by unifying a broader range of control tasks, including mesh-to-video generation and motion transfer.
+
+ # 3 METHOD
+
+ # 3.1 Overview
+
+ DaS is an image-to-video (I2V) diffusion generative model, which applies both an input image and a 3D tracking video as conditions for controllable video generation. In the following, we first review the backend I2V video diffusion model in Sec. 3.2. Then, we discuss the definition of the 3D tracking video and how to inject it into the generation process as a condition in Sec. 3.3. Finally, in Sec. 3.4, we discuss how to apply DaS to various types of video generation control.
+
+ ![](images/6355c9439780ddbe41bdf6863467236c8bbcc179b7b76af42c52be020f5b43cc.jpg)
+ Fig. 2. Architecture of DaS. (a) We colorize dynamic 3D points according to their coordinates to get (b) a 3D tracking video. (c) The input image and the 3D tracking video are processed by (d) a transformer-based latent diffusion model with a variational autoencoder (VAE). The 3D tracking video is processed by a trainable copy of the denoising DiT, and zero-initialized linear layers are used to inject the condition features from the 3D tracking video into the denoising process.
+
+ # 3.2 Backend video diffusion model
+
+ DaS is fine-tuned from CogVideoX [Yang et al. 2024b], a transformer-based video diffusion model [Peebles and Xie 2023a] operating on a latent space. Specifically, as shown in Figure 2 (d), we adopt the I2V CogVideoX model as the base model, which takes an image $\mathbf{I} \in \mathbb{R}^{H \times W \times 3}$ as input and generates a video $\mathbf{V} \in \mathbb{R}^{T \times H \times W \times 3}$. The generated video $\mathbf{V}$ has $T$ frames with the same width $W$ and height $H$ as the input image. The input image $\mathbf{I}$ is first padded with zeros to get an input condition video of the same size $T \times H \times W \times 3$ as the target video. Then, a VAE encoder is applied to the padded condition video to get a latent vector of size $\frac{T}{4} \times \frac{H}{8} \times \frac{W}{8} \times 16$, which is concatenated with a noise of the same size. A diffusion transformer (DiT) [Peebles and Xie 2023b] is iteratively applied to denoise the noisy latent for a predefined number of steps, and the output denoised latent is processed by a VAE decoder to get the video $\mathbf{V}$. In the following, we discuss how to add a 3D tracking video as an additional condition to this base model.
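+
+ To make the tensor bookkeeping concrete, here is a small shape walkthrough under the sizes used in this paper (49 frames at $480 \times 720$); the 4x temporal and 8x spatial compression follow the description above, with the frame count rounded up. This is an illustrative sketch, not the CogVideoX implementation:
+
+ ```python
+ import torch
+ 
+ T, H, W = 49, 480, 720
+ image = torch.rand(H, W, 3)              # input image I
+ 
+ # Pad the single image with zeros along time to form the condition video.
+ cond_video = torch.zeros(T, H, W, 3)
+ cond_video[0] = image
+ 
+ # The VAE encoder compresses time by 4x and space by 8x into 16 channels;
+ # the resulting latent is concatenated with noise of the same size.
+ latent_shape = ((T + 3) // 4, H // 8, W // 8, 16)
+ noise = torch.randn(*latent_shape)
+ print(latent_shape)                      # (13, 60, 90, 16)
+ ```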
+
+ # 3.3 Finetuning with 3D tracking videos
+
+ We add a 3D tracking video as an additional condition to our video diffusion model. As shown in Figure 2 (a, b), the 3D tracking video is rendered from a set of moving 3D points $\{\mathbf{p}_i(t) \in \mathbb{R}^3\}$, where $t = 1, \dots, T$ denotes the frame index in the video. The colors of these points are determined by their coordinates in the first frame: we normalize the coordinates into $[0,1]^3$ and convert them into RGB colors $\{\mathbf{c}_i\}$. Note that we use the reciprocal of the z-coordinate in the normalization. These colors remain the same across all timesteps $t$. Then, to get the $t$-th frame of the tracking video, we project these 3D points onto the $t$-th camera to render this frame. In Sec. 3.4, we will discuss how to obtain these moving 3D points and the camera poses of different frames for different control tasks. Next, we introduce the architecture that utilizes the 3D tracking video as a condition for video generation.
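+
+ For concreteness, the following is a minimal sketch of this construction (not the authors' released code): points are colored once from their first-frame camera coordinates, using the reciprocal of z for the depth channel, and each frame is produced by projecting the moved points. A real renderer would additionally handle occlusion and point size.
+
+ ```python
+ import numpy as np
+ 
+ def colorize_points(p0):
+     """p0: (N, 3) float point coordinates in the first frame's camera space."""
+     c = p0.copy()
+     c[:, 2] = 1.0 / np.clip(p0[:, 2], 1e-6, None)   # reciprocal of z
+     lo, hi = c.min(axis=0), c.max(axis=0)
+     return (c - lo) / (hi - lo + 1e-8)              # RGB colors in [0, 1]^3
+ 
+ def render_frame(p_t, colors, K, H, W):
+     """Project points p_t (N, 3, in the t-th camera frame) with intrinsics K."""
+     frame = np.zeros((H, W, 3), dtype=np.float32)
+     uvw = (K @ p_t.T).T                             # homogeneous pixel coords
+     uv = (uvw[:, :2] / uvw[:, 2:3]).astype(int)
+     ok = (uv[:, 0] >= 0) & (uv[:, 0] < W) & (uv[:, 1] >= 0) & (uv[:, 1] < H)
+     frame[uv[ok, 1], uv[ok, 0]] = colors[ok]        # single-pixel splat
+     return frame
+ ```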
+
+ Injecting 3D tracking control. We follow a design similar to ControlNet [Chen et al. 2024a; Zhang et al. 2023] to add the 3D tracking video as the additional condition. As shown in Figure 2 (d), we apply the pretrained VAE encoder to encode the 3D tracking video into a latent vector. Then, we make a trainable copy of the pretrained denoising DiT, called the condition DiT, to process the latent vector of the 3D tracking video. The denoising DiT contains 42 blocks, and we copy the first 18 blocks as the condition DiT. In the condition DiT, we extract the output feature of each DiT block, process it with a zero-initialized linear layer, and add the result to the corresponding feature map of the denoising DiT. We finetune the condition DiT with the diffusion losses while freezing the pretrained denoising DiT.
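+
+ A simplified PyTorch sketch of this injection scheme is given below. The module and argument names are assumptions for illustration (real CogVideoX DiT blocks also take timestep and text embeddings), but the structure (a trainable copy of the first 18 blocks plus zero-initialized linear layers whose outputs are added back to the frozen denoising DiT) follows the description above.
+
+ ```python
+ import copy
+ import torch.nn as nn
+ 
+ class ConditionDiT(nn.Module):
+     def __init__(self, denoising_blocks, dim, n_copy=18):
+         super().__init__()
+         # Trainable copy of the first n_copy blocks of the frozen denoising DiT.
+         self.blocks = copy.deepcopy(denoising_blocks[:n_copy])
+         # One zero-initialized linear per block: at step 0 the injected
+         # residuals are all zeros, so training starts from the pretrained model.
+         self.zero_linears = nn.ModuleList(
+             nn.Linear(dim, dim) for _ in range(n_copy))
+         for lin in self.zero_linears:
+             nn.init.zeros_(lin.weight)
+             nn.init.zeros_(lin.bias)
+ 
+     def forward(self, track_tokens):
+         """track_tokens: VAE latent of the 3D tracking video, as DiT tokens.
+         Returns per-block residuals added to the denoising DiT's feature maps."""
+         residuals, h = [], track_tokens
+         for block, lin in zip(self.blocks, self.zero_linears):
+             h = block(h)
+             residuals.append(lin(h))
+         return residuals
+ ```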
+
+ ![](images/23c0461025a07a92eddadc619f9d94ae866e63783af0e30220f6aaeb11380fa1.jpg)
+
+ ![](images/c75e1d4c2dd29108d43afdeb2b0624cb2db2aceac70cb93d2597a55a614f3278.jpg)
+ Fig. 3. 3D tracking video generation in (a) object manipulation, (b) animating mesh to video generation, (c) camera control, and (d) motion transfer.
+
+ Finetuning details. To train the DaS model, we construct a training dataset containing both real-world videos and synthetic rendered videos. The real-world videos are from MiraData [Ju et al. 2024], while we use the meshes and motion sequences from Mixamo to render synthetic videos. All videos are center-cropped and resized to $720 \times 480$ resolution with 49 frames. We only finetune the copied condition DiT while freezing the entire original denoising DiT. To construct the 3D tracking videos for the rendered videos, since we have access to the ground-truth 3D meshes and camera poses, we build them directly from the dense ground-truth 3D points, which yields dense 3D point tracking. For real-world videos, we adopt SpatialTracker [Xiao et al. 2024b] to detect 3D points and their trajectories in 3D space. Specifically, for each real-world video, we detect 4,900 evenly distributed 3D points and track their trajectories. For training, we employ a learning rate of $1 \times 10^{-4}$ with the AdamW optimizer. We train the model for 2000 steps, using gradient accumulation to reach an effective batch size of 64. The training takes 3 days on 8 H800 GPUs.
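+
+ The optimization schedule can be summarized with a short sketch (stand-in model and loss; the micro-batch split across GPUs is an assumption, and the released training script may differ):
+
+ ```python
+ import torch
+ import torch.nn as nn
+ 
+ model = nn.Linear(16, 16)                        # stand-in for the condition DiT
+ opt = torch.optim.AdamW(model.parameters(), lr=1e-4)
+ 
+ micro_bs, n_gpus = 1, 8                          # per-GPU micro-batch x GPUs
+ accum = 64 // (micro_bs * n_gpus)                # accumulate to effective 64
+ 
+ for micro_step in range(2000 * accum):           # 2000 optimizer steps in total
+     x = torch.randn(micro_bs, 16)                # stand-in for a video batch
+     loss = (model(x) - x).pow(2).mean() / accum  # stand-in for the diffusion loss
+     loss.backward()                              # gradients accumulate in .grad
+     if (micro_step + 1) % accum == 0:
+         opt.step()
+         opt.zero_grad()
+ ```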
+
+ # 3.4 Video generation control
+
+ In this section, we describe how to utilize DaS for the following controllable video generation tasks.
+
+ 3.4.1 Object manipulation. DaS can generate a video that manipulates a specific object. As shown in Figure 3 (a), given an image, we estimate the depth map using Depth Pro [Bochkovskii et al. 2024] or MoGE [Wang et al. 2024b] and segment out the object using SAM [Kirillov et al. 2023]. Then, we can manipulate the point cloud of the object to construct a 3D tracking video for object manipulation video generation, as sketched below.
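+
+ A minimal sketch of this manipulation step (assumed inputs: scene points from the depth map, a SAM mask over them, and a user-specified rotation angle and translation; each returned point set would then be rendered into a tracking-video frame):
+
+ ```python
+ import numpy as np
+ 
+ def rot_z(angle):
+     c, s = np.cos(angle), np.sin(angle)
+     return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
+ 
+ def manipulate_object(points, mask, angle, translation, T=49):
+     """points: (N, 3) scene points; mask: (N,) bool for the segmented object;
+     the object rotates by `angle` about its center and moves by `translation`
+     over T frames, while background points stay fixed."""
+     frames, center = [], points[mask].mean(axis=0)
+     for k in range(T):
+         s = k / (T - 1)                          # linear motion schedule
+         moved = points.copy()
+         moved[mask] = ((points[mask] - center) @ rot_z(s * angle).T
+                        + center + s * np.asarray(translation))
+         frames.append(moved)                     # render each with render_frame()
+     return frames
+ ```
+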
+ 3.4.2 Animating meshes to videos. DaS enables the creation of visually appealing, high-quality videos from simple animated meshes. While many Computer Graphics (CG) software tools provide basic 3D models and motion templates to generate animated meshes, these outputs are often simplistic and lack the detailed appearance and geometry needed for high-quality animations. Starting with these simple animated meshes, as shown in Figure 3 (b), we generate an initial visually appealing frame using the depth-to-image FLUX model [Labs 2024]. We then produce 3D tracking videos from the animated meshes, which, combined with the generated first frame, guide DaS to transform the basic meshes into visually rich and appealing videos.
+
+ 3.4.3 Camera control. Previous approaches [He et al. 2024b; Wang et al. 2024c] rely on camera or ray embeddings as conditions to control the camera trajectory in video generation. However, these embeddings lack true 3D awareness, leaving the diffusion models to infer the scene's 3D structure and simulate camera movement. In contrast, DaS significantly enhances 3D awareness by incorporating 3D tracking videos for precise camera control. To generate videos with a specific camera trajectory, as shown in Figure 3 (c), we first estimate the depth map of the initial frame using Depth Pro [Bochkovskii et al. 2024] and convert it into colored 3D points. These points are then projected onto the given camera trajectory, constructing a 3D tracking video that enables DaS to control camera movements with high 3D accuracy.
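+
+ The following sketch illustrates this pipeline under common conventions (world frame = first camera frame; extrinsics $(\mathbf{R}_t, \mathbf{t}_t)$ map world to camera coordinates); it is illustrative rather than the released code:
+
+ ```python
+ import numpy as np
+ 
+ def backproject(depth, K):
+     """depth: (H, W) metric depth; K: (3, 3) intrinsics.
+     Returns (H*W, 3) points in the first camera's (= world) frame."""
+     H, W = depth.shape
+     u, v = np.meshgrid(np.arange(W), np.arange(H))
+     pix = np.stack([u, v, np.ones_like(u)], axis=-1).reshape(-1, 3)
+     return (np.linalg.inv(K) @ pix.T).T * depth.reshape(-1, 1)
+ 
+ def track_along_trajectory(points_w, trajectory):
+     """trajectory: list of (R, t) with x_cam = R @ x_world + t.
+     The scene is static; only the camera moves, so the per-frame point sets
+     (projected with render_frame) form the 3D tracking video."""
+     return [points_w @ R.T + t for R, t in trajectory]
+ ```
+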
+ 3.4.4 Motion transfer. As shown in Figure 3 (d), DaS also facilitates creating a new video by transferring motion from an existing source video. First, we estimate the depth map of the source video's first frame and apply the depth-to-image FLUX model [Labs 2024] to repaint the frame into a target appearance guided by text prompts. Then, using SpatialTracker [Xiao et al. 2024b], we generate a 3D tracking video from the source video to serve as the control signal. Finally, the DaS model generates the target video by combining the edited first frame with the 3D tracking video.
+
+ # 4 EXPERIMENTS
+
+ We conduct experiments on four tasks, namely camera control, motion transfer, mesh-to-video generation, and object manipulation, to demonstrate the versatility of DaS in controlling the video generation process.
+
+ # 4.1 Camera control
+
+ Baseline methods. To evaluate the ability to control camera motions of generated videos, we select two representative methods, MotionCtrl [Wang et al. 2024c] and CameraCtrl [He et al. 2024b], as baselines; both take camera trajectories as input and use camera or ray embeddings for camera control.
+
+ ![](images/dddf88953eabdf82f30156ea7361c7e3b43f00a7332434b1f9530e93236f3e76.jpg)
+ Fig. 4. Qualitative results of DaS on the camera control task. We show 4 trajectories (left, right, up, down) with large movements.
+
+ Metrics. To measure the accuracy of the camera trajectories of generated videos, we evaluate the consistency between the camera poses estimated from the generated videos and the input ground-truth camera poses using rotation and translation errors. Specifically, for each frame of a generated video, we reconstruct its pose relative to the first frame using SIFT feature matching [Lowe 2004]. Then, we obtain the normalized quaternion and translation vector for the rotation and translation of each frame. Finally, we measure the angular difference between the estimated camera poses and the given ones:
+
+ $$
+ \mathrm{RotErr} = \arccos \left(\frac{1}{T-1} \sum_{i=2}^{T} \langle \mathbf{q}_{\mathrm{gen}}^{i}, \mathbf{q}_{\mathrm{gt}}^{i} \rangle\right),
+ $$
+
+ $$
+ \mathrm{TransErr} = \arccos \left(\frac{1}{T-1} \sum_{i=2}^{T} \langle \mathbf{t}_{\mathrm{gen}}^{i}, \mathbf{t}_{\mathrm{gt}}^{i} \rangle\right),
+ $$
+ where $T$ is the number of frames, $\mathbf{q}^i$ and $\mathbf{t}^i$ are the normalized quaternion and translation vector of the $i$-th frame, and $\langle \cdot, \cdot \rangle$ denotes the dot product between two vectors.
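+
+ A small numpy sketch of these two metrics (assumed inputs: unit quaternions and unit-length translation vectors for frames $2, \dots, T$; this helper is illustrative, not the authors' evaluation code):
+
+ ```python
+ import numpy as np
+ 
+ def pose_errors(q_gen, q_gt, t_gen, t_gt):
+     """All inputs are (T-1, D) arrays with unit-norm rows (frames 2..T).
+     Returns (RotErr, TransErr) in degrees, as defined above."""
+     rot = np.degrees(np.arccos(np.clip((q_gen * q_gt).sum(1).mean(), -1.0, 1.0)))
+     trans = np.degrees(np.arccos(np.clip((t_gen * t_gt).sum(1).mean(), -1.0, 1.0)))
+     return rot, trans
+ ```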
+
+ Results. We compare against the baseline methods on 100 random trajectories from RealEstate10K [Zhou et al. 2018]. However, since most random trajectories contain only small movements, we further test the models on larger fixed movements (moving left, right, up, down, spiral), as shown in Figure 4. As shown in Table 1, our method outperforms the baseline methods, demonstrating that it achieves stable and accurate control of the camera poses of the generated videos. The main reason is that, by utilizing 3D tracking videos, our method is fully 3D-aware, enabling accurate spatial inference in the video generation process. In comparison, the baseline methods [He et al. 2024b; Wang et al. 2024c] only adopt implicit camera or ray embeddings for camera control.
+
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Small Movement</td><td colspan="2">Large Movement</td></tr><tr><td>TransErr ↓</td><td>RotErr ↓</td><td>TransErr ↓</td><td>RotErr ↓</td></tr><tr><td>MotionCtrl</td><td>44.23</td><td>8.92</td><td>67.05</td><td>39.86</td></tr><tr><td>CameraCtrl</td><td>42.31</td><td>7.82</td><td>66.76</td><td>29.70</td></tr><tr><td>Ours</td><td>27.85</td><td>5.97</td><td>37.17</td><td>10.40</td></tr></table>
+
+ Table 1. Quantitative results on camera control of MotionCtrl [Wang et al. 2024c], CameraCtrl [He et al. 2024b], and our method. "TransErr" and "RotErr" are the angle differences in degrees between the estimated translation and rotation and the ground-truth ones.
+
+ # 4.2 Motion transfer
+
+ Baseline methods. We compare DaS with two well-known motion transfer methods, TokenFlow [Geyer et al. 2023b] and CCEdit [Feng et al. 2024b]. TokenFlow represents video motion via the consistency of diffusion features across timesteps, and propagates this consistency to several keyframes generated from a text prompt for video generation. For TokenFlow, we adopt the Stable Diffusion 2.1 [Rombach et al. 2022] model for the motion transfer task. CCEdit adopts depth maps as conditions to control the video motion and transfers the motion using a newly repainted frame to generate a video.
+
+ Metrics. Since all methods generate the transferred videos based on text prompts, we evaluate both the alignment between the generated videos and the text prompts and the video coherence, using CLIP [Radford et al. 2021]. Specifically, for video-text alignment, we extract multiple frames from the video and compare them with the corresponding text prompts by calculating the CLIP score [Hessel et al. 2022] for each frame. This score reflects the alignment between image content and textual descriptions. For temporal consistency, we extract normalized CLIP features from adjacent video frames and compute the cosine similarity between adjacent features.
+
+ ![](images/f18c7a0fa736625d9d8187bb8141b7325e4c0c994c814d32a921b0c994b03fad.jpg)
+ Fig. 5. Qualitative comparison on motion transfer between our method, CCEdit [Feng et al. 2024b], and TokenFlow [Geyer et al. 2023b].
+
+ ![](images/f0d3ce417e5da9dc9611d267137512d83d85820506deb20e40964da8f50f4943.jpg)
+
+ <table><tr><td>Method</td><td>Text-Ali ↑</td><td>Tem-Con ↑</td></tr><tr><td>CCEdit</td><td>16.9</td><td>0.932</td></tr><tr><td>TokenFlow</td><td>31.9</td><td>0.956</td></tr><tr><td>Ours</td><td>32.6</td><td>0.971</td></tr></table>
+
+ Table 2. CLIP scores for motion transfer of CCEdit [Feng et al. 2024b], TokenFlow [Geyer et al. 2023b], and our method. "Text-Ali" is the semantic CLIP consistency between generated videos and the given text prompts. "Tem-Con" is the temporal CLIP consistency between neighboring frames.
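+
+ Both metrics reduce to simple operations on normalized CLIP features; the sketch below assumes the per-frame image features and the text feature have already been extracted with any CLIP backbone:
+
+ ```python
+ import torch.nn.functional as F
+ 
+ def text_alignment(frame_feats, text_feat):
+     """frame_feats: (T, D), text_feat: (D,), both L2-normalized CLIP features.
+     Mean per-frame image-text cosine similarity (the CLIP score, up to scaling)."""
+     return (frame_feats @ text_feat).mean().item()
+ 
+ def temporal_consistency(frame_feats):
+     """Mean cosine similarity between CLIP features of adjacent frames."""
+     return F.cosine_similarity(frame_feats[:-1], frame_feats[1:], dim=-1).mean().item()
+ ```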
+
+ Results. As shown in Table 2, our method demonstrates outstanding performance in both text alignment and frame consistency, surpassing both baseline methods. Furthermore, Figure 5 presents a qualitative comparison of our method, CCEdit, and TokenFlow: CCEdit produces low-quality frames and struggles to maintain temporal coherence, while TokenFlow produces semantically consistent frames but has difficulty producing coherent videos. In contrast, our method accurately transfers the video motion with strong temporal coherence, as shown in Figure 6.
+
+ # 4.3 Animating meshes to videos
+
+ Qualitative comparison. We compare our method against CHAMP [Zhu et al. 2024], a state-of-the-art human image animation method, on the mesh-to-video task. CHAMP takes a human image and a motion sequence as input and generates a corresponding human video, where the motion sequence is represented by an animated SMPL [Loper et al. 2023] mesh. We use the same input image, with the SMPL mesh as the motion input for CHAMP, and generate the corresponding animation videos for qualitative comparison, as shown in Figure 8. We also generate videos in different styles from the same animated 3D meshes, as shown in Figure 7. Compared to CHAMP, our method demonstrates better consistency in the 3D structure and texture details of the avatar across different motion sequences and different styles.
+
+ <table><tr><td>Depth</td><td>Tracking</td><td>#Tracks</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td><td>FVD ↓</td></tr><tr><td>✓</td><td></td><td>-</td><td>18.08</td><td>0.573</td><td>0.312</td><td>645.1</td></tr><tr><td></td><td>✓</td><td>900</td><td>18.52</td><td>0.586</td><td>0.337</td><td>765.3</td></tr><tr><td></td><td>✓</td><td>2500</td><td>19.17</td><td>0.632</td><td>0.263</td><td>566.4</td></tr><tr><td></td><td>✓</td><td>4900</td><td>19.27</td><td>0.658</td><td>0.261</td><td>551.3</td></tr><tr><td></td><td>✓</td><td>8100</td><td>19.11</td><td>0.649</td><td>0.262</td><td>599.0</td></tr></table>
+
+ Table 3. Analysis of applying different 3D control signals for image-to-video generation. We evaluate PSNR, SSIM, LPIPS, and FVD of generated videos on the validation sets of the DAVIS and MiraData datasets. "Depth" means using depth maps as the 3D control signals. "Tracking" means using 3D tracking videos as the control signals. "#Tracks" is the number of 3D points used in the 3D tracking video.
+
+ # 4.4 Object manipulation
+
+ Qualitative results. For object manipulation, we adopt SAM [Kirillov et al. 2023] and depth estimation models [Bochkovskii et al. 2024; Wang et al. 2024b] to obtain the object points. Then, we evaluate two kinds of manipulation, i.e. translation and rotation. The results are shown in Figure 9, demonstrating that DaS achieves accurate object manipulation and produces photorealistic videos with strong multiview consistency for these objects.
+
+ Fig. 6. Qualitative results of our method on motion transfer. Each group shows the source video and the transferred video, generated with the following target prompts: "An animated red car moves from left to right, with a deserted city in the background."; "A herd of bird-deer in a towering, wooded forest."; "A green alien is generating ancient cityscapes displayed on a computer screen."; "An anime girl with a white hat and tanned skin sits by the edge of a tranquil mountain lake."
+
+ Fig. 7. More results of the animating mesh to video generation task. Our method enables the generation of different styles from the same mesh.
+
+ ![](images/b015635a124a6b65433c67f58456287662d3934905e05aec3f1e514dc6b7792b.jpg)
+ Fig. 8. Qualitative comparison on the animating mesh to video task between our method and CHAMP [Zhu et al. 2024].
+
+ ![](images/9f3675d45ab9387fa973bbd5c73acb7528a5fcca23e872bb39464eec004f8c2c.jpg)
+ Fig. 9. Qualitative results of our method on the object manipulation task. The top part shows the results of translating the object, while the bottom part shows the results of rotating it.
+
+ ![](images/bbdd6af3b0ecb4cd2d1f5b4716ca4cdea6ef1ec46548d4467e40231c6476f85a.jpg)
+ Fig. 10. Generated videos using depth maps or 3D tracking videos as control signals. Our 3D tracking videos provide better cross-frame consistency for video generation than depth maps.
+
+ # 4.5 Analysis
+
+ We analyze the choice of 3D control signals, i.e. depth maps vs. 3D tracking videos, and the number of 3D tracking points. To this end, we randomly select 50 videos from the validation splits of the DAVIS [Pont-Tuset et al. 2017] and MiraData [Ju et al. 2024] video datasets. We extract the first frame of each video as the input image and apply different models to re-generate these videos. To evaluate the quality of the generated videos, we compute PSNR, SSIM [Wang et al. 2004], LPIPS [Zhang et al. 2018], and FVD [Unterthiner et al. 2019] between the generated videos and the ground-truth videos.
+ 4.5.1 Depth maps vs. 3D tracking videos. To illustrate the effectiveness of our 3D tracking videos, we compare DaS with a baseline that uses depth maps as conditions instead of 3D tracking videos. Specifically, the baseline adopts the same architecture as DaS but replaces the 3D tracking video with a depth video, generated with Depth Pro [Bochkovskii et al. 2024]. As shown in Table 3, our model outperforms this baseline on all metrics, demonstrating that 3D tracking videos provide a better signal for the diffusion model to recover ground-truth videos than depth map conditions. Figure 10 shows the generated videos, where our method produces videos more consistent with the ground truth. The main reason is that 3D tracking videos effectively associate different frames of a video, while depth maps only provide cues about the scene structure without constraining the motion of the video.
+ 4.5.2 Point density. In Table 3, we further present an ablation study with varying numbers of 3D tracking points as control signals, ranging from 900 $(30\times 30)$ to 8100 $(90\times 90)$. Although the videos generated with 4900 tracking points perform slightly better than the others, the visual quality with 2500, 4900, and 8100 tracking points is very similar. Since tracking too many points with SpatialTracker [Xiao et al. 2024b] would be slow, we choose 4900 as our default setting in all other experiments that use 3D point tracking.
+ 4.5.3 Runtime. In the inference stage, we employ the DDIM [Song et al. 2020] sampler with 50 steps and classifier-free guidance of magnitude 7.0, which takes about 2.5 minutes to generate 49 frames at a resolution of $480 \times 720$ on one H800 GPU.
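+
+ Schematically, the inference loop corresponds to the following sketch (stand-in `model` and `ddim_step` callables; only the step count and guidance scale are taken from the setup above):
+
+ ```python
+ import torch
+ 
+ def sample(model, ddim_step, latents, cond, uncond, steps=50, cfg=7.0):
+     """Classifier-free-guided DDIM sampling; `model` predicts noise given the
+     latents, timestep, and conditions, and `ddim_step` applies one DDIM update."""
+     for t in torch.linspace(999, 0, steps).long():
+         eps_c = model(latents, t, cond)          # image + 3D tracking condition
+         eps_u = model(latents, t, uncond)        # unconditional branch
+         eps = eps_u + cfg * (eps_c - eps_u)      # guidance scale 7.0
+         latents = ddim_step(latents, eps, t)     # deterministic DDIM update
+     return latents                               # decode with the VAE afterwards
+ ```
+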
+ ![](images/2d4d25646022fad28faf390587257adabcf5b8713cefd0803e76382238165379.jpg)
+ Fig. 11. Failure cases. (Top) Incompatible tracking video: when a tracking video that does not correspond to the structures of the input image is provided, DaS generates a video with a scene transition to a compatible new scene. (Bottom) Out of tracking range: for regions without 3D tracking points, the tracking video fails to constrain these regions, and DaS may generate some uncontrolled content.
+
+ # 5 LIMITATIONS AND CONCLUSIONS
+
+ Limitations and future works. Though DaS achieves control over the video generation process in most cases, it still suffers from failure modes mainly caused by incorrect 3D tracking videos. First, the input image must be compatible with the 3D tracking video; otherwise, the generated videos are implausible, as shown in Figure 11 (top). Second, for regions without 3D tracking points, the generated content may be uncontrolled and unnatural (Figure 11 (bottom)). As for future work, we currently rely on provided animated meshes or existing videos to obtain high-quality 3D tracking videos, and a promising direction is to learn to generate these 3D tracking videos with a new diffusion model.
+
+ Conclusions. In this paper, we introduce Diffusion as Shader (DaS) for controllable video generation. The key idea of DaS is to adopt 3D tracking videos as 3D control signals for video generation. The 3D tracking videos are constructed from colored dynamic 3D points that represent the underlying 3D motion of the video. Diffusion models are then applied to generate a video following the motion of the 3D tracking video. We demonstrate that the 3D tracking videos not only improve the temporal consistency of the generated videos but also enable versatile control of the video content, including mesh-to-video generation, camera control, motion transfer, and object manipulation.
+
+ # REFERENCES
+
+ Sherwin Bahmani, Ivan Skorokhodov, Guocheng Qian, Aliaksandr Siarohin, Willi Menapace, Andrea Tagliasacchi, David B Lindell, and Sergey Tulyakov. 2024. AC3D: Analyzing and Improving 3D Camera Control in Video Diffusion Transformers. arXiv preprint arXiv:2411.18673 (2024).
+ Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. 2023. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127 (2023).
+ Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R Richter, and Vladlen Koltun. 2024. Depth Pro: Sharp monocular metric depth in less than a second. arXiv preprint arXiv:2410.02073 (2024).
+ Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. 2024. Video generation models as world simulators. https://openai.com/research/video-generation-models-as-world-simulators
+ Shengqu Cai, Duygu Ceylan, Matheus Gadelha, Chun-Hao Paul Huang, Tuanfeng Yang Wang, and Gordon Wetzstein. 2024. Generative rendering: Controllable 4d-guided video generation with 2d diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 7611-7620.
+ Tianshi Cao, Karsten Kreis, Sanja Fidler, Nicholas Sharp, and Kangxue Yin. 2023. TexFusion: Synthesizing 3d textures with text-guided image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 4169-4181.
+ Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. 2023b. VideoCrafter1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512 (2023).
+ Haoxin Chen, Yong Zhang, Xiaodong Cun, Menghan Xia, Xintao Wang, Chao Weng, and Ying Shan. 2024b. VideoCrafter2: Overcoming data limitations for high-quality video diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 7310-7320.
+ Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li. 2024a. PIXART-Sigma: Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation. In European Conference on Computer Vision. Springer, 74-91.
+ Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. 2023a. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404 (2023).
+ Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. 2023. Structure and content-guided video synthesis with diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 7346-7356.
+ Ruoyu Feng, Wenming Weng, Yanhui Wang, Yuhui Yuan, Jianmin Bao, Chong Luo, Zhibo Chen, and Baining Guo. 2024b. CCEdit: Creative and Controllable Video Editing via Diffusion Models. arXiv:2309.16496 [cs.CV] https://arxiv.org/abs/2309.16496
+ Wanquan Feng, Tianhao Qi, Jiawei Liu, Mingzhen Sun, Pengqi Tu, Tianxiang Ma, Fei Dai, Songtao Zhao, Siyu Zhou, and Qian He. 2024a. I2VControl: Disentangled and Unified Video Motion Synthesis Control. arXiv preprint arXiv:2411.17765 (2024).
+ Daniel Geng, Charles Herrmann, Junhwa Hur, Forrester Cole, Serena Zhang, Tobias Pfaff, Tatiana Lopez-Guevara, Carl Doersch, Yusuf Aytar, Michael Rubinstein, et al. 2024. Motion Prompting: Controlling Video Generation with Motion Trajectories. arXiv preprint arXiv:2412.02700 (2024).
+ Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. 2023a. TokenFlow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373 (2023).
+ Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. 2023b. TokenFlow: Consistent Diffusion Features for Consistent Video Editing. arXiv:2307.10373 [cs.CV] https://arxiv.org/abs/2307.10373
+ Yuwei Guo, Ceyuan Yang, Anyi Rao, Maneesh Agrawala, Dahua Lin, and Bo Dai. 2024. SparseCtrl: Adding sparse controls to text-to-video diffusion models. In ECCV. 330-348.
+ Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. 2023. AnimateDiff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725 (2023).
+ Hao He, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. 2024b. CameraCtrl: Enabling camera control for text-to-video generation. arXiv preprint arXiv:2404.02101 (2024).
+ Xuanhua He, Quande Liu, Shengju Qian, Xin Wang, Tao Hu, Ke Cao, Keyu Yan, and Jie Zhang. 2024a. ID-Animator: Zero-shot identity-preserving human video generation. arXiv preprint arXiv:2404.15275 (2024).
+ Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. 2022. Latent video diffusion models for high-fidelity long video generation. arXiv preprint arXiv:2211.13221 (2022).
+ Jack Hessel, Ari Holtzman, Maxwell Forbes, Ronan Le Bras, and Yejin Choi. 2022. CLIPScore: A Reference-free Evaluation Metric for Image Captioning. arXiv:2104.08718 [cs.CV] https://arxiv.org/abs/2104.08718
+ Jonathan Ho, Ajay Jain, and Pieter Abbeel. 2020. Denoising diffusion probabilistic models. NeurIPS (2020).
+ Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. 2022. Video diffusion models. Advances in Neural Information Processing Systems 35 (2022), 8633-8646.
+ Hsin-Ping Huang, Yu-Chuan Su, Deqing Sun, Lu Jiang, Xuhui Jia, Yukun Zhu, and Ming-Hsuan Yang. 2023. Fine-grained controllable video generation via object appearance and context. arXiv preprint arXiv:2312.02919 (2023).
+ Yash Jain, Anshul Nasery, Vibhav Vineet, and Harkirat Behl. 2024. Peekaboo: Interactive video generation via masked-diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8079-8088.
+ Hyeonho Jeong, Chun-Hao Paul Huang, Jong Chul Ye, Niloy Mitra, and Duygu Ceylan. 2024. Track4Gen: Teaching Video Diffusion Models to Track Points Improves Video Generation. arXiv preprint arXiv:2412.06016 (2024).
+ Xuan Ju, Yiming Gao, Zhaoyang Zhang, Ziyang Yuan, Xintao Wang, Ailing Zeng, Yu Xiong, Qiang Xu, and Ying Shan. 2024. MiraData: A Large-Scale Video Dataset with Long Durations and Structured Captions. arXiv:2407.06358 [cs.CV] https://arxiv.org/abs/2407.06358
+ Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. 2023. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 4015-4026.
+ Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, et al. 2024. HunyuanVideo: A Systematic Framework For Large Video Generative Models. arXiv preprint arXiv:2412.03603 (2024).
+ Mathis Koroglu, Hugo Caselles-Dupré, Guillaume Jeanneret Sanmiguel, and Matthieu Cord. 2024. OnlyFlow: Optical Flow based Motion Conditioning for Video Diffusion Models. arXiv preprint arXiv:2411.10501 (2024).
+ Kuaishou. 2024. Keling. https://kling.kuaishou.com/
+ Black Forest Labs. 2024. FLUX. https://github.com/black-forest-labs/flux
+ Guojun Lei, Chi Wang, Hong Li, Rong Zhang, Yikai Wang, and Weiwei Xu. 2024. AnimateAnything: Consistent and Controllable Animation for Video Generation. arXiv preprint arXiv:2411.10836 (2024).
+ Yaowei Li, Xintao Wang, Zhaoyang Zhang, Zhouxia Wang, Ziyang Yuan, Liangbin Xie, Yuexian Zou, and Ying Shan. 2024. Image conductor: Precision control for interactive video synthesis. arXiv preprint arXiv:2406.15339 (2024).
+ Bin Lin, Yunyang Ge, Xinhua Cheng, Zongjian Li, Bin Zhu, Shaodong Wang, Xianyi He, Yang Ye, Shenghai Yuan, Liuhan Chen, et al. 2024. Open-Sora Plan: Open-Source Large Video Generation Model. arXiv preprint arXiv:2412.00131 (2024).
+ Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. 2023. SMPL: A skinned multi-person linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2. 851-866.
+ David G. Lowe. 2004. Distinctive Image Features from Scale-Invariant Keypoints. International Journal of Computer Vision 60, 2 (2004), 91-110.
+ Jiahao Lu, Tianyu Huang, Peng Li, Zhiyang Dou, Cheng Lin, Zhiming Cui, Zhen Dong, Sai-Kit Yeung, Wenping Wang, and Yuan Liu. 2024. Align3R: Aligned Monocular Depth Estimation for Dynamic Videos. arXiv preprint arXiv:2412.03079 (2024).
+ Wan-Duo Kurt Ma, John P Lewis, and W Bastiaan Kleijn. 2024b. Trailblazer: Trajectory control for diffusion-based video generation. In SIGGRAPH Asia.
+ Yue Ma, Yingqing He, Hongfa Wang, Andong Wang, Chenyang Qi, Chengfei Cai, Xiu Li, Zhifeng Li, Heung-Yeung Shum, Wei Liu, et al. 2024a. Follow-your-click: Open-domain regional image animation via short prompts. arXiv preprint arXiv:2403.08268 (2024).
+ Tuna Han Salih Meral, Hidir Yesiltepe, Connor Dunlop, and Pinar Yanardag. 2024. MotionFlow: Attention-Driven Motion Transfer in Video Diffusion Models. arXiv preprint arXiv:2412.05275 (2024).
+ Chong Mou, Mingdeng Cao, Xintao Wang, Zhaoyang Zhang, Ying Shan, and Jian Zhang. 2024. ReVideo: Remake a Video with Motion and Content Control. arXiv preprint arXiv:2405.13865 (2024).
+ Koichi Namekata, Sherwin Bahmani, Ziyi Wu, Yash Kant, Igor Gilitschenski, and David B Lindell. 2024. SG-I2V: Self-guided trajectory control in image-to-video generation. arXiv preprint arXiv:2411.04989 (2024).
+ Muyao Niu, Xiaodong Cun, Xintao Wang, Yong Zhang, Ying Shan, and Yinqiang Zheng. 2024. MOFA-Video: Controllable image animation via generative motion field adaptions in frozen image-to-video diffusion model. In ECCV.
+ Geon Yeong Park, Hyeonho Jeong, Sang Wan Lee, and Jong Chul Ye. 2024. Spectral motion alignment for video motion transfer using diffusion models. arXiv preprint arXiv:2403.15249 (2024).
+ William Peebles and Saining Xie. 2023a. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 4195-4205.
+ William Peebles and Saining Xie. 2023b. Scalable Diffusion Models with Transformers. arXiv:2212.09748 [cs.CV] https://arxiv.org/abs/2212.09748
+ Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, et al. 2024. Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720 (2024).
+ Alexander Pondaven, Aliaksandr Siarohin, Sergey Tulyakov, Philip Torr, and Fabio Pizzati. 2024. Video Motion Transfer with Diffusion Transformers. arXiv preprint arXiv:2412.07776 (2024).
+ Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alexander Sorkine-Hornung, and Luc Van Gool. 2017. The 2017 DAVIS Challenge on Video Object Segmentation. arXiv:1704.00675 (2017).
+ Haonan Qiu, Zhaoxi Chen, Zhouxia Wang, Yingqing He, Menghan Xia, and Ziwei Liu. 2024. FreeTraj: Tuning-free trajectory control in video diffusion models. arXiv preprint arXiv:2406.16863 (2024).
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. arXiv:2103.00020 [cs.CV] https://arxiv.org/abs/2103.00020
+ René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. 2020. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence 44, 3 (2020), 1623-1637.
+ Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, and Daniel Cohen-Or. 2023. TEXTure: Text-guided texturing of 3D shapes. In ACM SIGGRAPH 2023 Conference Proceedings. 1-11.
+ Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-resolution image synthesis with latent diffusion models. In CVPR.
+ Xiaoyu Shi, Zhaoyang Huang, Fu-Yun Wang, Weikang Bian, Dasong Li, Yi Zhang, Manyuan Zhang, Ka Chun Cheung, Simon See, Hongwei Qin, et al. 2024. Motion-I2V: Consistent and controllable image-to-video generation with explicit motion modeling. In SIGGRAPH.
+ Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. 2021. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems 34 (2021), 19313-19325.
+ Jiaming Song, Chenlin Meng, and Stefano Ermon. 2020. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020).
+ Yao Teng, Enze Xie, Yue Wu, Haoyu Han, Zhenguo Li, and Xihui Liu. 2023. Drag-a-video: Non-rigid video editing with point-based interaction. arXiv preprint arXiv:2312.02936 (2023).
+ Thomas Unterthiner, Sjoerd van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. 2019. Towards Accurate Generative Models of Video: A New Metric & Challenges. arXiv:1812.01717 [cs.CV] https://arxiv.org/abs/1812.01717
+ Jiawei Wang, Yuchen Zhang, Jiaxin Zou, Yan Zeng, Guoqiang Wei, Liping Yuan, and Hang Li. 2024f. Boximator: Generating rich and controllable motions for video synthesis. arXiv preprint arXiv:2402.01566 (2024).
+ Ruicheng Wang, Sicheng Xu, Cassie Dai, Jianfeng Xiang, Yu Deng, Xin Tong, and Jiaolong Yang. 2024b. MoGe: Unlocking Accurate Monocular Geometry Estimation for Open-Domain Images with Optimal Training Supervision. arXiv:2410.19115 [cs.CV] https://arxiv.org/abs/2410.19115
+ Tianfu Wang, Menelaos Kanakis, Konrad Schindler, Luc Van Gool, and Anton Obukhov. 2023. Breathing new life into 3d assets with generative repainting. arXiv preprint arXiv:2309.08523 (2023).
+ Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. 2024d. VideoComposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems 36 (2024).
+ Yuelei Wang, Jian Zhang, Pengtao Jiang, Hao Zhang, Jinwei Chen, and Bo Li. 2024e. CPA: Camera-pose-awareness Diffusion Transformer for Video Generation. arXiv preprint arXiv:2412.01429 (2024).
+ Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing 13, 4 (2004), 600-612. https://doi.org/10.1109/TIP.2003.819861
+ Zhouxia Wang, Yushi Lan, Shangchen Zhou, and Chen Change Loy. 2024a. ObjCtrl-2.5D: Training-free Object Control with Camera Poses. arXiv preprint arXiv:2412.07721 (2024).
+ Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. 2024c. MotionCtrl: A unified and flexible motion controller for video generation. In SIGGRAPH.
+ Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. 2024b. SpatialTracker: Tracking Any 2D Pixels in 3D Space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 20406-20417.
+ Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. 2024a. Trajectory Attention for Fine-grained Video Motion Control. arXiv preprint arXiv:2411.19324 (2024).
+ Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Wangbo Yu, Hanyuan Liu, Gongye Liu, Xintao Wang, Ying Shan, and Tien-Tsin Wong. 2024. DynamiCrafter: Animating open-domain images with video diffusion priors. In European Conference on Computer Vision. Springer, 399-417.
+ Shiyuan Yang, Liang Hou, Haibin Huang, Chongyang Ma, Pengfei Wan, Di Zhang, Xiaodong Chen, and Jing Liao. 2024a. Direct-a-video: Customized video generation with user-directed camera movement and object motion. In ACM SIGGRAPH 2024 Conference Papers. 1-12.
+ Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024b. CogVideoX: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024).
+ Danah Yatim, Rafail Fridman, Omer Bar-Tal, Yoni Kasten, and Tali Dekel. 2024. Space-time diffusion features for zero-shot text-driven motion transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8466-8476.
+ Shengming Yin, Chenfei Wu, Jian Liang, Jie Shi, Houqiang Li, Gong Ming, and Nan Duan. 2023. DragNUWA: Fine-grained control in video generation by integrating text, image, and trajectory. arXiv preprint arXiv:2308.08089 (2023).
+ Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. 2024. ViewCrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048 (2024).
+ Shenghai Yuan, Jinfa Huang, Xianyi He, Yunyuan Ge, Yujun Shi, Liuhan Chen, Jiebo Luo, and Li Yuan. 2024. Identity-Preserving Text-to-Video Generation by Frequency Decomposition. arXiv preprint arXiv:2411.17440 (2024).
+ Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. 2023. Adding conditional control to text-to-image diffusion models. In ICCV. 3836-3847.
+ Qihang Zhang, Shuangfei Zhai, Miguel Angel Bautista, Kevin Miao, Alexander Toshev, Joshua Susskind, and Jiatao Gu. 2024. World-consistent Video Diffusion with Explicit 3D Modeling. arXiv preprint arXiv:2412.01821 (2024).
+ Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. 2018. The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. arXiv:1801.03924 [cs.CV] https://arxiv.org/abs/1801.03924
+ Tingyang Zhang, Chen Wang, Zhiyang Dou, Jiahui Lei, Qingzhe Gao, Baoquan Chen, and Lingjie Liu. 2025. ProTracker: Probabilistic Integration for Robust and Accurate Point Tracking. arXiv preprint arXiv:2501.03220 (2025).
+ Guangcong Zheng, Teng Li, Rui Jiang, Yehao Lu, Tao Wu, and Xi Li. 2024a. CamI2V: Camera-Controlled Image-to-Video Diffusion Model. arXiv preprint arXiv:2410.15957 (2024).
+ Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. 2024b. Open-Sora: Democratizing Efficient Video Production for All. https://github.com/hpcaitech/Open-Sora
+ Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. 2018. Stereo Magnification: Learning View Synthesis using Multiplane Images. In SIGGRAPH.
+ Shenhao Zhu, Junming Leo Chen, Zuozhuo Dai, Yinghui Xu, Xun Cao, Yao Yao, Hao Zhu, and Siyu Zhu. 2024. Champ: Controllable and Consistent Human Image Animation with 3D Parametric Guidance. arXiv:2403.14781 [cs.CV]
2501.03xxx/2501.03847/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b9202e360f1fb742e4e1adf9ef676b19f65879d5315c7d6126e64510349506b
3
+ size 1654408
2501.03xxx/2501.03847/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03895/d7c2366b-b4d8-4abe-ac93-634e84e98114_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57b0cf3d0cfc5e24848d5239a63b391c8a1f669ae3b2ced294cf9c7a1f13fc6f
3
+ size 8842183
2501.03xxx/2501.03895/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03895/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d943f1c15ddd08359574840971b62ce4cd48b73b052f8b4e37242acdfafcc0a
3
+ size 1359605
2501.03xxx/2501.03895/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03931/7c87c34f-6034-4a6a-95cf-83d77cdef7f4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1709c2dd70be0624db767390188af86938fbe62524230561339c7cb28fe75449
3
+ size 29780949
2501.03xxx/2501.03931/full.md ADDED
@@ -0,0 +1,753 @@
1
+ # MagicMirror: ID-Preserved Video Generation in Video Diffusion Transformers
2
+
3
+ Yuechen Zhang $^{1}$ Yaoyang Liu $^{2}$ Bin Xia $^{1}$ Bohao Peng $^{1}$ Zexin Yan $^{3}$ Eric Lo $^{1}$ Jiaya Jia $^{1,2,4}$ $^{1}$ CUHK ${}^{2}$ HKUST ${}^{3}$ CMU ${}^{4}$ SmartMore
4
+
5
+ Figure 1. MagicMirror generates text-to-video results given the ID reference image. Complete videos are available at https://julianjuaner.github.io/projects/MagicMirror/.
6
+
7
+ # Abstract
8
+
9
+ We present MagicMirror, a framework for generating identity-preserved videos with cinematic-level quality and dynamic motion. While recent advances in video diffusion models have shown impressive capabilities in text-to-video generation, maintaining consistent identity while producing natural motion remains challenging. Previous methods either require person-specific fine-tuning or struggle to balance identity preservation with motion diversity. Built upon Video Diffusion Transformers, our method introduces three key components: (1) a dual-branch facial feature extractor that captures both identity and structural features, (2) a lightweight cross-modal adapter with Conditioned Adaptive Normalization for efficient identity integration, and (3) a two-stage training strategy combining synthetic identity pairs with video data. Extensive experiments demonstrate that MagicMirror effectively balances identity consistency with natural motion, outperforming existing methods across multiple metrics while adding minimal parameters. The code and model will be made publicly available.
10
+
11
+ # 1. Introduction
12
+
13
+ Recent advancements in image generation, particularly through Diffusion Models [4, 22, 48, 51], have propelled
14
+
15
+ personalized content creation to the forefront of computer vision research. While significant progress has been made in preserving personal identity (ID) in image generation [5, 19, 24, 34, 44, 56], achieving comparable fidelity in video generation remains challenging.
16
+
17
+ Existing ID-preserving video generation methods show promising results but face limitations. Approaches like Magic-Me and ID-Animator [20, 39], utilizing inflated UNets [18] for fine-tuning or adapter training, demonstrate some success in maintaining identity across frames. However, they are ultimately restricted by the generation model's inherent capabilities, often failing to produce high-quality videos (see Fig. 2). These approaches tend to copy and paste a static face rather than generate dynamic facial motions. The concurrent work ConsisID [69] implements identity-preserving video generation via a Diffusion Transformer (DiT) framework. While achieving notable performance in video quality, it still exhibits limitations in temporal smoothness and naturalistic facial motions. Another branch of methods combines image personalization with Image-to-Video (I2V) generation [60, 63, 64]. While these two-stage solutions preserve ID to some extent, they often struggle with stability in longer sequences and require a separate image generation step. To address current shortcomings, we present MagicMirror, a single-stage framework designed to generate high-quality videos while
18
+
19
+ maintaining strong ID consistency and dynamic natural facial motions. Our approach leverages native video diffusion models [64] to generate ID-specific videos, aiming to empower individuals as protagonists in their virtual narratives, and bridge the gap between personalized ID generation and high-quality video synthesis.
20
+
21
+ The generation of high-fidelity identity-preserving videos poses several technical challenges. A primary challenge stems from the architectural disparity between image and video generation paradigms. State-of-the-art video generation models, built on full-attention Diffusion Transformer (DiT) architectures [42, 64], are not directly compatible with conventional cross-attention conditioning methods. To bridge this gap, we introduce a lightweight identity-conditioning adapter integrated into the Video DiT framework. Specifically, we propose a dual-branch facial embedding that simultaneously preserves high-level identity features and reference-specific structural information. Meanwhile, we observed that current video foundation models optimize for text-video alignment, often at the cost of spatial fidelity and generation quality. This trade-off manifests in reduced image quality metrics on benchmarks such as VBench [25], making it particularly challenging to preserve fine-grained identity features. To address this, we develop Conditioned Adaptive Normalization (CAN), which effectively incorporates identity conditions, as a distribution prior, into the pre-trained foundation model. The CAN module, combined with a learnable cross-attention, enables identity conditioning through attention guidance and feature distribution guidance.
22
+
23
+ Another significant challenge lies in the acquisition of high-quality training data. While paired image data with consistent identity is relatively abundant, high-quality image-video pairs [66] that maintain high-fidelity identity consistency remain scarce. To address this limitation, we develop a strategic data synthesis pipeline that leverages identity preservation models [34] to generate paired training data. Our training methodology employs a progressive approach: initially pre-training on image data to learn robust identity representations, followed by video-specific fine-tuning. This two-stage strategy enables effective learning of identity features while ensuring temporal consistency in facial expressions across video sequences.
24
+
25
+ We evaluate our method on multiple general metrics by constructing a human-centric video generation test set, and comparing it with the aforementioned competitive ID-preserved video generation methods. Extensive experimental and visual evaluations [54] demonstrate that our approach successfully generates high-quality videos with dynamic content and strong facial consistency, as illustrated in Fig. 1. By integrating identity preservation with natural facial motion in Video DiT frameworks without case-specific fine-tuning, MagicMirror advances personalized
26
+
27
+ ![](images/a6abba123817bb2f354e3b23f589e7406edd7dc06ec20762efc5ed1104e9b170.jpg)
28
+ Video Ocean (closed-source)
29
+
30
+ ![](images/5d3fe1b5ba36ce7c1a9c7fe7c3c50fd4b58ba7722e5ee5798f4ff1a176b9c034.jpg)
31
+
32
+ ![](images/0bfb86b63c5bf849870f6f5ab42fdac4d5e11092d1966d85a5d9ef7de90ee675.jpg)
33
+ ID-Preserving | Prompt Following | Motion
34
+
35
+ ![](images/223d07000d9381f8913f0ce2519124f0ad8cf1cc979ef90f2588c34012ef6ccf.jpg)
36
+ Magic Mirror (Ours)
37
+ ID-Preserving | Prompt Following | Motion
38
+ Figure 2. MagicMirror generates dynamic facial motion. ID-Animator [20] and Video Ocean [37] exhibit limited motion range due to a strong identity-preservation constraint. MagicMirror achieves more dynamic facial expressions while maintaining reference identity fidelity.
39
+
40
+ video generation and enhances creative expression in digital storytelling.
41
+
42
+ Our main contributions are three-fold: (1) We introduce MagicMirror, a novel fine-tuning-free framework with a dual-branch condition extractor for generating ID-preserving videos; (2) We design a lightweight adapter with a conditioned adaptive normalization module for effective integration of identity features in full-attention Diffusion Transformer architectures; (3) We develop a dataset construction method that combines synthetic data generation with a progressive training strategy to address data scarcity challenges in personalized video generation.
43
+
44
+ # 2. Related Works
45
+
46
+ Diffusion Models. Since the introduction of DDPM [22], diffusion models have demonstrated remarkable capabilities across diverse domains, spanning NLP [16, 32], medical imaging [6, 7], and molecular modeling [8, 26]. In computer vision, following initial success in image generation [11, 29], Latent Diffusion Models (LDM) [48] significantly reduced computational requirements while maintaining generation quality. Subsequent developments in conditional architectures [40, 50] enabled fine-grained concept customization over the generation process.
47
+
48
+ Video Generation via Diffusion Models. Following the emergence of diffusion models, their superior controllability and diversity in image generation [62] have led to their prominence over traditional approaches based on GANs [17, 27, 28] and auto-regressive Transformers [13, 46, 67]. The Video Diffusion Model (VDM) [23] pioneered video generation using diffusion models by extending the traditional U-Net [49] architecture to process
49
+
50
+ temporal information. Subsequently, LVDM [21] demonstrated the effectiveness of latent space operations, while AnimateDiff [18] adapted text-to-image models for personalized video synthesis. A significant advancement came with the Diffusion Transformer (DiT) [42], which successfully merged Transformer architectures [12, 55] with diffusion models. Building on this foundation, Latte [38] emerged as the first open-source text-to-video model based on DiT. Following the breakthrough of Sora [41], several open-source initiatives including Open-Sora-Plan [35], Open-Sora [73], and CogVideoX [64] have advanced video generation through DiT architectures. While current research predominantly focuses on image-to-video translation [60, 63, 70], efficiency, and motion control [59, 61, 68, 72], the critical challenge of ID-preserving video generation remains relatively unexplored.
51
+
52
+ ID-Preserving Generation. ID-preserving generation, or identity customization, aims to maintain specific identity characteristics in generated images or videos. Initially developed in the GAN era [17] with significant advances in face generation [27, 47, 58], this field has evolved substantially with diffusion models, demonstrating enhanced capabilities in novel image synthesis [14, 50]. Current approaches to ID-preserving image generation fall into two main categories:
53
+
54
+ Tuning-based Models: These approaches fine-tune models using one or more reference images to generate identity-consistent outputs. Notable examples include Textual Inversion [14] and Dreambooth [50].
55
+
56
+ Tuning-free Models: Addressing the computational overhead problem, these models maintain high ID fidelity through additional conditioning and trainable parameters. Starting with IP-Adapter [65], various methods such as InstantID and PhotoMaker [5, 19, 24, 34, 44, 56] have emerged to enable efficient, high-quality personalized generation.
57
+
58
+ ID-preserving video generation introduces additional complexities, particularly in synthesizing realistic facial movements from static references while maintaining identity consistency. Current approaches include Magic-Me [39], a tuning-based method requiring per-identity optimization, and ID-Animator [20], a tuning-free approach utilizing face adapters and decoupled Cross-Attention [65]. However, these methods face challenges in maintaining dynamic expressions while preserving identity, and are constrained by base model limitations in video quality, duration, and prompt adherence. The integration of Diffusion Transformers presents promising opportunities for advancing ID-preserving video generation. ConsisID [69] is concurrent work built on the DiT architecture, but it struggles with video smoothness and facial dynamics.
59
+
60
+ # 3. MagicMirror
61
+
62
+ An overview of MagicMirror is illustrated in Fig. 3. This dual-branch framework (Sec. 3.2) extracts facial identity features from one or more reference images $r$ . They are subsequently processed through a DiT backbone augmented with a lightweight cross-modal adapter, incorporating Conditioned Adaptive Normalization (Sec. 3.3). This architecture enables MagicMirror to synthesize identity-preserved text-to-video outputs. The following sections elaborate on the preliminaries of diffusion models (Sec. 3.1) and each component of our method.
63
+
64
+ # 3.1. Preliminaries
65
+
66
+ Latent Diffusion Models (LDMs) generate data by iteratively reversing a noise corruption process, converting random noise into structured samples. At time step $t \in \{0, \dots, T\}$ , the model predicts latent state $x_{t}$ conditioned on $x_{t+1}$ :
67
+
68
+ $$
69
+ p_{\theta}\left(x_{t} \mid x_{t+1}\right) = \mathcal{N}\left(x_{t}; \widetilde{\mu}_{t}, \widetilde{\beta}_{t} I\right), \tag{1}
70
+ $$
71
+
72
+ where $\theta$ represents the model parameters, $\widetilde{\mu}_t$ denotes the predicted mean, and $\widetilde{\beta}_t$ is the variance schedule.
73
+
74
+ The training objective typically employs a mean squared error loss $\mathcal{L}_{\mathrm{noise}}$ on the noise prediction $\epsilon_{\theta}(x_t,t,c_{\mathrm{txt}})$ :
75
+
76
+ $$
77
+ \mathcal{L}_{\mathrm{noise}} = \mathbb{E}_{t,\, c_{\mathrm{txt}},\, \epsilon \sim \mathcal{N}(0,1)} \left[ \left\| \epsilon - \epsilon_{\theta}\left(x_{t}, t, c_{\mathrm{txt}}\right) \right\|^{2} \right], \tag{2}
78
+ $$
79
+
80
+ where $c_{\mathrm{txt}}$ denotes the text condition.
81
+
82
+ Recent studies on controllable generation [43, 56, 65, 71] extend this framework by incorporating additional control signals, such as image condition $c_{\mathrm{img}}$ . This is achieved through a feature extractor $\tau_{\mathrm{img}}$ that processes a reference image $r$ : $c_{\mathrm{img}} = \tau_{\mathrm{img}}(r)$ . Consequently, the noise prediction function in Eq. (2) becomes $\epsilon_{\theta}(x_t, t, c_{\mathrm{txt}}, c_{\mathrm{img}})$ . In this paper, we denote the additional facial condition by $c_{\mathrm{face}}$ .
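+
+ For concreteness, the conditioned objective above can be read as the following minimal PyTorch-style sketch. This is not the released implementation; `model` and `scheduler` are hypothetical stand-ins (the latter mirroring a diffusers-style `add_noise`), and the facial condition enters exactly like $c_{\mathrm{img}}$ :
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def denoising_loss(model, scheduler, x0, t, c_txt, c_face):
+     """MSE between sampled and predicted noise, following Eq. (2)."""
+     eps = torch.randn_like(x0)               # epsilon ~ N(0, I)
+     x_t = scheduler.add_noise(x0, eps, t)    # forward corruption of x0 at step t
+     eps_pred = model(x_t, t, c_txt, c_face)  # epsilon_theta(x_t, t, c_txt, c_face)
+     return F.mse_loss(eps_pred, eps)
+ ```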
83
+
84
+ # 3.2. Dual-Branch Facial Feature Extraction
85
+
86
+ The facial feature extraction module in MagicMirror, depicted in the left part of Fig. 3, is designed to extract high-level identity information and detailed structural features. Given an identity reference image $r \in \mathbb{R}^{h \times w \times 3}$ , our model extracts facial condition embeddings $c_{\mathrm{face}} = \{x_{\mathrm{face}}, \hat{x}_{\mathrm{id}}\}$ using a dual-branch perceiver architecture, where each branch specializes in a distinct aspect of facial representation.
87
+
88
+ To obtain these embeddings, we first extract dense feature maps $\mathbf{f} = F_{\mathrm{feat}}(r)$ from the input image, where $F_{\mathrm{feat}}$ is a pre-trained CLIP ViT encoder [45] that captures rich facial semantics. The identity branch employs an identity perceiver $\tau_{\mathrm{id}}$ to extract high-level identity features:
89
+
90
+ $$
91
+ x_{\mathrm{id}} = \tau_{\mathrm{id}}\left(q_{\mathrm{id}}, \mathbf{f}\right), \tag{3}
92
+ $$
93
+
94
+ where $q_{\mathrm{id}} = F_{\mathrm{id}}(r)$ represents high-level identity-aware features extracted by an ArcFace encoder $F_{\mathrm{id}}$ [9, 34].
95
+
96
+ The structural branch utilizes a facial structure perceiver $\tau_{\mathrm{face}}$ that focuses on fine-grained facial details, leveraging a learnable query embedding $q_{\mathrm{face}}$ for facial structure
97
+
98
+ ![](images/226f0fd05f6f56171457d13b87efae3e46a2feccec6c591df2709a223eba7bdb.jpg)
99
+ Figure 3. Overview of MagicMirror. The framework employs a dual-branch feature extraction system with ID and face perceivers, followed by a cross-modal adapter (illustrated in Fig. 4) for DiT-based video generation. By optimizing trainable modules marked by the flame, our method efficiently integrates facial features for controlled video synthesis while maintaining model efficiency.
100
+
101
+ extraction:
102
+
103
+ $$
104
+ x_{\mathrm{face}} = \tau_{\mathrm{face}}\left(q_{\mathrm{face}}, \mathbf{f}\right). \tag{4}
105
+ $$
106
+
107
+ Both perceivers $\tau_{\mathrm{face}}$ and $\tau_{\mathrm{id}}$ implement the standard Q-Former architecture [31] with distinct query conditions in Eqs. (3) and (4). The identity branch and structural branch serve complementary roles in ensuring high-quality ID-preserving video generation. The identity branch maintains consistent identity features across frames, guaranteeing that the generated video preserves the subject's identity throughout the sequence. Conversely, the structural branch captures fine-grained facial details and facilitates dynamic natural facial motions, effectively preventing structural collapse during movement.
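+
+ A minimal sketch of this dual-branch extraction (Eqs. (3)-(4)) is given below. The encoder and perceiver modules are placeholders rather than the authors' classes; the 32 structural query tokens follow Appendix A.5, and the feature dimension is assumed:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DualBranchExtractor(nn.Module):
+     """Two Q-Former-style perceivers over shared CLIP features."""
+     def __init__(self, clip_vit, arcface, id_perceiver, face_perceiver,
+                  n_face_tokens=32, dim=1024):
+         super().__init__()
+         self.clip_vit = clip_vit              # F_feat: dense facial semantics
+         self.arcface = arcface                # F_id: identity-aware features
+         self.id_perceiver = id_perceiver      # tau_id
+         self.face_perceiver = face_perceiver  # tau_face
+         # learnable structural queries q_face
+         self.q_face = nn.Parameter(torch.randn(1, n_face_tokens, dim))
+
+     def forward(self, ref_image):
+         f = self.clip_vit(ref_image)               # f = F_feat(r), shape (B, N, D)
+         q_id = self.arcface(ref_image)             # q_id = F_id(r)
+         x_id = self.id_perceiver(q_id, f)          # Eq. (3)
+         q = self.q_face.expand(f.size(0), -1, -1)  # broadcast queries over batch
+         x_face = self.face_perceiver(q, f)         # Eq. (4)
+         return x_id, x_face
+ ```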
108
+
109
+ To control these aspects effectively, we employ distinct feature injection mechanisms. Identity information is incorporated into the text representation, building upon recent advancements in personalized text-to-image generation [34, 50]. Specifically, identity embeddings are projected into the text embedding space via a fusion MLP $\mathrm{MLP}_{\mathrm{fuse}}$ : $\hat{x}_{\mathrm{id}} = \mathrm{MLP}_{\mathrm{fuse}}(x_{\mathrm{id}}, \mathbf{m}\,x_{\mathrm{txt}})$ , where $x_{\mathrm{txt}}$ denotes the input textual prompt embedding and $\mathbf{m}$ is a token-level binary mask that selectively applies fusion at identity-relevant tokens (e.g., "man", "woman") within $x_{\mathrm{txt}}$ . This process results in a fused ID representation $\hat{x}_{\mathrm{id}}$ . The final adapted text embedding for DiT is computed as a masked replacement:
110
+
111
+ $$
112
+ \hat{x}_{\mathrm{txt}} = \mathbf{m}\,\hat{x}_{\mathrm{id}} + (1 - \mathbf{m})\,x_{\mathrm{txt}}. \tag{5}
113
+ $$
114
+
115
+ Meanwhile, the structural embedding $x_{\mathrm{face}}$ is utilized as a direct conditioning signal to guide the generative process.
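+
+ One possible reading of the masked fusion and replacement (Eq. (5)) is sketched below; the exact input arrangement of $\mathrm{MLP}_{\mathrm{fuse}}$ is an assumption:
+
+ ```python
+ import torch
+
+ def fuse_identity(x_txt, x_id, mask, mlp_fuse):
+     """x_txt: (B, N, D) prompt tokens; x_id: (B, D) identity embedding;
+     mask: (B, N) binary, 1 at identity-relevant tokens (e.g., "man")."""
+     x_id_tok = x_id.unsqueeze(1).expand_as(x_txt)              # id feature per token
+     x_id_hat = mlp_fuse(torch.cat([x_id_tok, x_txt], dim=-1))  # fused ID representation
+     m = mask.unsqueeze(-1).to(x_txt.dtype)
+     return m * x_id_hat + (1.0 - m) * x_txt                    # Eq. (5)
+ ```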
116
+
117
+ # 3.3. Conditioned Adaptive Normalization
118
+
119
+ Having obtained the decoupled ID-aware conditions $c_{\mathrm{face}}$ , we address the challenge of efficiently integrating these conditions into the video diffusion transformer. Traditional Latent Diffusion Models, as exemplified by Stable Diffusion [48], utilize isolated cross-attention mechanisms for condition injection, which allow for straightforward adaptation to new conditions via decoupled cross-attention [19, 65]. However, our framework is based on mm-DiTs [64],
120
+
121
+ which implements a cross-modal full-attention paradigm coupled with layer-wise distribution modulation experts. This architectural choice introduces additional complexity in adapting to new conditions beyond simple cross-attention augmentation. Under this constraint, we find that modulation layers, despite requiring extremely few parameters, play a crucial role in learning the data distribution. This point is discussed experimentally in Appendix B.2.
122
+
123
+ Leveraging mm-DiT's layer-wise modulation [64], we propose a lightweight adapter that incorporates additional facial conditions. As illustrated in Fig. 4, the facial embedding $x_{\mathrm{face}}$ in Eq. (4) is concatenated with text and video features ( $x_{\mathrm{txt}}$ and $x_{\mathrm{vid}}$ ) to feed into the full self-attention. CogVideoX employs modal-specific modulation, where factors $m_{\mathrm{vid}}$ and $m_{\mathrm{txt}}$ are applied to their respective modalities through adaptive normalization, with modulation factors extracted from MLP extractors $\varphi_{\{\mathrm{txt}, \mathrm{vid}\}}$ . To accommodate the facial modality, we introduce a dedicated adaptive normalization module, normalizing facial features preceding the self-attention and feed-forward network (FFN). The corresponding set of modulation factors for the facial modality, $m_{\mathrm{face}}$ , is computed by an MLP $\varphi_{\mathrm{face}}$ :
124
+
125
+ $$
126
+ m_{\mathrm{face}} = \left\{ \mu_{\mathrm{face}}^{1}, \sigma_{\mathrm{face}}^{1}, \gamma_{\mathrm{face}}^{1}, \mu_{\mathrm{face}}^{2}, \sigma_{\mathrm{face}}^{2}, \gamma_{\mathrm{face}}^{2} \right\} = \varphi_{\mathrm{face}}(\mathbf{t}, l), \tag{6}
127
+ $$
128
+
129
+ where $\mathbf{t}$ denotes the time embedding and $l$ represents the layer index. Here, $\mu$ is the shift parameter adjusting the feature mean, $\sigma$ is the scale parameter modulating the feature amplitude, and $\gamma$ is the gating parameter controlling the influence of the modulation. Within each block, let $\phi^n$ denote the in-block operations, where $\phi^1$ represents the self-attention operation and $\phi^2$ represents the FFN. The feature transformation after operation $n$ is computed as $\bar{x}^n = x^{n - 1} \cdot (1 + \sigma^n) + \mu^n$ , then $x^n = \bar{x}^n + \gamma^n \phi^n (\bar{x}^n)$ , with modality-specific subscripts omitted for brevity.
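+
+ In code, one in-block modulation step reads as the short sketch below; pre-normalization is omitted for clarity, and this illustrates only the update rule above rather than the released implementation:
+
+ ```python
+ def modulated_op(x_prev, op, mu, sigma, gamma):
+     """One modulated operation phi^n (self-attention or FFN)."""
+     x_bar = x_prev * (1 + sigma) + mu  # shift/scale: x̄^n = x^{n-1}(1 + σ^n) + μ^n
+     return x_bar + gamma * op(x_bar)   # gated residual: x^n = x̄^n + γ^n φ^n(x̄^n)
+ ```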
130
+
131
+ Furthermore, we explore whether layer-wise distribution conditioned by ID features would make ID injection more effective. To enhance the distribution learning capability of text and video latents from specific reference IDs, we
132
+
133
+ ![](images/f3aeb58cd33de85456f738dccd443ca17f4ef512395cecaf50fe55bfefb76cd8.jpg)
134
+
135
+ ![](images/1780769790e86bd402b546a0026f6e5098f7cb8814cbed8b955dababfad62d77.jpg)
136
+
137
+ ![](images/5986f2af4f326ef84b31bd0134cafdd7bc6a0ea6d11c1c5658baa1a87f6ba01d.jpg)
138
+ Figure 4. Cross-modal adapter in DiT blocks. Top: Cross-modal modulation in mmDiTs. Bottom: The Conditioned Adaptive Normalization (CAN) for modal-specific feature modulation and decoupled attention integration.
139
+
140
+ introduce Conditioned Adaptive Normalization (CAN), inspired by class-conditioned DiT [42] and StyleGAN's [27] approach to control with conditions. Based on $\varphi_{\{\mathrm{txt},\mathrm{vid}\}}$ , CAN predicts distribution shifts for video and text modalities with a trainable MLP $\varphi_{\mathrm{cond}}$ :
141
+
142
+ $$
143
+ \hat{m}_{\mathrm{vid}},\, \hat{m}_{\mathrm{txt}} = \varphi_{\mathrm{cond}}(\mathbf{t}, l, \mu_{\mathrm{vid}}^{1}, x_{\mathrm{id}}). \tag{7}
144
+ $$
145
+
146
+ Here, $\mu_{\mathrm{vid}}^{1}$ acts as a distribution identifier for a better initialization of the CAN module, and $x_{\mathrm{id}}$ from Eq. (3) represents the identity distribution prior. The final modulation factors are computed via residual addition: $m_{\mathrm{vid}} = \hat{m}_{\mathrm{vid}} + \varphi_{\mathrm{vid}}(\mathbf{t},l), m_{\mathrm{txt}} = \hat{m}_{\mathrm{txt}} + \varphi_{\mathrm{txt}}(\mathbf{t},l)$ .
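+
+ The CAN update can be summarized by the following sketch, where `phi_cond`, `phi_vid`, and `phi_txt` stand for the MLPs above with simplified, assumed signatures:
+
+ ```python
+ def conditioned_modulation(t_emb, layer_idx, mu_vid_1, x_id,
+                            phi_cond, phi_vid, phi_txt):
+     """Identity-conditioned shifts (Eq. (7)) added onto the base factors."""
+     m_vid_hat, m_txt_hat = phi_cond(t_emb, layer_idx, mu_vid_1, x_id)
+     m_vid = m_vid_hat + phi_vid(t_emb, layer_idx)  # residual addition
+     m_txt = m_txt_hat + phi_txt(t_emb, layer_idx)
+     return m_vid, m_txt
+ ```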
147
+
148
+ Complementing the conditioned normalization, we augment the joint full self-attention $T_{\mathrm{SA}}$ with a cross-attention mechanism $T_{\mathrm{CA}}$ [20, 65] to further enhance the aggregation of ID modality features. The final attention output is computed as:
149
+
150
+ $$
151
+ x_{\mathrm{out}} = T_{\mathrm{SA}}\big(W_{q}(x_{\mathrm{full}}), W_{kv}(x_{\mathrm{face}})\big) + T_{\mathrm{CA}}\big(W_{q}(x_{\mathrm{full}}), \hat{W}_{kv}(x_{\mathrm{face}})\big), \tag{8}
152
+ $$
153
+
154
+ where $x_{\mathrm{full}}$ denotes the complete aggregated feature representation produced by concatenating or integrating text, video, and facial features from preceding layers. $T_{\mathrm{SA}}$ and $T_{\mathrm{CA}}$ utilize the same query projection $W_{q}(x_{\mathrm{full}})$ , while the key-value projections $\hat{W}_{kv}$ in cross-attention are re-initialized and trainable.
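+
+ A simplified single-head sketch of this decoupled attention (Eq. (8)) follows; packing keys and values into one chunked projection is our convention here, and multi-head reshaping is omitted:
+
+ ```python
+ import torch.nn.functional as F
+
+ def decoupled_attention(x_full, x_face, W_q, W_kv, W_kv_hat):
+     """Shared queries; pretrained vs. re-initialized key/value paths."""
+     q = W_q(x_full)                             # shared query projection
+     k, v = W_kv(x_face).chunk(2, dim=-1)        # pretrained projections
+     k2, v2 = W_kv_hat(x_face).chunk(2, dim=-1)  # trainable, re-initialized
+     sa = F.scaled_dot_product_attention(q, k, v)
+     ca = F.scaled_dot_product_attention(q, k2, v2)
+     return sa + ca                              # Eq. (8)
+ ```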
155
+
156
+ # 3.4. Data and Training
157
+
158
+ Training a zero-shot customization adapter presents unique data challenges compared to fine-tuning approaches, like Magic-Me [39]. Our model's full-attention architecture, which integrates spatial and temporal components inseparably, necessitates a two-stage training strategy. As shown in Fig. 5, we begin by training on diverse, high-quality datasets to develop robust identity preservation capabilities.
159
+
160
+ Our progressive training pipeline leverages diverse datasets to enhance model performance, particularly in identity preservation. For image pre-training, we first utilize the LAION-Face [53] dataset, which contains web-scale real images and provides a rich source for generating self-reference images. To further increase identity diversity, we utilize the SFHQ [3] dataset, which applies self-reference techniques with standard text prompts. To prevent overfitting and promote the generation of diverse face-head motion, we use the FFHQ [27] dataset as a base. From this, we randomly sample text prompts from a prompt pool of human image captions and synthesize ID-conditioned image pairs using PhotoMaker-V2 [34], ensuring both identity similarity and facial motion diversity through careful filtering.
161
+
162
+ For video post-training, we leverage the high-quality Pexels and Mixkit datasets [1, 2], along with a small collection of self-collected videos from the web. Similarly, synthesized images corresponding to the face references of keyframes are generated as identity references. The combined dataset offers rich visual content for training the model across images and videos.
163
+
164
+ The objective function combines identity-aware and general denoising losses: $\mathcal{L} = \mathcal{L}_{\mathrm{noise}} + \lambda\,(1 - \cos (q_{\mathrm{face}}, D(x_0)))$ , where $D(\cdot)$ represents the latent decoder for the denoised latent $x_0$ , and $\lambda$ is the balance factor. Following PhotoMaker [34], we compute the denoising loss specifically within the face area for $50\%$ of random training samples.
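+
+ Reading $\cos(q_{\mathrm{face}}, D(x_0))$ as a cosine similarity between facial embeddings of the reference and the decoded output, the objective can be sketched as follows; the $\lambda$ value is illustrative, and `decode` and `face_embed` are placeholders:
+
+ ```python
+ import torch.nn.functional as F
+
+ def total_loss(noise_loss, x0_latent, q_face_ref, decode, face_embed, lam=0.1):
+     """L = L_noise + lambda * (1 - cos(q_face, D(x0)))."""
+     decoded = decode(x0_latent)     # D(x0): decode the denoised latent
+     q_gen = face_embed(decoded)     # facial embedding of the generation
+     id_term = 1.0 - F.cosine_similarity(q_face_ref, q_gen, dim=-1).mean()
+     return noise_loss + lam * id_term
+ ```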
165
+
166
+ # 4. Experiments
167
+
168
+ # 4.1. Implementation Details
169
+
170
+ Dataset preparation. As illustrated in Fig. 5, our training pipeline leverages both self-referenced and synthetically paired image data [3, 27, 53] for identity-preserving alignment in the initial training phase. For synthetic data pairs (denoted as C and D in Fig. 5), we employ ArcFace [9] for facial recognition and detection to extract key attributes including age, bounding box coordinates, gender, and facial embeddings. Reference frames are then generated using PhotoMakerV2 [34]. We implement a quality control process by filtering image pairs $\{a,b\}$ based on their facial embedding cosine similarity, retaining pairs where $\cos(q_{\mathrm{face}}^a, q_{\mathrm{face}}^b) > 0.65$ , with $q_{\mathrm{face}}$ denoting the facial embedding. For text conditioning, we utilize MiniGemini-8B [33] to caption all video data, forming a diverse prompt pool
171
+
172
+ <table><tr><td>Models</td><td>Dynamic Degree↑</td><td>Text Alignment↑</td><td>Inception Score↑</td><td>Motion Smoothness↓</td><td>Average ID Similarity↑</td><td>Similarity Decay↓</td><td>Face Motion FMref↑</td><td>Face Motion FMinter↑</td><td>Overall Preference↑</td></tr><tr><td>DynamiCrafter [60]</td><td>0.455</td><td>0.168</td><td>8.20</td><td>0.507</td><td>0.896</td><td>0.002</td><td>0.237</td><td>0.287</td><td>5.402</td></tr><tr><td>EasyAnimate-I2V [63]</td><td>0.155</td><td>0.177</td><td>9.55</td><td>0.482</td><td>0.903</td><td>0.022</td><td>0.262</td><td>0.278</td><td>5.935</td></tr><tr><td>CogVideoX-I2V [64]</td><td>0.660</td><td>0.213</td><td>9.85</td><td>0.497</td><td>0.901</td><td>0.029</td><td>0.413</td><td>0.532</td><td>6.985</td></tr><tr><td>ID-Animator [20]</td><td>0.140</td><td>0.211</td><td>7.57</td><td>0.515</td><td>0.923</td><td>0.005</td><td>0.652</td><td>0.181</td><td>5.693</td></tr><tr><td>ConsisID [69]</td><td>0.615</td><td>0.236</td><td>11.09</td><td>0.513</td><td>0.913</td><td>0.002</td><td>0.652</td><td>0.601</td><td>6.640</td></tr><tr><td>MagicMirror</td><td>0.705</td><td>0.240</td><td>10.59</td><td>0.484</td><td>0.922</td><td>0.002</td><td>0.730</td><td>0.610</td><td>7.315</td></tr></table>
173
+
174
+ Table 1. Quantitative comparisons. We report results with Image-to-Video and ID-preserved models. ID similarities are evaluated on the corresponding face-enhanced prompts to avoid face missing caused by complex prompts. Arrows indicate the direction of improved performance for each metric. We highlight the best and the second best results for each metric.
175
+
176
+ ![](images/17feac24bd203c33f02fb83107932a1c39cc89ba2abc96fd589d8a7242975227.jpg)
177
+ Figure 5. Overview of our training datasets. The pipeline includes image pre-training data (A-C) and video post-training data (D). We utilize both self-reference data (A, B) and filtered synthesized pairs with the same identity (C, D). Numbers of (images + synthesized images) are reported.
178
+
179
+ containing 29K prompts, while CogVLM [57] provides video descriptions in the second training stage. Detailed data collection procedures are provided in Appendix A.1.
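+
+ The similarity-based pair filter described above amounts to a one-line check; `face_embed` is assumed to return unit-normalized ArcFace-style embeddings:
+
+ ```python
+ import torch.nn.functional as F
+
+ def keep_pair(img_a, img_b, face_embed, thresh=0.65):
+     """Retain a synthetic pair only if cos(q_face^a, q_face^b) > 0.65."""
+     qa, qb = face_embed(img_a), face_embed(img_b)
+     return F.cosine_similarity(qa, qb, dim=-1).item() > thresh
+ ```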
180
+
181
+ Training Details. Our MagicMirror framework extends CogVideoX-5B [64] by integrating facial-specific modal adapters into alternating DiT layers (i.e., adapters in all layers with even index $l$ ). We adopt the feature extractor $F_{\mathrm{feat}}$ and ID perceiver $\tau_{\mathrm{id}}$ from a pre-trained PhotoMakerV2 [34]. In the image pre-training stage, we optimize the adapter components for 30K iterations using a global batch size of 64. Subsequently, we perform video fine-tuning for 5K iterations with a batch size of 8 to enhance temporal consistency in video generation. Both phases employ a decayed learning rate starting from $10^{-5}$ . All experiments were conducted on a single compute node with 8 NVIDIA A800 GPUs.
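+
+ The alternating-layer placement can be expressed as a small helper; `make_adapter` and the attribute name are hypothetical and illustrate only the even-index pattern:
+
+ ```python
+ import torch.nn as nn
+
+ def attach_face_adapters(dit_blocks: nn.ModuleList, make_adapter):
+     """Attach a facial adapter to every even-indexed DiT block."""
+     for l, block in enumerate(dit_blocks):
+         if l % 2 == 0:
+             block.face_adapter = make_adapter()
+ ```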
182
+
183
+ Evaluation and Comparisons. We evaluate our approach against the state-of-the-art ID-consistent video generation models ID-Animator [20] and ConsisID [69], and against leading Image-to-Video (I2V) frameworks, including DynamiCrafter [60],
184
+
185
+ CogVideoX-I2V [64], and EasyAnimate [63]. Our evaluation leverages the standardized VBench [25] for video generation assessment, measuring motion quality and text-motion alignment. For identity preservation, we utilize facial recognition embedding similarity [15] and facial motion metrics. Our evaluation dataset consists of 40 single-character prompts from VBench, ensuring demographic diversity, and 40 action-specific prompts for motion assessment. Identity references are sampled from 50 face identities from PubFig [30], generating four personalized videos per identity across varied prompts.
186
+
187
+ # 4.2. Quantitative Evaluation
188
+
189
+ The quantitative results are summarized in Tab. 1. We evaluate generated videos using VBench's and EvalCrafter's general metrics [25, 36], including dynamic degree, text-prompt consistency, and Inception Score [52] for video quality assessment. We also evaluate smoothness using cross-frame optical flow consistency. For identity preservation, we introduce Average ID Similarity, which measures the similarity between generated faces and a group of reference images sharing the same identity, averaged over the group. This prevents models from achieving artificially high scores through naive copy-paste behavior, as illustrated in Fig. 2. Face motion is quantified using two metrics: $\mathrm{FM}_{\mathrm{ref}}$ (relative distance to the reference face) and $\mathrm{FM}_{\mathrm{inter}}$ (inter-frame distance), computed using RetinaFace [10] landmarks after position alignment; the L2 distance between the normalized coordinates is reported as the metric.
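+
+ As a sketch, the group-averaged similarity and a simple first-versus-last decay proxy could be computed as below; `face_embed` is assumed to return unit-normalized embeddings, and the exact decay definition here is illustrative:
+
+ ```python
+ import numpy as np
+
+ def avg_id_similarity(frame_faces, ref_faces, face_embed):
+     refs = np.stack([face_embed(r) for r in ref_faces])   # (R, D), unit-normalized
+     sims = [float(np.mean(refs @ face_embed(f))) for f in frame_faces]
+     return float(np.mean(sims)), sims[0] - sims[-1]       # average, decay proxy
+ ```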
190
+
191
+ Our method achieves superior facial similarity compared to I2V approaches while maintaining performance competitive with ID-Animator and ConsisID. We demonstrate strong text alignment, video quality, and dynamic performance, attributed to our decoupled facial feature extraction and cross-modal adapter with CAN.
192
+
193
+ Besides, we analyze the facial similarity drop across uniformly sampled frames from each video to assess temporal identity consistency, reported as the similarity decay term in Tab. 1. Standard I2V models (CogVideoX-I2V [64], EasyAnimate [63]) exhibit significant temporal decay in identity preservation. Although DynamiCrafter [60] shows better stability due to its random reference strategy, it
194
+
195
+ ![](images/68905b9f71f291d880e3720f1720c9ab748356300eddad599ef1eb1b4d669226.jpg)
196
+ An elderly man, with a determined expression, stands in a sunlit gym, wearing a gray tank top, his muscles taut as he grips a heavy kettlebell. The room is filled with natural light streaming through large windows, casting shadows on the polished wooden floor. His face shows concentration and strength, highlighting his commitment to fitness. As he lifts the kettlebell with steady hands, the camera captures the sweat glistening on his brow, emphasizing his effort and resilience. The background features neatly arranged gym equipment, adding to the atmosphere of dedication and perseverance.
197
+
198
+ ![](images/cf3aabbf8d991db1d6e50b89568b2ce14328ce7e63c73b8e883083bb8b4bc12c.jpg)
199
+
200
+ ![](images/d7bf0048d63a239eb8be22c56cbac44b5a48f5296bc234c5e34d19b7991da2f3.jpg)
201
+
202
+ ![](images/b89049296d31fd1f1236240c1f33a80f1043e3658ce5066811aba34adbdd3b45.jpg)
203
+
204
+ ![](images/f63d4002bd01082bb8f43ccdb55b7f2fabad49e61915975a6ace0e08bd4a51e9.jpg)
205
+ A serene woman, dressed in a cozy oversized sweater and jeans, kneels on a lush green meadow, gently petting a friendly golden retriever. The dog's tail wags enthusiastically, its fur gleaming in the soft sunlight. Her face lights up with a warm smile as her hand moves tenderly over the dog's head and back. In the background, a picturesque landscape of rolling hills and blooming wildflowers enhances the tranquil scene. The golden retriever, with its tongue lolling out and eyes full of affection, leans into her touch, creating a heartwarming moment of connection and joy.
206
+
207
+ ![](images/9ebf5738155546f132955cdcda1b62ff016b45133fbe5e20c1a70322941029ec.jpg)
208
+
209
+ ![](images/504530b215d7edaa0fff3263cda953b818bc43ff580758b46ffb86efa1034eab.jpg)
210
+
211
+ ![](images/9c396b026d1a5c3fbbe2453cda37647f65cffd4c96e4e8199468cd71058d88e0.jpg)
212
+
213
+ ![](images/19ebff5f56b257e9ad273a9e0561d92afa204f3c87515dfcb373269c0c291972.jpg)
214
+
215
+ ![](images/a03a73dbbca6ed6283866a10c00ab078ea3892aa0ba6c50e64246f8976a3a20c.jpg)
216
+
217
+ ![](images/124dfb340605b9062b5103d393290b3410dda084c3af089ac18a292b3eb41d63.jpg)
218
+
219
+ ![](images/bb5efd76e2fd82eaaad90f6225e023a7b80401a371e4a1dd06a52d80bb3d92c3.jpg)
220
+
221
+ ![](images/1931a4c2c303bb8d0be12b83364ba1f4d1322375096b7000b494057331a267b4.jpg)
222
+
223
+ ![](images/ae6d26229a3426c0a4389fc0db31316d14377cf402636e13e296c8a7d961c6b8.jpg)
224
+
225
+ ![](images/912a9a632a0cd48a5126b36f48b7ef17a4b9f5ddf64b797d95674a1e82e0ea78.jpg)
226
+
227
+ ![](images/3e7e91de5f9d18f37245564ac4ca634f4efebbdd08e6838c2f7879d3771e531c.jpg)
228
+
229
+ ![](images/52a5f394c07aab83b710025f2caa6f530d45fb48e0e49fde7050f0bbe6301866.jpg)
230
+
231
+ ![](images/3eb4358bb91f1b586c502f7fe0be7172ea79a147383a8281219d45faa963ba7a.jpg)
232
+
233
+ ![](images/240f356a3c2cab425efb11f1113c4dd48d93e87112f2e2e0fd1db9d2426334b7.jpg)
234
+
235
+ ![](images/8267956dc07ac13b14de13ea4917bf95c996e7850a38b7040441d1eb1ebf7d48.jpg)
236
+
237
+ ![](images/018a3d4c6397d18a6906d70b91e2736694edf3d5658fca57aa3d5dfeeac16bde.jpg)
238
+ A bearded man in his thirties, wearing a plaid shirt, sits at a rustic wooden bar, surrounded by an array of beer taps and vintage brewery decor. He carefully lifts a frosty pint glass filled with amber beer, examining its color and clarity against the warm, ambient lighting. He takes a slow, appreciative sip, his eyes closing momentarily as he savors the complex flavors. The camera captures the subtle smile of satisfaction on his face, highlighting the rich foam on his upper lip. The background hum of soft chatter and clinking glasses adds to the cozy, inviting atmosphere of the pub.
239
+
240
+ ![](images/05d5179998c4ff8a23a01c084abb12d40bbbde765fbc99bf1c718d8d6a3f42aa.jpg)
241
+
242
+ ![](images/7043de2bba5a34e37a5c91bb0f620f79afcaf2102db369f6e2af5c6dbb422285.jpg)
243
+
244
+ ![](images/2a5008937e4584757c7e7558cb7a9c43154d600b0e9db966ac45b3490b06703c.jpg)
245
+
246
+ ![](images/590b72ab49543d72cdacc1c1497cf3add43f9a81001d2b07066224d8d2176b3f.jpg)
247
+
248
+ ![](images/33222bf879680579852080eb894b39b7d4ae2ce4be9d2e81e11fa5143cbb77ba.jpg)
249
+
250
+ ![](images/268e54f6a4a9f17fdaadde3e1d812d6d911c5611abd6ce464700110849407136.jpg)
251
+
252
+ ![](images/02092a4f64a2be5ee86057835d7095e75d8fc910e08e76690a85ba113395c3c8.jpg)
253
+
254
+ ![](images/ef3a097cac5d8ace47db3314e02fd22820e3b9f3cc532c5b9f9c10cdea753e73.jpg)
255
+ Figure 6. Qualitative comparisons. Captions and reference identity images are presented in the top-left corner for each case.
256
+
257
+ ![](images/bad9436fd8335a02cbaf57ee547ee22339fb57e931017f5f8bce48f5a0f12b70.jpg)
258
+
259
+ ![](images/62ad12d7a468c066069cbc0557ff2c248bca55768427b7b53cbb966766f26dd4.jpg)
260
+
261
+ ![](images/a1815e354bde0ad3ecf2e7eafde187ac00c69f6844a374366df24014c817b52a.jpg)
262
+
263
+ compromises fidelity. Both ConsisID [69] and MagicMirror maintain consistent identity preservation throughout the video duration.
264
+
265
+ # 4.3. Qualitative Evaluation
266
+
267
+ Beyond the examples shown in Fig. 1, we present comparative results in Fig. 6. Our method maintains high text coherence, motion dynamics, and video quality compared to conventional CogVideoX inference. When compared to existing image-to-video approaches [34, 60, 63, 64],
268
+
269
+ ![](images/85ea80e17fb2db328856c4e40060e8bdfeb909ee20a9db7ddb7f0ac3becc45cf.jpg)
270
+
271
+ ![](images/4d54fede84a9cdb08bd56346e19bb77ba17d097e9198987ba742935e7f881504.jpg)
272
+
273
+ ![](images/4cfd9aa454fe78e04bb3bbbfa3d7dd7f1b50507acb76e5f396c0aaebff231199.jpg)
274
+
275
+ MagicMirror demonstrates superior identity consistency across frames while preserving natural motion. Our method also achieves enhanced dynamic range and text alignment compared to ID-Animator [20] and ConsisID [69], which exhibit limitations in motion variety and prompt adherence.
276
+
277
+ To complement our quantitative metrics, we conducted a comprehensive user study to evaluate the perceptual quality of generated results. The study involved 173 participants who assessed the outputs across four key aspects: motion
278
+
279
+ ![](images/309b0d9e97fbbdaf36a628bc444e9c1ebbe2730df6d5d4fafac20caa744a02fc.jpg)
280
+ Reference
281
+
282
+ ![](images/61f9492eee3654417f1b9b1320cf5f7778afc080d108cf5681adab1c13f38d58.jpg)
283
+
284
+ ![](images/ce386a340c205408a3fe19085d55eb3ffd8e82ff52b31e2bb157d708be183a02.jpg)
285
+ Reference
286
+
287
+ ![](images/0506b0389930f7a9d9b04a7d143c19c6f9a3bb4cbd94ec5e8aa0befe173e62eb.jpg)
288
+
289
+ ![](images/729b207006b12457eeff6b09fce2bd7cee6f69b8f714160ed3aa1dd6aa49ae6c.jpg)
290
+
291
+ ![](images/0de2064699d18d1913e83c413e828d19e4c27c6429269cc433f010ac3708054d.jpg)
292
+ Figure 7. Examples for ablation studies on modules and training strategies.
293
+
294
+ ![](images/5701672de8ad759ac4990982eec0b3bb701becfb8e56a4ed933aefea1f23f6e7.jpg)
295
+
296
+ ![](images/7d39582eb905f6e8f6872e81a5dc96b40cd682037ad8ad54b83c82140859feb2.jpg)
297
+
298
+ <table><tr><td>Models</td><td>Visual Quality↑</td><td>Text Alignment↑</td><td>Dynamic Degree↑</td><td>ID Similarity↑</td></tr><tr><td>DynamiCrafter [60]</td><td>6.03</td><td>7.29</td><td>4.85</td><td>5.87</td></tr><tr><td>EasyAnimate-I2V [63]</td><td>6.62</td><td>8.21</td><td>5.57</td><td>6.01</td></tr><tr><td>CogVideoX-I2V [64]</td><td>6.86</td><td>8.31</td><td>6.55</td><td>6.22</td></tr><tr><td>ID-Animator [20]</td><td>5.63</td><td>6.37</td><td>4.06</td><td>6.70</td></tr><tr><td>ConsisID [69]</td><td>6.43</td><td>8.35</td><td>6.23</td><td>5.55</td></tr><tr><td>MagicMirror</td><td>6.97</td><td>8.88</td><td>7.02</td><td>6.39</td></tr></table>
299
+
300
+ Table 2. User study results. We highlight the best and the second best results for each metric.
301
+
302
+ <table><tr><td>Exp.</td><td>x̂id</td><td>xface</td><td>mface</td><td>m̂</td><td>Pretrain</td><td>txt-align↑</td><td>FMinter↑</td><td>ID↑</td></tr><tr><td>A [identity branch]</td><td>✓</td><td></td><td></td><td></td><td></td><td>0.238</td><td>0.572</td><td>0.865</td></tr><tr><td>B [structural branch]</td><td></td><td>✓</td><td></td><td></td><td></td><td>0.240</td><td>0.584</td><td>0.869</td></tr><tr><td>C [dual branch]</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>0.239</td><td>0.654</td><td>0.870</td></tr><tr><td>D [Eq.6, mface]</td><td></td><td>✓</td><td>✓</td><td></td><td></td><td>0.241</td><td>0.563</td><td>0.872</td></tr><tr><td>E [Eq.7, m̂txt, m̂vid]</td><td>✓</td><td></td><td></td><td>✓</td><td></td><td>0.242</td><td>0.696</td><td>0.875</td></tr><tr><td>F [w/o CAN]</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>0.236</td><td>0.568</td><td>0.886</td></tr><tr><td>G [w/o pretrain]</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>0.241</td><td>0.559</td><td>0.883</td></tr><tr><td>Full [MagicMirror]</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>0.240</td><td>0.665</td><td>0.911</td></tr></table>
303
+
304
+ Table 3. Ablation study results on the same training scale across multiple settings. Some modules are interdependent.
305
+
306
+ dynamics, text-motion alignment, video quality, and identity consistency. Participants rated each aspect on a scale of 1-10, with results summarized in Tab. 2. As shown in the overall preference scores in Tab. 1, MagicMirror consistently outperforms baseline methods across most evaluated dimensions, demonstrating its superior perceptual quality in human assessment. Regarding video quality and ID similarity, we observed a gap between the designed metrics and human perceptual evaluation for ConsisID [69].
307
+
308
+ # 4.4. Ablation Studies
309
+
310
+ Condition-related Modules. We evaluate our key modules through ablation studies, shown in Tab. 3 and Fig. 7. To ensure a fair comparison, all ablations are conducted at the same training scale, with half the training iterations of the official setting. Experiments using single-branch facial embedding (Exp. A: identity branch only, Exp. B: structural branch only) exhibit limited identity preservation (0.865-0.869 ID similarity). In contrast, the dual-branch strategy (Exp. C) synergizes their complementary strengths and achieves a higher motion metric. The Conditioned Adaptive Normalization (CAN) proves vital for distribution
311
+
312
+ alignment, enhancing identity preservation across frames. The effectiveness of CAN for facial condition injection is further demonstrated in Exp. D, E, and F. Notably, complete CAN removal (Exp. F) causes significant performance degradation $(0.911\rightarrow 0.886)$ , underscoring its necessity for effective identity injection. An extended analysis of CAN's benefits for training convergence and distribution alignment is provided in Appendix B.1-B.2.
313
+
314
+ Training Strategy. Exp. G in Tab. 3 and Fig. 7 also illustrate the impact of different training strategies. Image pretraining is essential for robust identity preservation, while video post-training ensures temporal consistency. Our two-stage training approach achieves optimal results by leveraging the advantages of both phases, generating high ID fidelity videos with dynamic facial motions. Appendix B.3 discusses more details about the training strategy.
315
+
316
+ # 5. Conclusion
317
+
318
+ In this work, we presented MagicMirror, a zero-shot framework for ID-preserving video generation. MagicMirror incorporates dual facial embeddings and Conditioned Adaptive Normalization (CAN) into DiT-based architectures. Our approach enables robust identity preservation and stable training convergence. Extensive experiments demonstrate that MagicMirror generates high-quality personalized videos while maintaining identity consistency from a single reference image, outperforming existing methods across multiple benchmarks and human evaluations.
319
+
320
+ Limitations. While MagicMirror excels at ID-consistent video generation, challenges remain in supporting multiple identities and in preserving fine-grained attributes beyond facial features, such as clothing; these improvements are necessary for practical customized video applications.
321
+
322
+ Acknowledgment. The study was supported in part by the Research Grants Council under the Areas of Excellence scheme grant AoE/E-601/22-R, Hong Kong General Research Fund (14208023), Hong Kong AoE/P-404/18, and the Center for Perceptual and Interactive Intelligence (CPII) Ltd under InnoHK supported by the Innovation and Technology Commission.
323
+
324
+ # Appendix
325
+
326
+ This appendix provides comprehensive technical details and additional results for MagicMirror, encompassing dataset preparation, architectural specifications, implementation, and extensive experimental validations. We include additional qualitative results and in-depth analyses to support our main findings. We strongly encourage readers to examine the project page https://julianjuaner.github.io/projects/Magic-Mirror/ for dynamic video demonstrations. The following contents are organized for efficient navigation.
327
+
328
+ # Appendix Contents
329
+
330
+ A. Experiment Details
331
+
332
+ A.1. Training Data Preparation
333
+ A.2. Test Data Preparation
334
+ A.3. Comparisons
335
+ A.4. Evaluation Metrics
336
+ A.5. Implementation Details
337
+
338
+ B. Additional Discussions
339
+
340
+ B.1. Advantages of CAN
341
+ B.2. Distribution Analysis and Its Impact
342
+ B.3. Two-Stage Training Analysis
343
+ B.4. Limitation Analysis
344
+
345
+ C. Additional Results & Applications
346
+
347
+ C.1. Additional Applications
348
+ C.2. Image Generation Results
349
+ C.3. Video Generation Results
350
+
351
+ D. Acknowledgments
352
+
353
+ # A. Experiment Details
354
+
355
+ # A.1. Training Data Preparation
356
+
357
+ Our training dataset is constructed through a rigorous preprocessing pipeline, as illustrated in Fig. 8. For the image pre-training data, we start by downloading 5 million images from LAION-Face [53], which then undergo strict quality filtering based on face detection confidence scores and resolution requirements. The filtered subset of $107\mathrm{K}$ images is then processed through an image captioner [33], where we exclude images containing text. This results in a curated set of $50\mathrm{K}$ high-quality face image-text pairs. To enhance identity diversity, we incorporate the synthetic SFHQ dataset [3]. To fit the model output, we standardize these images by adding black borders and pairing them with a consistent prompt template: "A squared ID photo of ..., with pure black on two sides." This preprocessing ensures uniformity while maintaining the dataset's diverse identity characteristics.
358
+
359
+ For FFHQ [27], we leverage a state-of-the-art identity-preserving prior PhotoMakerV2 [34] to generate synthetic images of the same identity, but with different face poses.
360
+
361
+ We filter redundant identities using pairwise facial similarity metrics, with prompts sampled from our 50K video keyframe captions. We use the Pexels-400K and Mixkit datasets from [35] to construct image-video pairs. The videos undergo a systematic preprocessing pipeline, including face detection and motion-based filtering to ensure high-quality dynamic content. We generate video descriptions using the CogVLM video captioner [57]. Following our FFHQ processing strategy, we employ PhotoMakerV2 to synthesize identity-consistent images from the detected faces, followed by quality-based filtering.
362
+
363
+ # A.2. Test Data Preparation
364
+
365
+ Face Images Preparation We construct a comprehensive evaluation set for identity preservation assessment across video generation models. Our dataset comprises 50 distinct identities across seven demographic categories: man, woman, elderly man, elderly woman, boy, girl, and baby. The majority of faces are sourced from PubFig dataset [30], supplemented with public domain images for younger categories. Each identity is represented by 1-4 reference images to capture variations in pose and expression.
366
+
367
+ Prompt Preparation Our test prompts are derived from VBench [25], focusing on human-centric actions. For detailed descriptions, we sample from the initial 200 GPT-4-enhanced prompts and select 77 single-person scenarios. Each prompt is standardized with consistent subject descriptors and augmented with the img trigger word for model compatibility. We assign four category-appropriate prompts to each identity, ensuring demographic alignment. For the "baby" category, which lacks representation in VBench, we craft four custom prompts to maintain evaluation consistency across all categories.
368
+
369
+ # A.3. Comparisons
370
+
371
+ ID-Animator [20] We utilize enhanced long prompts for evaluation, although some undergo partial truncation due to CLIP's 77-token input constraint.
372
+
373
+ In our main comparisons (Tab. 1), we evaluated ID-Animator at a resolution of $480 \times 720$ . This choice was made to ensure that SD-based ID-Animator comparisons used matching resolutions, thereby ensuring equal content capacity, a decision justified by the inherent resolution independence of the UNet architecture. To provide a fair and comprehensive evaluation, we additionally present results at the default $512 \times 512$ resolution in Tab. 4. These results confirm that our comparisons remain robust and consistent across different resolution settings.
374
+
375
+ <table><tr><td>Method</td><td>Base</td><td>Resolution</td><td>txt-align↑</td><td>FMinter↑</td><td>ID↑</td><td>Smooth↓</td></tr><tr><td>ID-Animator</td><td>SD1.5</td><td>(480, 720)</td><td>0.211</td><td>0.181</td><td>0.923</td><td>0.515</td></tr><tr><td>ID-Animator</td><td>SD1.5</td><td>(512, 512)</td><td>0.217</td><td>0.179</td><td>0.921</td><td>0.501</td></tr><tr><td>MagicMirror</td><td>CogVideoX</td><td>(480, 720)</td><td>0.240</td><td>0.610</td><td>0.922</td><td>0.484</td></tr></table>
376
+
377
+ Table 4. ID-Animator resolution comparison.
378
+
379
+ ![](images/872b009fe22920009ddf1957c92835d6c8abe2727cd32c55be235a0c7de26431.jpg)
380
+ Figure 8. Detailed training data processing pipeline. Building upon Fig. 5, we illustrate comprehensive filtering criteria, prompt examples, and processing specifications. The data flow is indicated by blue arrows, while filtering rules leading to data exclusion are marked with red arrows.
381
+
382
+ ConsisID [69] We utilize the CogVideoX-5B [64] version. Its base inference settings are aligned with those of our model, and enhanced long prompts are employed to fully leverage its capabilities.
383
+
384
+ CogVideoX-5B-I2V [64] For this image-to-video variant, we first generate reference images using PhotoMakerV2 [34] for each prompt-identity pair. These images, combined with enhanced long prompts, serve as input for video generation.
385
+
386
+ EasyAnimate [63] We evaluate using the same PhotoMakerV2-generated reference images as in our
387
+
388
+ CogVideoX-5B-I2V experiments.
389
+
390
+ DynamiCrafter [60] Due to model-specific resolution requirements, we create a dedicated set of reference images using PhotoMakerV2 that conform to the model's specifications.
391
+
392
+ For image-to-video baselines, although the reference images are generated from enhanced prompts, we deliberately use the original short, concise prompts for video generation. This choice stems from our empirical observation that image-to-video models exhibit a strong semantic bias when processing lengthy prompts. Specifically, these models tend to
393
+
394
+ ![](images/6ab60ca9cb2bf6a3472058735a3feb311746e588695fdbaf7c8447a183b52c57.jpg)
395
+ CogVideoX-I2V:
396
+ A man playing golf
397
+
398
+ ![](images/2e7516bcd35e4dfc4933d8fd9ebe3e41ea13aa67553eb1af57d95a4c1e1d2917.jpg)
399
+ A focused man stands on a lush, emerald-green fairway, wearing a crisp white polo shirt, beige trousers, and a navy cap, with the sun casting a warm glow over the rolling hills. He is playing golf. The camera captures a close-up of his hands gripping the club, showcasing the precision and concentration in his stance. As he swings, the club arcs gracefully through the air, sending the golf ball soaring against a backdrop of clear blue sky and distant trees. The scene shifts to the golfer watching intently as the ball lands on the manicured green, the flag fluttering gently in the breeze, embodying the serene yet competitive spirit of the game.
400
+
401
+ ![](images/736318ee24bc393a3173e2d75f1151360fc334de470073c6cb53f1c521748126.jpg)
402
+ EasyAnimate-I2V:
403
+ A shirtless man climbing
404
+
405
+ ![](images/69477b6b5408c6a64de493e9d1e9f28faeb6b11ad45a1a9a8a9a34ec534519da.jpg)
406
+ A shirtless man with a lean, muscular build ascends a rugged cliff face, his skin glistening with sweat under the bright sun. His determined expression and focused gaze reveal his concentration and skill as he navigates the challenging rock formations. The camera captures the intricate details of his movements, highlighting the tension in his muscles and the precision of his grip. The backdrop of the scene is a vast, open sky, with the distant horizon hinting at the expansive landscape below. As he climbs higher, the play of light and shadow across the rock surface adds depth and drama to the breathtaking ascent.
407
+
408
+ prioritize text alignment over reference-image fidelity, leading to degraded video quality and compromised identity preservation. This trade-off is particularly problematic for our face preservation objectives. We provide visual evidence of this phenomenon in Fig. 9.
409
+
410
+ # A.4. Evaluation Metrics
411
+
412
+ Our evaluation framework combines standard video metrics with face-specific measurements. From VBench [25], we utilize Dynamic Degree for motion naturality and Overall Consistency for text-video alignment. Video quality is assessed using Inception Score from EvalCrafter [36]. For facial fidelity, we measure identity preservation using facial recognition embedding similarity [15] and temporal stability through frame-wise similarity decay.
413
+
414
+ We propose a novel facial dynamics metric to address the limitation of static face generation in existing methods. As shown in Fig. 10, we extract five facial landmarks using RetinaFace [10] and compute two motion scores. $\mathrm{FM}_{\mathrm{ref}}$ measures facial motion relative to the reference image (computed on aspect-ratio-normalized frames to eliminate positional bias).
415
+
416
+ ![](images/33d231e9b1cd5bca6ea15abebcd25e5f06558a87521872f70e52c107221199eb.jpg)
417
+ Figure 9. Impact of prompt length on image-to-video generation. We demonstrate how image-to-video models perform differently with concise versus enhanced prompts. Frames with large artifacts are marked in red. First frame images are generated from enhanced prompts.
418
+ Figure 10. Face Motion (FM) calculation. $\mathrm{FM}_{\mathrm{inter}}$ follows a similar computation across consecutive video frames.
419
+
420
+ $\mathrm{FM}_{\mathrm{inter}}$ quantifies the maximum inter-frame facial motion (computed on original frames to preserve translational movements). This dual-score approach enables a comprehensive assessment of facial dynamics.
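+ A minimal numpy sketch of the two scores, assuming the five RetinaFace [10] landmarks have already been extracted per frame (the normalization and aggregation details are illustrative assumptions):
+
+ ```python
+ import numpy as np
+
+ def fm_scores(ref_lms, frame_lms, ref_size, frame_sizes):
+     """Face Motion scores from five facial landmarks per frame.
+     ref_lms: (5, 2) landmarks of the reference image; frame_lms: (T, 5, 2);
+     *_size(s): (width, height) used to normalize away positional bias."""
+     ref_n = ref_lms / np.asarray(ref_size, dtype=float)
+     frames_n = frame_lms / np.asarray(frame_sizes, dtype=float)[:, None, :]
+     # FM_ref: mean landmark displacement of each frame w.r.t. the reference
+     fm_ref = float(np.linalg.norm(frames_n - ref_n, axis=-1).mean())
+     # FM_inter: landmark displacement between consecutive original frames,
+     # maximized over time to capture the largest facial motion
+     step = np.linalg.norm(frame_lms[1:] - frame_lms[:-1], axis=-1).mean(axis=-1)
+     fm_inter = float(step.max()) if len(step) else 0.0
+     return fm_ref, fm_inter
+ ```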
421
+
422
+ Success rate & failure analysis. Success-rate metrics better demonstrate reliability.
423
+
424
+ ![](images/698199418c81abbfbc1757d01ae98b3b69a312758a34312ab01fe3b66bad63fd.jpg)
425
+
426
+ ![](images/d541660f4a5981657ee97aedf3a0832c62fc06828d76e27632c159b8a0b7bfa8.jpg)
427
+ Figure 11. Detailed implementation of Conditioned Adaptive Normalization. We present the expanded architecture of $\varphi_{\mathrm{cond}}$ (illustrated in the unmasked region above) with comprehensive annotations of input-output tensor dimensions at each transformation.
428
+
429
+ Our additional experiments with 200 videos (50 prompts $\times$ 4 seeds) compared MagicMirror with identity-preservation baselines on success rates (SR) for face recognition, identity check, motion quality, and text alignment. MagicMirror achieves improved SR across most dimensions, though failure-case analysis reveals that motion quality remains the primary limitation, which is predominantly model-dependent.
430
+
431
+ <table><tr><td>Method / SR</td><td>motion quality</td><td>text align</td><td>face recognized</td><td>identity check</td><td>average</td></tr><tr><td>ID-Animator</td><td>11.5%</td><td>60.0%</td><td>93.5%</td><td>82.5%</td><td>61.9%</td></tr><tr><td>ConsisID</td><td>38.5%</td><td>73.5%</td><td>89.5%</td><td>74.5%</td><td>69.0%</td></tr><tr><td>MagicMirror</td><td>44.0%</td><td>75.5%</td><td>98.0%</td><td>81.0%</td><td>74.6%</td></tr></table>
432
+
433
+ Table 5. Success rate comparison.
434
+
435
+ # A.5. Implementation Details
436
+
437
+ Decoupled Facial Embeddings. Our architecture employs two complementary branches: an ID embedding branch based on pre-trained PhotoMakerV2 [34] with a two-token ID-embedding query $q_{\mathrm{id}}$ , and a facial structural embedding branch that extracts detailed features from the same ViT's penultimate layer. The latter branch initializes 32 token embeddings as the facial query $q_{\mathrm{face}}$ . A projection layer aligns both facial modalities before they are input to the diffusion model.
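+ A PyTorch sketch of the two query sets and the alignment projection (the dimensions and the single shared attention pooling are illustrative assumptions; the actual branches build on PhotoMakerV2 and the ViT features as described above):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DecoupledFacialEmbeddings(nn.Module):
+     """Two-token ID query plus 32-token facial-structure query (a sketch)."""
+     def __init__(self, vit_dim=1024, out_dim=3072):
+         super().__init__()
+         self.q_id = nn.Parameter(torch.randn(2, vit_dim))     # ID-embedding query
+         self.q_face = nn.Parameter(torch.randn(32, vit_dim))  # facial query
+         self.pool = nn.MultiheadAttention(vit_dim, num_heads=8, batch_first=True)
+         self.proj = nn.Linear(vit_dim, out_dim)  # align to the diffusion model dim
+
+     def forward(self, vit_tokens):  # (B, N, vit_dim) penultimate-layer features
+         b = vit_tokens.size(0)
+         q = torch.cat([self.q_id, self.q_face]).unsqueeze(0).expand(b, -1, -1)
+         feats, _ = self.pool(q, vit_tokens, vit_tokens)
+         x_id, x_face = feats[:, :2], feats[:, 2:]             # split the two branches
+         return self.proj(x_id), self.proj(x_face)
+ ```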
438
+
439
+ Conditioned Adaptive Normalization. This paragraph elaborates on the design details of the Conditioned Adaptive Normalization (CAN) module, complementing the overview provided in Sec. 3.3 and Fig. 4.
440
+
441
+ ![](images/ba375aeeeeddc848b5de64e8360f5b258bee3ff2abcade8b19eff47433582a53.jpg)
442
+ Figure 12. CAN speeds up the convergence. Without Conditioned Adaptive Normalization, the model cannot fit even simple appearance features, such as hairstyle, during the image pre-training stage.
443
+
444
+ To predict the facial modulation factors $m_{\mathrm{face}}$ , we employ a two-layer MLP architecture, following the implementation structure of the original normalization modules $\varphi_{\{\mathrm{vid},\mathrm{text}\}}$ . The detailed implementation of CAN is illustrated in Fig. 11. Given the facial ID embedding $x_{\mathrm{id}} \in \mathbb{R}^{2 \times c}$ containing two tokens, we first apply a single global projection layer for dimensionality reduction, mapping it to dimension $c_1$ . Subsequently, in each adapted layer, we concatenate this projected embedding with the time embedding $\mathbf{t}$ and the predicted shift factor $\mu_{\mathrm{vid}}^1$ along the channel dimension. An MLP then processes this concatenated representation to produce the final modulation factors. To ensure stable training, all newly introduced modulation predictors are zero-initialized.
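+ A PyTorch sketch of the predictor (the dimensions $c$, $c_1$, the time-embedding size, and the use of three modulation factors are illustrative assumptions):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class CANPredictor(nn.Module):
+     """Predicts the facial modulation residual from the two-token ID embedding,
+     the time embedding, and the predicted video shift factor (a sketch;
+     one such MLP exists per adapted layer)."""
+     def __init__(self, c=2048, c1=256, t_dim=512, hidden=1024, n_factors=3):
+         super().__init__()
+         self.global_proj = nn.Linear(2 * c, c1)       # shared dimensionality reduction
+         self.mlp = nn.Sequential(
+             nn.Linear(c1 + t_dim + hidden, hidden),   # concat along the channel dim
+             nn.SiLU(),
+             nn.Linear(hidden, n_factors * hidden),
+         )
+         # zero-init so the predicted residual starts at zero (stable training)
+         nn.init.zeros_(self.mlp[-1].weight)
+         nn.init.zeros_(self.mlp[-1].bias)
+
+     def forward(self, x_id, t_emb, mu_vid):
+         h = self.global_proj(x_id.flatten(1))         # (B, 2, c) -> (B, c1)
+         h = torch.cat([h, t_emb, mu_vid], dim=-1)
+         return self.mlp(h).chunk(3, dim=-1)           # e.g. scale / shift / gate residuals
+ ```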
445
+
446
+ We also tried to use the prediction of CAN directly as the data distribution, but this results in a poor initialization: compared with residual prediction, direct prediction leads to abnormal video generation quality.
447
+
448
+ # B. Additional Discussions
449
+
450
+ # B.1. Advantages of CAN
451
+
452
+ The benefits of CAN in facial condition injection are evident in its ability to enhance training convergence, particularly during the image pre-training stage. As illustrated in Fig. 12, models equipped with CAN achieve significantly improved identity information capture, enabling faster adaptation to appearance attributes. This acceleration in convergence highlights CAN's effectiveness in preserving identity consistency throughout the training process.
453
+
454
+ Furthermore, we specifically design CAN and related modules to be lightweight and avoid altering any pretrained weights of the video DiT, thereby preserving the original model capacity. We evaluate GPU memory utilization, parameter count, and inference latency for generating a 49-frame 480P video. Compared to the baseline model, the additional parameters introduced by MagicMirror are primarily concentrated in the embedding extraction stage, which requires only a single forward pass.
455
+
456
+ ![](images/769a4b6c522f3d0d3465630399f35f3ddbd6782945ec61815e72d463c3dab9b9.jpg)
457
+ Figure 13. Scale distributions of different modalities visualized with t-SNE. Each point represents the scale at a unique timestep-layer index. We also illustrate a shifted variant of the text and video adaptive scales using different colors.
458
+
459
+ As summarized in Tab. 6, compared with the ConsisID [69] and CogVideoX [64] baselines, MagicMirror introduces minimal computational overhead, with only a slight increase in GPU memory consumption and inference time.
460
+
461
+ <table><tr><td>Model</td><td>Video size</td><td>Memory</td><td>Params.</td><td>Latency</td><td>Batch×Iter.</td><td>Data (I+V)</td><td>GPU</td></tr><tr><td>ID-Animator</td><td>(16,512,512)</td><td>8.4 GiB</td><td>1.52B</td><td>11s</td><td>2×58K</td><td>0K+13K</td><td>A100*1</td></tr><tr><td>CogVideoX-5B</td><td>(49,480,720)</td><td>24.9 GiB</td><td>10.5B</td><td>204s</td><td>(0.1-2K)×750K</td><td>2B+35M</td><td>-</td></tr><tr><td>CogVideoX-I2V</td><td>(49,480,720)</td><td>25.9 GiB</td><td>10.6B</td><td>213s</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ConsisID</td><td>(49,480,720)</td><td>41.5 GiB</td><td>11.1B</td><td>213s</td><td>80×1.8K</td><td>0K+130K</td><td>H100*40</td></tr><tr><td>MagicMirror</td><td>(49,480,720)</td><td>28.6 GiB</td><td>12.8B</td><td>209s</td><td>8×9K*</td><td>570K +29K</td><td>A800*8</td></tr></table>
462
+
463
+ Table 6. Computation overhead of MagicMirror. All computations are measured on a single A800 GPU.
464
+
465
+ # B.2. Distribution Analysis and Its Impact
466
+
467
+ We begin by visualizing the predicted modulation scale factors $\sigma$ using t-SNE [54] in Fig. 13. The results show that distinct modalities occupy characteristic distributions across different Transformer layers, and these distributions appear largely invariant to the specific timestep input. In particular, the face modality exhibits a unique pattern, while the conditioned residual $\hat{\sigma}$ introduces targeted shifts away from the baseline distribution. This shift empirically accelerates model convergence when incorporating ID conditions.
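+ A sketch of how such a visualization can be produced (the scale dimensionality and the timestep/layer grid are placeholders for the values collected from the model):
+
+ ```python
+ import numpy as np
+ from sklearn.manifold import TSNE
+
+ # Placeholder scale vectors: one per (modality, timestep, layer) triple.
+ # In practice these are the predicted modulation scales collected from the model.
+ rng = np.random.default_rng(0)
+ modalities = ["video", "text", "face", "face_residual"]
+ points = {(m, t, l): rng.normal(size=128)      # 128-d scale vector (assumed)
+           for m in modalities for t in range(10) for l in range(40)}
+
+ X = np.stack(list(points.values()))
+ emb = TSNE(n_components=2, perplexity=30, init="pca",
+            random_state=0).fit_transform(X)    # (n_points, 2)
+ labels = [k[0] for k in points]                # color each point by its modality
+ ```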
468
+
469
+ Beyond the t-SNE visualization, we further investigate the critical role of distribution alignment by examining how modality-aware data distributions affect generation quality. Specifically, we fine-tuned only the normalization layers $\varphi_{\mathrm{vid}}$ , $\varphi_{\mathrm{txt}}$ of the CogVideoX base model on two distinct datasets—CelebV-Text [66] and our Pexels video collection [2]. As illustrated in Fig. 14, this distribution-specific fine-tuning exerts a substantial influence on the spatial fidelity of generated videos. These observations underscore the importance of aligning modality distributions during training, and they also validate the high quality of our curated video dataset.
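+ A sketch of this modulation-only fine-tuning, selecting trainable parameters by name (the name patterns are placeholders for the actual CogVideoX module names):
+
+ ```python
+ import torch
+
+ def freeze_except_modulation(model, patterns=("adaln", "norm")):
+     """Fine-tune only the adaptive-normalization (modulation) parameters.
+     The name patterns are placeholders; match them to the actual module names."""
+     trainable = []
+     for name, param in model.named_parameters():
+         param.requires_grad = any(p in name.lower() for p in patterns)
+         if param.requires_grad:
+             trainable.append(param)
+     return torch.optim.AdamW(trainable, lr=1e-5)
+ ```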
470
+
471
+ Additionally, we conducted another experiment using our Pexels dataset: we trained only the modulation layers on a version of the dataset with twice the frame rate.
472
+
473
+ Norm Layers Finetuning on CelebV-Text
474
+
475
+ ![](images/97dfde964a7c5cb4c8aeb21711811ad8b6d403eb0479960d489d1654dc57c97d.jpg)
476
+
477
+ Norm Layers Finetuning on Pexels
478
+
479
+ ![](images/8bdb0302a29b273cc9bde3697bdf603203e5b91414ae0b37410641fdebbaf931.jpg)
480
+ Figure 14. Modulation layers reflect data distribution. Finetuning solely the modulation layer weights demonstrates adaptation to distinct data distributions, affecting both spatial fidelity and temporal dynamics.
481
+
482
+ This improved the VBench [25] dynamic motion score from 0.71 to 0.84, which, similar to Experiments E-F in Tab. 3, further verifies the impact of the modulation module on dynamic facial motion.
483
+
484
+ # B.3. Two-Stage Training Analysis
485
+
486
+ In Fig. 15, we present additional ablation results that clarify how each training phase addresses a distinct aspect of identity-preserving video generation. Specifically, the image pre-training phase prioritizes robust identity encoding, ensuring that facial features remain consistent and accurately captured. However, training exclusively on image data leads to color-shift artifacts during video inference, caused by modulation factor inconsistencies across different training stages. By combining these two stages, our final approach aligns both identity representation and color distribution, resulting in dynamic and high-fidelity ID-preserving videos without the artifacts observed in single-stage alternatives.
487
+
488
+ # B.4. Limitation Analysis
489
+
490
+ As discussed in Sec. 5, our approach faces several limitations, particularly in handling multi-person scenarios and preserving fine-grained features. Fig. 16 illustrates two representative failure cases: incomplete transfer of reference character details (such as accessories) and motion artifacts caused by the base model.
491
+
492
+ ![](images/022f1cb06f06e380a54948e25d589da1989df666f97b43c9d4711f1bff004d03.jpg)
493
+ A young woman with curly hair and a professional demeanor is seen engaging in a phone conversation while seated at a desk. She wears a black headset with a microphone, a dark blazer, and a necklace with a heart pendant...
494
+
495
+ ![](images/0eb7615456ea9941dd9012b9c7456d98180ab4c13a88429e2c028c75d2e3715b.jpg)
496
+ w/o video finetune
497
+
498
+ ![](images/99faf3a028e67a0c393f52f310b76de56d1abae6f475fbd6fb7f3c8a7252bb26.jpg)
499
+ w/o image pretrain
500
+
501
+ ![](images/d59d274c36bff5cd994226d35b88def67ac10a8305d6ebb184a1f01b68df7513.jpg)
502
+ Full model
503
+
504
+ ![](images/eb04c549b90e413de586127eb3ce167793de67b7dfb0700db5793904dd2859b3.jpg)
505
+ Figure 15. Examples for ablation studies on training strategies.
506
+
507
+ ![](images/c4c50e78d1ecfd716c1f5ced038e440a1aa78a9fb266beef76b76baf2b770a8c.jpg)
508
+
509
+ ![](images/fc49da43daea8a7f05be6d8db7927017c36fbdaee73cedd1238235a80b5d8c1c.jpg)
510
+
511
+ ![](images/a16da3c6a5b2b87a148b097bc0e18d0d7453d9ecef603530652ea6badad50d5b.jpg)
512
+ (a) Fine-grained Feature Missing
513
+
514
+ ![](images/3ece6bb9f7e1f48ad5e3dedc800e312113d0c16ec39ddc34fe09af76d48ef2aa.jpg)
515
+
516
+ ![](images/d2a1d21da6eadc18de737dc8dfb401b0701f7d208a1b31eaaa859195f19f9b4a.jpg)
517
+
518
+ ![](images/a24b3609d27f1442ad9e79111cb33b61296a7bd445d30662d67ba2bccacfb52d.jpg)
519
+
520
+ ![](images/7dc0b30382631d3586c0ee04faa0dc06bba5226a79c2195c9189b89f35c84ea2.jpg)
521
+
522
+ ![](images/ca86fd5de16949800572de91c71572be81457335fbc432c149939d27ecad8a6d.jpg)
523
+ (b) Video Quality: Motion Artifacts
524
+
525
+ ![](images/2a33cb2622f3ebb752ec4de6fc94e7bfb0be2bb9dab8496aae4b6c75fb27c739.jpg)
526
+ Figure 16. Limitations of MagicMirror. (a) Fine-grained feature preservation failure in facial details and accessories. (b) Motion artifacts in generated videos showing temporal inconsistencies.
527
+
528
+ ![](images/e2fb627b493287e20d2aa666db652402b853b0374e49a11c58d7f8bcafb11277.jpg)
529
+ Figure 17. Additional applications of MagicMirror. We can generate identity-preserved videos across artistic styles and multi-shot videos with consistent characters. More results are presented on the project page.
530
+
531
+ These limitations highlight critical areas for future research in controllable personalized video generation, particularly in maintaining temporal consistency and fine-grained detail preservation.
532
+
533
+ # C. Additional Results & Applications
534
+
535
+ # C.1. Additional Applications
536
+
537
+ Fig. 17 demonstrates two extended capabilities of MagicMirror. First, beyond realistic customized video generation, our framework effectively handles stylized prompts, leveraging CogVideoX's diverse generative capabilities to produce identity-preserved outputs across various artistic styles and visual effects. Furthermore, we show that our method can generate high-quality, temporally consistent multi-shot sequences when maintaining coherent character and style descriptions. We believe these capabilities have significant implications for automated video content creation.
540
+
541
+ ![](images/00541e5e64131a993c4adb5d64b5aba7d7faa5bc3b583386c65d4d57dd5b9fbe.jpg)
542
+ "The synthwave retro, 80s style, sunset colors style, an elderly woman img is reading book."
543
+
544
+ ![](images/bcebe301d1611baee0516c5f8b10351d187c0084c507096bea954a13ba07a57c.jpg)
545
+
546
+ ![](images/b3bf25803703105bcce92abe8b42d30d97dde5eb4c2e48a58eb7515dc6f3bcd3.jpg)
547
+ "Art nouveau, organic curves, floral patterns style a male police officer talking on the radio"
548
+
549
+ ![](images/3b59d5279ea7ad8773ac18be2a8af499fd819dd1d313184813987bdd2f1a3bfc.jpg)
550
+ (a) Videos with style-specific prompts
551
+
552
+ ![](images/f64e9a28bb0ad0bb78b556fba0267316c4e1dee357f7b5fc2353604c2d597d69.jpg)
553
+
554
+ ![](images/81766c1e544fb4b9db5d2eabeb51ef60384c60191d583182cd3207e73cc14230.jpg)
555
+ A serene woman with delicate features, wearing a flowing white blouse
556
+
557
+ - practices gentle yoga stretches...
558
+ - sits at her kitchen counter bathed in morning light...
559
+ - is working at her writing desk near a window.
560
+
561
+ ![](images/78ae3a957d6dbcb690a16f3f55e09488ccba6a4e9b22e5d72edd5cd3ad4f219c.jpg)
562
+
563
+ ![](images/65a54924194cfe51685179ea97ab96df351d61d6cb2cadb25e4afb65e1bb2d79.jpg)
564
+
565
+ ![](images/7e356aadad7d8ebdf15e1134bd7c5ecdaccdbcbe17b71a7915d243d40a0cc1da.jpg)
566
+
567
+ ![](images/ba19a319ce783dc0be0c492ad59e4b37086d7839180668c8ddb670d641920041.jpg)
568
+
569
+ ![](images/356bf4dec0fca16e9ac84a316e77de163e504d29a19df7c2021dc993cfe846e3.jpg)
570
+
571
+ ![](images/2d463d61c1d1bb335c2baaa38f90ed5e02def2c3886432e592b5ad265c0d863b.jpg)
572
+
573
+ ![](images/730b6edfd1ad38b694314b5a429c02fed41f366428abd5239bbbe0f80c0d093a.jpg)
574
+ (b) multi-shot videos with consistent character prompts
575
+
576
+ ![](images/9a1ae17699465b46618e443c6a4111a1d1c8072cc692e031747badea0db93ed0.jpg)
577
+
578
+ ![](images/ac69df96efc4dd5d5716ee5a05a3980f0f33ec716486b2e959736050cd317b70.jpg)
579
+
580
+ # C.2. Image Generation Results
581
+
582
+ MagicMirror demonstrates strong capability in ID-preserving image generation after the image pre-training stage alone. Notably, it achieves superior facial identity fidelity compared to video-finetuned variants, primarily due to optimization constraints in video training (e.g., limited batch sizes and dataset scope). Representative examples are presented in Fig. 18.
585
+
586
+ # C.3. Video Generation Results
587
+
588
+ Additional video generation results and comparative analyses are provided in Figs. 19 and 20, highlighting our method's advantages. Fig. 19 specifically demonstrates the benefits of our one-stage approach over I2V, including superior handling of occluded initial frames, enhanced dynamic range, and improved temporal consistency during facial rotations. In Fig. 20, we provide more results with human faces at different scales.
589
+
590
+ # D. Acknowledgments
591
+
592
+ Social Impact. MagicMirror is designed to facilitate creative and research-oriented video content generation while preserving individual identities. We advocate for responsible use in media production and scientific research, explicitly discouraging the creation of misleading content or the violation of portrait rights. As our framework builds upon the DiT foundation model, existing diffusion-based AI-content detection methods remain applicable.
593
+
594
+ Data Usage. The training data we used is almost entirely sourced from known public datasets, including all image data and most video data. All video data was downloaded and processed through proper channels (i.e., download requests). We implement strict NSFW filtering during the training process to ensure content appropriateness.
595
+
596
+ Figures 18-20 are presented on the following pages.
597
+
598
+ ![](images/0e9ae8aac720c05204f69bedd7aeaa38cd115558df7c21887a8f9ccc26870f11.jpg)
599
+
600
+ ![](images/84d7ab50a45bce4520fee3099719c0d0716f4337402bb8d80c60cd260b134e0e.jpg)
601
+
602
+ ![](images/b486ed523d4532964c9c20bac864c54a47e092e9ab99402cf530120787829ef3.jpg)
603
+
604
+ ![](images/93f4fa048f0316ad0734ebf5037a59576ea52cb58944706433c640815990d848.jpg)
605
+
606
+ ![](images/9fb619e0f3eed52a2f02e594a3197eccdf7699593e5e156491f49385bc38a527.jpg)
607
+
608
+ ![](images/1fc0b6870c3f47f0f77c5d3eb2e7e7640b71bf8459c2a29398f38a0fd874c43b.jpg)
609
+ Ref-ID
610
+
611
+ ![](images/e3e414d842c5bc72e3da5c09fe3a7f1f230ff346669bb9d65894375aa7923821.jpg)
612
+
613
+ ![](images/4f8ca0e49f2bfd24933d66b060269bd6eb2d6e7898341500397c381ac5fafd15.jpg)
614
+
615
+ ![](images/ded0e729dfc9ce7e0e0560aeacc8ff97769b02ca57c55ffeab84a848846bdc44.jpg)
616
+
617
+ ![](images/1d1cbefaa63812dd79e9e0711e8d59727ac83d5fdf7e5658fb0def874cab8e2f.jpg)
618
+
619
+ ![](images/90b66475f424666c23dc14aeeb4048eaa7894090856c7e7cfe4bf9ed50096ecf.jpg)
620
+
621
+ ![](images/19c7286cd08718a616853e23e4d3c530cfd6c7c7b26b9325f8b5e8476ed2468c.jpg)
622
+ Generated Images
623
+
624
+ ![](images/b37a23688c2ce99e46b903b3b2c718eef17911fd69c55f34cab00492c49e739d.jpg)
625
+
626
+ ![](images/ecec8800533bcc71c6c766873b95316152c0270de218d3baace2e5e47dc6bc55.jpg)
627
+
628
+ ![](images/153aa45b3dd2edf5f7eb5c1fe4e2ca43fe3bc459cfb8ecb0a366f21c6604e1b5.jpg)
629
+
630
+ ![](images/0baac6bdc353e0ee9c1675ccd9eae346eddea6c662e3823f58bc3fbe98248076.jpg)
631
+
632
+ ![](images/07c5b1578a08cb8d64695d5a7e3a36e8cc3d8cb5a62d1e85a1462820d7fd50df.jpg)
633
+
634
+ ![](images/514ba0badd61720f99d7cde174e04d259c9a972abb4b5da2267e829a529dc8c5.jpg)
635
+ Ref-ID
636
+
637
+ ![](images/6a5e54022e092a4c47d3e071ccfbd727669b01bcd44709dfd75ea9bab342088a.jpg)
638
+
639
+ ![](images/3cb7c72fb4daa42983df4dd4d3facdc7a179f1ed14ab7db54aaee03830920a1a.jpg)
640
+
641
+ ![](images/5a4c50dc353b50802cbcb3b915375ceaf74c41988af1148bf3a962dc173183ba.jpg)
642
+
643
+ ![](images/9f56c6de2b622a896813b4a45fcc8b9dcfca85820b8dfb92f095661f8452e80f.jpg)
644
+
645
+ ![](images/66992c0c55cbb550ceb453b30f1a349c35188d6863309c931b54c437d2761d73.jpg)
646
+
647
+ ![](images/091ae333fea18d2a7d1cd77f342c82501590e642abe02144cb82a5c49a80b354.jpg)
648
+ Generated Images
649
+
650
+ # Camera Motion
651
+
652
+ ![](images/5c3f94660a497ff4a611d9fe6d607b8f9ef0698b457ab287a8d6b2562fc7b62a.jpg)
653
+ Figure 18. Image generation using MagicMirror. The model in the image pre-training stage captures ID embeddings of the reference ID (Ref-ID), yet overfits to some low-level distributions such as image quality, style, and background.
654
+
655
+ ![](images/cbf1b709b07530b1e5ce92435ac821ff31e04a97665ef554d25d38ae3add83e4.jpg)
656
+
657
+ # Dynamic Facial Movement
658
+
659
+ ![](images/2c510fee0642362bf76b070692be867e9ed3dc24fc44fb90739f4f44b6aff538.jpg)
660
+
661
+ ![](images/f3e888f30075957e5f97585e67f8f9167bc2d30d08e25a729a7c562a5a80b2dd.jpg)
662
+
663
+ # Camera Motion + Dynamic Facial Movement
664
+
665
+ ![](images/470627d2ac1962f9b4fce7a85015ea3688f1918a7abd7cf5933276e425037c98.jpg)
666
+ Figure 19. Advantages over I2V generation. MagicMirror successfully handles challenging scenarios including partially occluded initial frames and maintains identity consistency through complex facial dynamics, addressing limitations of traditional I2V approaches.
667
+
668
+ ![](images/87f0ed2d67511e52ee0cfea5b6b5099ae117bb06366eb2062dfff4dc26720044.jpg)
669
+
670
+ ![](images/a07ff6feb1b48dd83d6b40c8992d07232bc92993347252e6f3c3b2fbd9eb8574.jpg)
671
+ Figure 20. Video generation results. We demonstrate MagicMirror's capability across varying facial scales and compositions. Additional examples and comparative analyses are available on the project page.
672
+
673
+ # References
674
+
675
+ [1] Mixkit: Free assets for your next video project. 5
676
+ [2] Pexels: Free 4k stock videos & full hd video clips to download. 5, 13
677
+ [3] David Beniaguev. Synthetic faces high quality (sfhq) dataset, 2022. 5, 9
678
+ [4] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, Wesam Manassra, Prafulla Dhariwal, Casey Chu, Yunxin Jiao, and Aditya Ramesh. Improving image generation with better captions. Technical report, OpenAI, 2023. 1
679
+ [5] Li Chen, Mengyi Zhao, Yiheng Liu, Mingxu Ding, Yangyang Song, Shizun Wang, Xu Wang, Hao Yang, Jing Liu, Kang Du, et al. Photoverse: Tuning-free image customization with text-to-image diffusion models. arXiv preprint arXiv:2309.05793, 2023. 1, 3
680
+ [6] Hyungjin Chung and Jong Chul Ye. Score-based diffusion models for accelerated mri. Medical image analysis, 80: 102479, 2022. 2
681
+ [7] Hyungjin Chung, Byeongsu Sim, and Jong Chul Ye. Come-closer-diffuse-faster: Accelerating conditional diffusion models for inverse problems through stochastic contraction. In CVPR, pages 12413-12422, 2022. 2
682
+ [8] Gabriele Corso, Hannes Stärk, Bowen Jing, Regina Barzilay, and Tommi Jaakkola. Diffdock: Diffusion steps, twists, and turns for molecular docking. *ICLR*, 2023. 2
683
+ [9] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. 3, 5
684
+ [10] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In CVPR, pages 5203-5212, 2020. 6, 11
685
+ [11] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. NeurIPS, 34:8780-8794, 2021. 2
686
+ [12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 3
687
+ [13] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, pages 12873-12883, 2021. 2
688
+ [14] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. *ICLR*, 2023. 3
689
+ [15] Adam Geitgey. face_recognition, 2017. 6, 11
690
+ [16] Shansan Gong, Mukai Li, Jiangtao Feng, Zhiyong Wu, and LingPeng Kong. Diffuseq: Sequence to sequence text generation with diffusion models. *ICLR*, 2022. 2
691
+ [17] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and
692
+
693
+ Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139-144, 2020. 2, 3
694
+ [18] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Imagine your personalized text-to-image diffusion models without specific tuning. ICLR, 2024. 1, 3
695
+ [19] Zinan Guo, Yanze Wu, Zhuowei Chen, Lang Chen, and Qian He. Pulid: Pure and lightning id customization via contrastive alignment. NeurIPS, 2024. 1, 3, 4
696
+ [20] Xuanhua He, Quande Liu, Shengju Qian, Xin Wang, Tao Hu, Ke Cao, Keyu Yan, Man Zhou, and Jie Zhang. Id-Animator: Zero-shot identity-preserving human video generation. arXiv preprint arXiv:2404.15275, 2024. 1, 2, 3, 5, 6, 7, 8, 9
697
+ [21] Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. Latent video diffusion models for high-fidelity long video generation. arXiv preprint arXiv:2211.13221, 2022. 3
698
+ [22] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. NeurIPS, 33:6840-6851, 2020. 1, 2
699
+ [23] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. NeurIPS, 35:8633-8646, 2022. 2
700
+ [24] Jiehui Huang, Xiao Dong, Wenhui Song, Hanhui Li, Jun Zhou, Yuhao Cheng, Shutao Liao, Long Chen, Yiqiang Yan, Shengcai Liao, et al. Consistentid: Portrait generation with multimodal fine-grained identity preserving. arXiv preprint arXiv:2404.16771, 2024. 1, 3
701
+ [25] Ziqi Huang, Yinan He, Jiashuo Yu, Fan Zhang, Chenyang Si, Yuming Jiang, Yuanhan Zhang, Tianxing Wu, Qingyang Jin, Nattapol Chanpaisit, et al. Vbench: Comprehensive benchmark suite for video generative models. In CVPR, pages 21807-21818, 2024. 2, 6, 9, 11, 13
702
+ [26] Bowen Jing, Gabriele Corso, Jeffrey Chang, Regina Barzilay, and Tommi Jaakkola. Torsional diffusion for molecular conformer generation. NeurIPS, 35:24240-24253, 2022. 2
703
+ [27] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 2, 3, 5, 9
704
+ [28] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8110-8119, 2020. 2
705
+ [29] Diederik Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. NeurIPS, 34:21696-21707, 2021. 2
706
+ [30] Neeraj Kumar, Alexander C Berg, Peter N Belhumeur, and Shree K Nayar. Attribute and simile classifiers for face verification. In ICCV, pages 365-372. IEEE, 2009. 6, 9
707
+ [31] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 4
708
+ [32] Xiang Li, John Thickstun, Ishaan Gulrajani, Percy S Liang, and Tatsunori B Hashimoto. Diffusion-lm improves controllable text generation. NeurIPS, 35:4328-4343, 2022. 2
709
+
710
+ [33] Yanwei Li, Yuechen Zhang, Chengyao Wang, Zhisheng Zhong, Yixin Chen, Ruihang Chu, Shaoteng Liu, and Jiaya Jia. Mini-gemini: Mining the potential of multi-modality vision language models. arXiv preprint arXiv:2403.18814, 2024. 5, 9
711
+ [34] Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, MingMing Cheng, and Ying Shan. Photomaker: Customizing realistic human photos via stacked id embedding. In CVPR, pages 8640-8650, 2024. 1, 2, 3, 4, 5, 6, 7, 9, 10, 12
712
+ [35] Bin Lin, Yunyang Ge, Xinhua Cheng, Zongjian Li, Bin Zhu, Shaodong Wang, Xianyi He, Yang Ye, Shenghai Yuan, Liuhan Chen, Tanghui Jia, Junwu Zhang, Zhenyu Tang, Yatian Pang, Bin She, Cen Yan, Zhiheng Hu, Xiaoyi Dong, Lin Chen, Zhang Pan, Xing Zhou, Shaoling Dong, Yonghong Tian, and Li Yuan. Open-sora plan: Open-source large video generation model, 2024. 3, 9
713
+ [36] Yaofang Liu, Xiaodong Cun, Xuebo Liu, Xintao Wang, Yong Zhang, Haoxin Chen, Yang Liu, Tieyong Zeng, Raymond Chan, and Ying Shan. Evalcrafter: Benchmarking and evaluating large video generation models. In CVPR, pages 22139-22149, 2024. 6, 11
714
+ [37] LuChen. Video ocean - filmmaking for everyone. 2
715
+ [38] Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024. 3
716
+ [39] Ze Ma, Daquan Zhou, Chun-Hsiao Yeh, Xue-She Wang, Xiuyu Li, Huanrui Yang, Zhen Dong, Kurt Keutzer, and Jiashi Feng. Magic-me: Identity-specific video customized diffusion. arXiv preprint arXiv:2402.09368, 2024. 1, 3, 5
717
+ [40] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. PMLR, 2021. 2
718
+ [41] OpenAI. Video generation models as world simulators. 3
719
+ [42] William Peebles and Saining Xie. Scalable diffusion models with transformers. In ICCV, pages 4195-4205, 2023. 2, 3, 5
720
+ [43] Bohao Peng, Jian Wang, Yuechen Zhang, Wenbo Li, MingChang Yang, and Jiaya Jia. Controlnext: Powerful and efficient control for image and video generation. arXiv preprint arXiv:2408.06070, 2024. 3
721
+ [44] Xu Peng, Junwei Zhu, Boyuan Jiang, Ying Tai, Donghao Luo, Jiangning Zhang, Wei Lin, Taisong Jin, Chengjie Wang, and Rongrong Ji. Portraitbooth: A versatile portrait model for fast identity-preserved personalization. In CVPR, pages 27080-27090, 2024. 1, 3
722
+ [45] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763. PMLR, 2021. 3
723
+ [46] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In ICML, pages 8821-8831. Pmlr, 2021. 2
724
+ [47] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding
725
+
726
+ in style: a stylegan encoder for image-to-image translation. In CVPR, pages 2287-2296, 2021. 3
727
+ [48] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 1, 2, 4
728
+ [49] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In MICCAI, pages 234-241. Springer, 2015. 2
729
+ [50] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, pages 22500-22510, 2023. 2, 3, 4
730
+ [51] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS, 35:36479-36494, 2022. 1
731
+ [52] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. NeurIPS, 29, 2016. 6
732
+ [53] Christoph Schuhmann, Robert Kaczmarczyk, Aran Komatsuzaki, Aarush Katta, Richard Vencu, Romain Beaumont, Jenia Jitsev, Theo Coombes, and Clayton Mullis. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. In NeurIPS Workshop Datacentric AI, number FZJ2022-00923. Jülich Supercomputing Center, 2021. 5, 9
733
+ [54] Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 9(11), 2008. 2, 13
734
+ [55] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NeurIPS, 2017. 3
735
+ [56] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, and Anthony Chen. Instantid: Zero-shot identity-preserving generation in seconds. arXiv preprint arXiv:2401.07519, 2024. 1, 3
736
+ [57] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Xixuan Song, et al. Cogvlm: Visual expert for pretrained language models. arXiv preprint arXiv:2311.03079, 2023. 6, 9
737
+ [58] Xintao Wang, Yu Li, Honglun Zhang, and Ying Shan. Towards real-world blind face restoration with generative facial prior. In CVPR, pages 9168-9178, 2021. 3
738
+ [59] Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. Motionctrl: A unified and flexible motion controller for video generation. In SIGGRAPH, pages 1-11, 2024. 3
739
+ [60] Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Xintao Wang, Tien-Tsin Wong, and Ying Shan. Dynamicrafter: Animating open-domain images with video diffusion priors. ECCV, 2024. 1, 3, 6, 7, 8, 10
740
+ [61] Jinbo Xing, Long Mai, Cusuh Ham, Jiahui Huang, Aniruddha Mahapatra, Chi-Wing Fu, Tien-Tsin Wong, and Feng Liu. Motioncanvas: Cinematic shot design with controllable image-to-video generation. arXiv preprint arXiv:2502.04299, 2025. 3
741
+
742
+ [62] Zhen Xing, Qijun Feng, Haoran Chen, Qi Dai, Han Hu, Hang Xu, Zuxuan Wu, and Yu-Gang Jiang. A survey on video diffusion models. ACM Computing Surveys, 2023. 2
743
+ [63] Jiaqi Xu, Xinyi Zou, Kunzhe Huang, Yunkuo Chen, Bo Liu, MengLi Cheng, Xing Shi, and Jun Huang. Easyanimate: A high-performance long video generation method based on transformer architecture. arXiv preprint arXiv:2405.18991, 2024. 1, 3, 6, 7, 8, 10
744
+ [64] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 1, 2, 3, 4, 6, 7, 8, 10, 13
745
+ [65] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 3, 4, 5
746
+ [66] Jianhui Yu, Hao Zhu, Liming Jiang, Chen Change Loy, Weidong Cai, and Wayne Wu. Celebv-text: A large-scale facial text-video dataset. In CVPR, pages 14805-14814, 2023. 2, 13
747
+ [67] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. *ICLR*, 2 (3):5, 2024. 2
748
+ [68] Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024. 3
749
+ [69] Shenghai Yuan, Jinfa Huang, Xianyi He, Yunyuan Ge, Yujun Shi, Liuhan Chen, Jiebo Luo, and Li Yuan. Identity-preserving text-to-video generation by frequency decomposition. arXiv preprint arXiv:2411.17440, 2024. 1, 3, 6, 7, 8, 10, 12
750
+ [70] Yan Zeng, Guoqiang Wei, Jiani Zheng, Jiaxin Zou, Yang Wei, Yuchen Zhang, and Hang Li. Make pixels dance: High-dynamic video generation. In CVPR, 2024. 3
751
+ [71] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In ICCV, pages 3836-3847, 2023. 3
752
+ [72] Yuechen Zhang, Jinbo Xing, Bin Xia, Shaoteng Liu, Bohao Peng, Xin Tao, Pengfei Wan, Eric Lo, and Jiaya Jia. Training-free efficient video generation via dynamic token carving. arXiv preprint arXiv:2505.16864, 2025. 3
753
+ [73] Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. Open-sora: Democratizing efficient video production for all, 2024. 3
2501.03xxx/2501.03931/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fde8bfbbc2bb58fda003d09a2393f9d509acfc854b6408b1e030f7955cb29c30
3
+ size 2582303
2501.03xxx/2501.03931/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03936/a62f678c-729d-427d-9baf-eab6a30b2833_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:858d7083cbea1829573c5b541c78345472bb8be17520070b20faceedbf5d000a
3
+ size 6444045
2501.03xxx/2501.03936/full.md ADDED
@@ -0,0 +1,961 @@
1
+ # PPTAGENT: Generating and Evaluating Presentations Beyond Text-to-Slides
2
+
3
+ Hao Zheng $^{1,2,*}$ , Xinyan Guan $^{1,2,*}$ , Hao Kong $^{3}$ , Jia Zheng $^{1}$ , Weixiang Zhou $^{1}$ , Hongyu Lin $^{1}$ , Yaojie Lu $^{1}$ , Ben He $^{1,2}$ , Xianpei Han $^{1}$ , Le Sun $^{1}$
4
+
5
+ $^{1}$ Chinese Information Processing Laboratory, Institute of Software, Chinese Academy of Sciences
6
+
7
+ $^{2}$ University of Chinese Academy of Sciences
8
+
9
+ 3Shanghai Jiexin Technology
10
+
11
+ {zhenghao2022, guanxinyan2022, zhengjia, weixiang,hongyu,luyaojie}@iscas.ac.cn
12
+
13
+ {xianpei,sunle}@iscas.ac.cn haokong@knowuheart.com benhe@ucas.edu.cn
14
+
15
+ # Abstract
16
+
17
+ Automatically generating presentations from documents is a challenging task that requires accommodating content quality, visual appeal, and structural coherence. Existing methods primarily focus on improving and evaluating the content quality in isolation, overlooking visual appeal and structural coherence, which limits their practical applicability. To address these limitations, we propose PPTAGENT, which comprehensively improves presentation generation through a two-stage, edit-based approach inspired by human workflows. PPTAGENT first analyzes reference presentations to extract slide-level functional types and content schemas, then drafts an outline and iteratively generates editing actions based on selected reference slides to create new slides. To comprehensively evaluate the quality of generated presentations, we further introduce PPTEVAL, an evaluation framework that assesses presentations across three dimensions: Content, Design, and Coherence. Results demonstrate that PPTAGENT significantly outperforms existing automatic presentation generation methods across all three dimensions. The code and data are available at https://github.com/icip-cas/PPTAgent.
18
+
19
+ # 1 Introduction
20
+
21
+ Presentations are a widely used medium for information delivery, valued for their visual effectiveness in engaging and communicating with audiences. However, creating high-quality presentations requires a captivating storyline, well-designed layouts, and rich, compelling content (Fu et al., 2022). Consequently, creating well-rounded presentations requires advanced presentation skills and significant effort. Given the inherent complexity of presentation creation, there is growing interest in automating the presentation generation process (Ge et al., 2025; Maheshwari et al., 2024; Mondal et al., 2024)
22
+
23
+ ![](images/711fcb72b98e9f37468ae75a8eb26594b7737fec007727c81c97de062931e1fe.jpg)
24
+ Figure 1: Comparison between our PPTAGENT approach (left) and the conventional abstractive summarization method (right).
25
+
26
+ by leveraging the generalization capabilities of Large Language Models (LLMs) and Multimodal Large Language Models (MLLMs).
27
+
28
+ Existing approaches typically follow a text-to-slides paradigm, which converts LLM outputs into slides using predefined rules or templates. As shown in Figure 1, prior studies (Mondal et al., 2024; Sefid et al., 2021) tend to treat presentation generation as an abstractive summarization task, focusing primarily on textual content while neglecting the visual-centric nature (Fu et al., 2022) of presentation. This results in text-heavy and monotonous presentations that fail to engage audiences effectively (Barrick et al., 2018).
29
+
30
+ Rather than creating complex presentations from scratch in a single pass, human workflows typically involve selecting exemplary slides as references and then summarizing and transferring key content onto them (Duarte, 2010). Inspired by this process, we propose PPTAGENT, which decomposes slide generation into two phases: selecting the reference slide and editing it step by step. However, achieving such an edit-based approach for presentation generation is challenging.
31
+
32
+ ![](images/788d9e689c4cfb0ce593b49a2061c9bf22b55bd5b5f064611fd64490dc9b9f6c.jpg)
33
+ Stage I: Presentation Analysis
34
+
35
+ ![](images/db692f2957e118c8be3f4d6209ae67a15854e00771ecf841d709e1b5bc480b12.jpg)
36
+ Stage II: Presentation Generation
Figure 2: Overview of the PPTAGENT workflow. Stage I: Presentation Analysis involves analyzing the input presentation to cluster slides into groups and extract their content schemas. Stage II: Presentation Generation generates new presentations guided by the outline, incorporating self-correction mechanisms to ensure robustness.
37
+ Figure 2: Overview of the PPTAGENT workflow. StageI: Presentation Analysis involves analyzing the input presentation to cluster slides into groups and extract their content schemas. Stage II: Presentation Generation generates new presentations guided by the outline, incorporating self-correction mechanisms to ensure robustness.
38
+
39
+ First, due to the layout and modal complexity of presentations, it is difficult for LLMs to directly determine which slides should be referenced. The key challenge lies in enhancing LLMs' understanding of reference presentations' structure and content patterns. Second, most presentations are saved in PowerPoint's XML format, as demonstrated in Figure 11, which is inherently verbose and redundant (Gryk, 2022), making it challenging for LLMs to robustly perform editing operations.
40
+
41
+ To address these challenges, PPTAGENT operates in two stages. Stage I performs a comprehensive analysis of reference presentations to extract functional types and content schemas of slides, facilitating subsequent reference selection and slide generation. Stage II introduces a suite of edit APIs with HTML-rendered representation that simplifies slide modifications through code interaction (Wang et al., 2024b). Furthermore, we implement a self-correction mechanism (Kamoi et al., 2024) that allows LLMs to iteratively refine generated editing actions based on intermediate results and execution feedback, ensuring robust generation. As shown in Figure 2, we first analyze and cluster reference slides into categories (e.g., opening slides, bulletpoint slides). For each new slide, PPTAGENT selects an appropriate reference slide (e.g., opening slide for the first slide) and generates a series of editing actions (e.g., replaceSpan) to modify it.
42
+
43
+ Due to the lack of a comprehensive evaluation framework, we propose PPTEVAL, which adopts the MLLM-as-a-judge paradigm (Chen et al., 2024a) to evaluate presentations across three dimensions: Content, Design, and Coherence (Duarte, 2010). Human evaluations validate the reliability and effectiveness of PPTEVAL. Results demonstrate that PPTAGENT generates high-quality presentations, achieving an average score of 3.67 for the three dimensions in PPTEVAL.
46
+
47
+ Our main contributions can be summarized as follows:
48
+
49
+ - We propose PPTAGENT, a framework that redefines automatic presentation generation as an edit-based process guided by reference presentations.
50
+ - We introduce PPTEVAL, a comprehensive evaluation framework that assesses presentations across three dimensions: Content, Design, and Coherence.
51
+ - We release the PPTAGENT and PPTEVAL codebases, along with a new presentation dataset Zenodo10K, to support future research.
52
+
53
+ # 2 PPTAGENT
54
+
55
+ In this section, we formulate the presentation generation task and introduce our proposed PPTAGENT framework, which consists of two distinct stages. In stage I, we analyze reference presentations through slide clustering and schema extraction, providing a comprehensive understanding of input presentations that facilitates subsequent reference selection and slide generation. In stage II, we leverage analyzed reference presentations to select reference slides and generate the target presentation for the input document through an iterative editing process. An overview of our workflow is illustrated in Figure 2.
58
+
59
+ # 2.1 Problem Formulation
60
+
61
+ PPTAGENT is designed to generate an engaging presentation through an edit-based process. We provide formal definitions for the conventional method and PPTAGENT to highlight their key differences.
62
+
63
+ The conventional method (Bandyopadhyay et al., 2024; Mondal et al., 2024) for creating each slide $S$ is formalized in Equation 1. Given the input content $C$ , it generates $n$ slide elements, each defined by its type, content, and styling attributes, such as (Textbox, "Hello", {border, size, position, ...}).
64
+
65
+ $$
66
+ \boldsymbol{S} = \{e_1, e_2, \dots, e_n\} = f(C) \tag{1}
67
+ $$
68
+
69
+ While this conventional method is straightforward, it requires manual specification of styling attributes, which is challenging for automated generation (Guo et al., 2023). Instead of creating slides from scratch, PPTAGENT generates a sequence of executable actions to edit reference slides, thereby preserving their well-designed layouts and styles. As shown in Equation 2, given the input content $C$ and the $j$ -th reference slide $R_{j}$ , which is selected from the reference presentation, PPTAGENT generates a sequence of $m$ executable actions, where each action $a_{i}$ corresponds to a line of executable code.
70
+
71
+ $$
72
+ \boldsymbol{A} = \{a_1, a_2, \dots, a_m\} = g(C, R_j) \tag{2}
73
+ $$
74
+
75
+ # 2.2 Stage I:Presentation Analysis
76
+
77
+ In this stage, we analyze the reference presentation to guide the reference selection and slide generation. Firstly, we categorize slides based on their structural and layout characteristics through slide clustering. Then, we extract content schemas to identify the content organization of the slide in each cluster, providing a comprehensive description of slide elements.
78
+
79
+ Slide Clustering Slides can be categorized into two main types based on their functionalities: structural slides that support the presentation's organization (e.g., opening slides) and content slides that convey specific information (e.g., bullet-point slides).
80
+
81
+ To distinguish between these two types, we employ LLMs to segment the presentation accordingly. For structural slides, we leverage LLMs' long-context capability to analyze all slides in the input presentation, identifying structural slides, labeling their structural roles based on their textual features, and grouping them accordingly. For content slides, we first convert them into images and then apply a hierarchical clustering approach to group similar slide images. Subsequently, we utilize MLLMs to analyze the converted slide images, identifying layout patterns within each cluster. Further details are provided in Appendix D.
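+ A sketch of the content-slide grouping step (the embedding source, the distance metric, and the threshold are illustrative assumptions):
+
+ ```python
+ import numpy as np
+ from scipy.cluster.hierarchy import linkage, fcluster
+
+ def cluster_content_slides(image_embeddings, threshold=0.5):
+     """Group content slides by visual similarity.
+     image_embeddings: (n_slides, d) slide-image features."""
+     Z = linkage(image_embeddings, method="average", metric="cosine")
+     labels = fcluster(Z, t=threshold, criterion="distance")  # 1-based cluster ids
+     clusters = {}
+     for idx, lab in enumerate(labels):
+         clusters.setdefault(int(lab), []).append(idx)
+     return clusters  # each cluster is then described by the MLLM
+ ```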
82
+
83
+ Schema Extraction After clustering, we further analyze the content schemas to facilitate slide generation. Specifically, we define an extraction framework where each element is represented by its category, description, and content. This framework enables a clear and structured representation of each slide. Detailed instructions are provided in Appendix F, with an example of the schema shown below.
84
+
85
+ <table><tr><td>Category</td><td>Description</td><td>Data</td></tr><tr><td>Title</td><td>Main title</td><td>Sample Library</td></tr><tr><td>Date</td><td>Date of the event</td><td>15 February 2018</td></tr><tr><td>Image</td><td>Primary image to illustrate the slide</td><td>Picture: Children in a library with…</td></tr></table>
86
+
87
+ # 2.3 Stage II: Presentation Generation
88
+
89
+ PPTAGENT first generates an outline specifying reference slides and relevant content for each new slide. Then, it iteratively edits elements from reference slides through edit APIs to create the target presentation.
90
+
91
+ Outline Generation As shown in Figure 2, we utilize LLM to generate a structured outline consisting of multiple entries. Each entry represents a new slide, containing the reference slide and relevant document content of the new slide. The reference slide is selected based on the slide-level functional description in Stage I, while the relevant document content is identified based on the input document.
92
+
93
+ Slide Generation Guided by the structured outline, slides are generated iteratively based on the corresponding entries. For each slide, LLMs incorporate textual content and extracted image captions from the input document. The new slide adopts the layout of the reference slide while ensuring consistency in content and structural clarity.
94
+
95
+ Specifically, to generate a new slide based on the corresponding entry in the outline, we design edit-based APIs to enable LLMs to edit the reference slide. As shown below, these APIs support editing, removing, and duplicating slide elements. Moreover, given the complexity of the XML format in presentations, which is demonstrated in Appendix E, we render the reference slide into an HTML representation (Feng et al., 2024), offering a more precise and intuitive format for easier understanding. This HTML-based format, combined with our edit-based APIs, enables LLMs to perform precise content modifications on reference slides.
96
+
97
+ <table><tr><td>Function Name</td><td>Description</td></tr><tr><td>del_span</td><td>Delete a span.</td></tr><tr><td>del_image</td><td>Delete an image element.</td></tr><tr><td>clone_paragraph</td><td>Create a duplicate of an existing paragraph.</td></tr><tr><td>replace_span</td><td>Replace the content of a span.</td></tr><tr><td>replace_image</td><td>Replace the source of an image.</td></tr></table>
98
+
99
+ Furthermore, to enhance robustness during the editing process, we implement a self-correction mechanism (Kamoi et al., 2024). Specifically, the generated editing actions are executed within a REPL<sup>1</sup> environment. When actions fail to apply to reference slides, the REPL provides execution feedback<sup>2</sup> to assist LLMs in refining their actions. The LLM then analyzes this feedback to adjust its editing actions (Guan et al., 2024; Wang et al., 2024b), enabling iterative refinement until a valid slide is generated or the maximum retry limit is reached.
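+ A sketch of this generate-execute-refine loop (the Slide editing object and the LLM interface are assumptions; the API names follow the table above):
+
+ ```python
+ MAX_RETRIES = 2
+
+ def generate_slide(llm, ref_slide, html_repr, content, max_retries=MAX_RETRIES):
+     """Generate editing actions, execute them, and self-correct on failure."""
+     feedback = None
+     for _ in range(max_retries + 1):
+         actions = llm.generate_actions(html_repr, content, feedback)
+         slide = ref_slide.copy()
+         namespace = {  # the edit APIs exposed to the generated code
+             "del_span": slide.del_span, "del_image": slide.del_image,
+             "clone_paragraph": slide.clone_paragraph,
+             "replace_span": slide.replace_span, "replace_image": slide.replace_image,
+         }
+         try:
+             for line in actions:       # each action is one line of executable code
+                 exec(line, namespace)  # run in a REPL-like environment
+             return slide               # all edits applied successfully
+         except Exception as err:       # execution feedback drives the next attempt
+             feedback = f"action {line!r} failed: {err}"
+     raise RuntimeError("slide generation failed after self-correction")
+ ```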
100
+
101
+ # 3 PPTEVAL
102
+
103
+ We introduce PPTEVAL, a comprehensive framework that evaluates presentation quality from multiple dimensions, addressing the absence of reference-free evaluation for presentations. The framework provides both numeric scores (1-to-5 scale) and detailed rationales to justify each dimension's assessment.
104
+
105
+ Grounded in established presentation design principles (Duarte, 2008, 2010), our evaluation framework focuses on three key dimensions, as summarized in Table 1. Specifically, given a generated presentation, we assess the content and design at the slide level, while evaluating coherence across the entire presentation.
106
+
107
+ ![](images/f4263d3086f6b4a4b092975fb23628819caa0517dc55ec84644ce3de1ac85e66.jpg)
108
+ Figure 3: PPTEVAL assesses presentations from three dimensions: content, design, and coherence.
109
+
110
+ The complete evaluation process is illustrated in Figure 3, with detailed scoring criteria and representative examples provided in Appendix B.
111
+
112
+ <table><tr><td>Dimension</td><td>Criteria</td></tr><tr><td>Content</td><td>Text should be concise and grammatically sound, supported by relevant images.</td></tr><tr><td>Design</td><td>Harmonious colors and proper layout ensure readability, while visual elements like geometric shapes enhance the overall appeal.</td></tr><tr><td>Coherence</td><td>Structure develops progressively, incorporating essential background information.</td></tr></table>
113
+
114
+ Table 1: The scoring criteria of dimensions in PPTEVAL, all evaluated on a 1-5 scale.
115
+
116
+ # 4 Experiment
117
+
118
+ # 4.1 Dataset
119
+
120
+ Existing presentation datasets, such as Fu et al. (2022); Mondal et al. (2024); Sefid et al. (2021); Sun et al. (2021), have two main issues. First, they are mostly stored in PDF or JSON formats, which leads to a loss of semantic information, such as structural relationships and styling attributes of elements. Additionally, these datasets primarily consist of academic presentations in artificial intelligence, limiting their diversity. To address these limitations, we introduce Zenodo10K, a new dataset sourced from Zenodo (European Organization For Nuclear Research and OpenAIRE, 2013), which hosts diverse artifacts across domains, all under clear licenses. We have curated 10,448 presentations from this source and made them publicly available to support further research.
121
+
122
+ Following Mondal et al. (2024), we sample 50 presentations in five domains to serve as reference presentations. In addition, we collected 50 documents from the same domains to be used as input documents. The sampling criteria and preprocessing details are provided in Appendix A, while the dataset statistics are summarized in Table 2.
123
+
124
+ <table><tr><td rowspan="2">Domain</td><td colspan="2">Document</td><td colspan="3">Presentation</td></tr><tr><td>#Chars</td><td>#Figs</td><td>#Chars</td><td>#Figs</td><td>#Pages</td></tr><tr><td>Culture</td><td>12,708</td><td>2.9</td><td>6,585</td><td>12.8</td><td>14.3</td></tr><tr><td>Education</td><td>12,305</td><td>5.5</td><td>3,993</td><td>12.9</td><td>13.9</td></tr><tr><td>Science</td><td>16,661</td><td>4.8</td><td>5,334</td><td>24.0</td><td>18.4</td></tr><tr><td>Society</td><td>13,019</td><td>7.3</td><td>3,723</td><td>9.8</td><td>12.9</td></tr><tr><td>Tech</td><td>18,315</td><td>11.4</td><td>5,325</td><td>12.9</td><td>16.8</td></tr></table>
125
+
126
+ Table 2: Statistics of the dataset used in our experiments, detailing the number of characters ('#Chars') and figures ('#Figs'), as well as the number of pages ('#Pages').
127
+
128
+ # 4.2 Implementation Details
129
+
130
+ PPTAGENT is implemented with three models: GPT-4o-2024-08-06 (GPT-4o), Qwen2.5-72B-Instruct (Qwen2.5, Yang et al., 2024), and Qwen2-VL-72B-Instruct (Qwen2-VL, Wang et al., 2024a). These models are categorized according to the specific modalities they handle, whether textual or visual, as indicated by their subscripts. Specifically, we define configurations as combinations of a language model (LM) and a vision model (VM), such as Qwen2.5LM+Qwen2-VLVM.
131
+
132
+ Experiment data covers 5 domains, each with 10 input documents and 10 reference presentations, totaling 500 presentation generation tasks per configuration (5 domains $\times$ 10 input documents $\times$ 10 reference presentations). Each slide generation allows a maximum of two self-correction iterations. We use Chen et al. (2024b) and Wu et al. (2020) to compute the text and image embeddings, respectively. All open-source LLMs are deployed using the vLLM framework (Kwon et al., 2023) on NVIDIA A100 GPUs. The total computational cost for the experiments is approximately 500 GPU hours.
133
+
134
+ # 4.3 Baselines
135
+
136
+ We choose the following baseline methods: DocPres (Bandyopadhyay et al., 2024) proposes a rule-based approach that generates narrative-rich slides through multiple stages and incorporates images via a similarity-based mechanism. KCTV (Cachola et al., 2024) proposes a template-based method that creates slides in an intermediate format before converting them into final presentations using predefined templates. The baseline methods operate without vision models since they do not process visual information. Each configuration generates 50 presentations (5 domains $\times$ 10 input documents), as they do not require reference presentations. Consequently, the FID metric is excluded from their evaluation.
137
+
138
+ # 4.4 Evaluation Metrics
139
+
140
+ We evaluated the presentation generation using the following metrics:
141
+
142
+ - Success Rate (SR) evaluates the robustness of presentation generation (Wu et al., 2024), calculated as the percentage of successfully completed tasks. For PPTAGENT, success requires the generation of all slides without execution errors after self-correction. For KCTV, success is determined by the successful compilation of the generated LaTeX file. DocPres is excluded from this evaluation due to its deterministic rule-based conversion.
143
+
144
+ - Perplexity (PPL) measures the likelihood of the model generating the given sequence. Using Llama-3-8B (Dubey et al., 2024), we calculate the average perplexity across all slides in a presentation. Lower perplexity scores indicate higher textual fluency (Bandyopadhyay et al., 2024); a computation sketch follows this list.
145
+ - Rouge-L (Lin, 2004) evaluates textual similarity by measuring the longest common subsequence between generated and reference texts. We report the F1 score to balance precision and recall.
146
+ - FID (Heusel et al., 2017) measures the similarity between the generated presentation and the reference presentation in feature space. Due to the limited sample size, we calculate the FID using a 64-dimensional feature vector; a computation sketch follows this list.
147
+ - PPTEVAL employs GPT-4o as the judging model to evaluate presentation quality across three dimensions: content, design, and coherence. We compute content and design scores by averaging across slides, while coherence is assessed at the presentation level.
148
+
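+ As a rough sketch of the PPL metric (assuming the slide text has already been extracted, and using Hugging Face transformers with the Llama-3-8B checkpoint named above), per-slide perplexity can be computed from the mean token-level negative log-likelihood:
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ MODEL_ID = "meta-llama/Meta-Llama-3-8B"  # scoring model used in the paper
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
+ model.eval()
+
+ def slide_perplexity(text: str) -> float:
+     """PPL = exp(mean negative log-likelihood over the token sequence)."""
+     ids = tokenizer(text, return_tensors="pt").input_ids
+     with torch.no_grad():
+         loss = model(ids, labels=ids).loss  # mean NLL per token
+     return torch.exp(loss).item()
+
+ def presentation_perplexity(slide_texts: list[str]) -> float:
+     # The paper reports the average perplexity across all slides.
+     return sum(slide_perplexity(t) for t in slide_texts) / len(slide_texts)
+ ```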
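+ Similarly, a minimal sketch of the FID computation (assuming 64-dimensional image features have already been extracted from the rendered slides; the feature extractor itself is not shown) follows the closed-form Fréchet distance between two Gaussian fits:
+
+ ```python
+ import numpy as np
+ from scipy import linalg
+
+ def frechet_distance(feats_gen: np.ndarray, feats_ref: np.ndarray) -> float:
+     """FID = ||mu_g - mu_r||^2 + Tr(S_g + S_r - 2 (S_g S_r)^{1/2})."""
+     mu_g, mu_r = feats_gen.mean(axis=0), feats_ref.mean(axis=0)
+     sigma_g = np.cov(feats_gen, rowvar=False)
+     sigma_r = np.cov(feats_ref, rowvar=False)
+     covmean, _ = linalg.sqrtm(sigma_g @ sigma_r, disp=False)
+     covmean = covmean.real  # discard tiny imaginary parts from numerical error
+     diff = mu_g - mu_r
+     return float(diff @ diff + np.trace(sigma_g + sigma_r - 2.0 * covmean))
+ ```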
149
+ # 4.5 Overall Result
150
+
151
+ Table 3 presents the performance comparison between PPTAGENT and baselines, revealing that:
152
+
153
+ PPTAGENT Significantly Improves Overall Presentation Quality. PPTAGENT demonstrates statistically significant performance improvements over baseline methods across all three dimensions of PPTEVAL. Compared to the rule-based baseline (DocPres), PPTAGENT exhibits substantial improvements in both the design and content dimensions (3.34 vs. 2.37, +40.9%; 3.34 vs. 2.98, +12.1%), as presentations generated by the DocPres method show minimal design effort. In comparison with the template-based baseline (KCTV), PPTAGENT also achieves notable improvements in both design and content (3.34 vs. 2.95, +13.2%; 3.28 vs. 2.55, +28.6%), underscoring the efficacy of the edit-
154
+
155
+ <table><tr><td colspan="2">Configuration</td><td colspan="4">Existing Metrics</td><td colspan="4">PPTEVAL</td></tr><tr><td>Language Model</td><td>Vision Model</td><td>SR(%)↑</td><td>PPL↓</td><td>ROUGE-L ↑</td><td>FID↓</td><td>Content↑</td><td>Design↑</td><td>Coherence↑</td><td>Avg.↑</td></tr><tr><td colspan="10">DocPres (rule-based)</td></tr><tr><td>GPT-4oLM</td><td>-</td><td>-</td><td>76.42</td><td>13.28</td><td>-</td><td>2.98</td><td>2.33</td><td>3.24</td><td>2.85</td></tr><tr><td>Qwen2.5LM</td><td>-</td><td>-</td><td>100.4</td><td>13.09</td><td>-</td><td>2.96</td><td>2.37</td><td>3.28</td><td>2.87</td></tr><tr><td colspan="10">KCTV (template-based)</td></tr><tr><td>GPT-4oLM</td><td>-</td><td>80.0</td><td>68.48</td><td>10.27</td><td>-</td><td>2.49</td><td>2.94</td><td>3.57</td><td>3.00</td></tr><tr><td>Qwen2.5LM</td><td>-</td><td>88.0</td><td>41.41</td><td>16.76</td><td>-</td><td>2.55</td><td>2.95</td><td>3.36</td><td>2.95</td></tr><tr><td colspan="10">PPTAGENT (ours)</td></tr><tr><td>GPT-4oLM</td><td>GPT-4oVM</td><td>97.8</td><td>721.54</td><td>10.17</td><td>7.48</td><td>3.25</td><td>3.24</td><td>4.39</td><td>3.62</td></tr><tr><td>Qwen2-VLLM</td><td>Qwen2-VLVM</td><td>43.0</td><td>265.08</td><td>13.03</td><td>7.32</td><td>3.13</td><td>3.34</td><td>4.07</td><td>3.51</td></tr><tr><td>Qwen2.5LM</td><td>Qwen2-VLVM</td><td>95.0</td><td>496.62</td><td>14.25</td><td>6.20</td><td>3.28</td><td>3.27</td><td>4.48</td><td>3.67</td></tr></table>
156
+
157
+ Table 3: Performance comparison of presentation generation methods, including DocPres, KCTV, and our proposed PPTAGENT. The best and second-best scores are bolded and underlined, respectively. Results are reported using existing metrics, including Success Rate (SR), Perplexity (PPL), Rouge-L, Fréchet Inception Distance (FID), and PPTEVAL.
158
+
159
+ <table><tr><td>Setting</td><td>SR(%)</td><td>Content</td><td>Design</td><td>Coherence</td><td>Avg.</td></tr><tr><td>PPTAGENT</td><td>95.0</td><td>3.28</td><td>3.27</td><td>4.48</td><td>3.67</td></tr><tr><td>w/o Outline</td><td>91.0</td><td>3.24</td><td>3.30</td><td>3.36</td><td>3.30</td></tr><tr><td>w/o Schema</td><td>78.8</td><td>3.08</td><td>3.23</td><td>4.04</td><td>3.45</td></tr><tr><td>w/o Structure</td><td>92.2</td><td>3.28</td><td>3.25</td><td>3.45</td><td>3.32</td></tr><tr><td>w/o CodeRender</td><td>74.6</td><td>3.27</td><td>3.34</td><td>4.38</td><td>3.66</td></tr></table>
160
+
161
+ Table 4: Ablation analysis of PPTAGENT utilizing the Qwen2.5LM+Qwen2-VLVM configuration, demonstrating the contribution of each component.
162
+
163
+ based paradigm. Most notably, PPTAGENT shows a significant enhancement in the coherence dimension (4.48 vs. 3.28, +36.6% for DocPres; 4.48 vs. 3.57, +25.5% for KCTV). This improvement can be attributed to PPTAGENT's comprehensive analysis of the structural role of slides.
164
+
165
+ PPTAGENT Exhibits Robust Generation Performance. Our approach empowers LLMs to produce well-rounded presentations with a remarkable success rate, achieving $\geq 95\%$ for both Qwen2.5LM+Qwen2-VLVM and GPT-4oLM+GPT-4oVM, a significant improvement over KCTV (97.8% vs. 88.0%). Moreover, the detailed performance of Qwen2.5LM+Qwen2-VLVM across various domains is illustrated in Table 8, underscoring the versatility and robustness of our approach.
166
+
167
+ PPTEVAL Demonstrates Superior Evaluation Capability. Traditional metrics like PPL and ROUGE-L demonstrate inconsistent evaluation trends compared to PPTEVAL. For instance, KCTV achieves a high ROUGE-L (16.76) but a low content score (2.55), while our method shows the opposite trend with ROUGE-L (14.25) and a content score of 3.28. Moreover, we observe that the ROUGE score overemphasizes textual alignment with source documents, potentially compromising the expressiveness of presentations. Most importantly, PPTEVAL advances beyond existing metrics through its dual capability of reference-free design assessment and holistic evaluation of presentation coherence. Further agreement evaluation is shown in Section 5.5.
170
+
171
+ # 5 Analysis
172
+
173
+ # 5.1 Ablation Study
174
+
175
+ We conducted ablation studies across four settings: (1) randomly selecting a slide as the reference (w/o Outline), (2) omitting structural slides during outline generation (w/o Structure), (3) replacing the slide representation with the method proposed by Guo et al. (2023) (w/o CodeRender), and (4) removing guidance from the content schema (w/o Schema). All experiments were conducted using the Qwen2.5LM+Qwen2-VLVM configuration.
176
+
177
+ As demonstrated in Table 4, our experiments reveal two key findings: 1) The HTML-based representation significantly reduces interaction complexity, evidenced by the substantial decrease in success rate from $95.0\%$ to $74.6\%$ when removing the CodeRender component. 2) The presentation analysis is crucial for generation quality, as removing the outline and structural slides significantly degrades coherence (from 4.48 to 3.36/3.45), and eliminating the slide schema reduces the success rate from $95.0\%$ to $78.8\%$.
178
+
179
+ # 5.2 Case Study
180
+
181
+ We present representative examples of presentations generated under different configurations in Figure 5.
182
+
183
+ ![](images/69af9c67b3a63640efe41449b723e1644752e3217be2c80d3eacde0d08ebee66.jpg)
184
+ Figure 4: Score distributions of presentations generated by PPTAGENT, DocPres, and KCTV across the three evaluation dimensions: Content, Design, and Coherence, as assessed by PPTEVAL.
185
+
186
+ ![](images/8ae94cd37a7bc0826a00addc0ef5dbcfaf78256486ce2e3b667f23c8339fe9c6.jpg)
187
+
188
+ ![](images/d16377f80a8fe2978be63318c05273b914f5363a705516ea970d7c15c8ba6c78.jpg)
189
+
190
+ ![](images/75072aba3cc527e5e178b47ad13e4e252d28a6bc91d468cdd5c822fb34666562.jpg)
191
+
192
+ ![](images/35387f8dc13755e26372ce66c893412b79740a291eb455ed374d225a90b7a28b.jpg)
193
+ Figure 5: Comparative analysis of presentation generation across different methods. PPTAGENT generates slides under different reference presentations, indicated as PPTAGENT $(a)$ and PPTAGENT $(b)$.
194
+
195
+ ![](images/507495c119b3c8721e3efa0f36661617225805a5d18e7a726af6dc0ba762416a.jpg)
196
+
197
+ ![](images/a85178f72039118295c7205f376484aca391f6bd27d601727f96b7f517e96691.jpg)
198
+ Figure 6: The number of iterative self-corrections required to generate a single slide under different models.
199
+
200
+ PPTAGENT demonstrates superior presentation quality across multiple dimensions. First, it effectively incorporates visual elements with contextually appropriate image placements, while maintaining concise and well-structured slide content. Second, it generates visually engaging and diverse slides under different references. In contrast, baseline methods (DocPres and KCTV) produce predominantly text-based slides with limited visual variation, constrained by their rule-based or template-based paradigms.
201
+
202
+ # 5.3 Score Distribution
203
+
204
+ We further investigated the score distribution of generated presentations to compare the performance characteristics across methods, as shown in Figure 4. Constrained by their rule-based or template-based paradigms, baseline methods exhibit limited diversity in both content and design dimensions, with scores predominantly concentrated at levels 2 and 3. In contrast, PPTAGENT demonstrates a more dispersed score distribution, with the majority of presentations $(>80\%)$ achieving scores of 3 or higher in these dimensions.
205
+
206
+ ![](images/9e0d7d72335086ba63c12cdd6eb23302ebdce3f4e2cf3c835aa8ab3ae9a17e3e.jpg)
207
+
208
+ Furthermore, due to PPTAGENT's comprehensive consideration of structural slides, it achieves notably superior coherence scores, with over $80\%$ of the presentations receiving scores above 4.
209
+
210
+ # 5.4 Effectiveness of Self-Correction
211
+
212
+ Figure 6 illustrates the number of iterations required to generate a slide using different language models. Although GPT-4o exhibits superior self-correction capabilities compared to Qwen2.5, Qwen2.5 encounters fewer errors in the first generation. Additionally, we observed that Qwen2-VL experiences errors more frequently and has poorer self-correction capabilities, likely due to its multimodal post-training (Wang et al., 2024a). Ultimately, all three models successfully corrected more than half of the errors, demonstrating that our iterative self-correction mechanism effectively ensures the success of the generation process.
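+ The mechanism being measured here can be sketched as a simple retry loop (with hypothetical `llm` and `execute_actions` helpers; the cap of two iterations matches the setting in Section 4.2):
+
+ ```python
+ MAX_RETRIES = 2  # at most two self-correction iterations per slide
+
+ def generate_slide(llm, prompt: str, execute_actions):
+     """Propose editing actions for one slide, retrying with execution feedback."""
+     history = prompt
+     for _ in range(MAX_RETRIES + 1):
+         code = llm(history)                  # proposed sequence of editing API calls
+         ok, error_msg = execute_actions(code)
+         if ok:
+             return code                      # slide rendered without execution errors
+         # Feed the failure back so the model can repair its own output.
+         history += f"\n# Failed attempt:\n{code}\n# Error:\n{error_msg}\nPlease fix the code."
+     raise RuntimeError("Slide generation failed after self-correction")
+ ```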
213
+
214
+ # 5.5 Agreement Evaluation
215
+
216
+ PPTEVAL with Human Preferences Although Chen et al. (2024a) have highlighted the impressive human-like discernment of LLMs in various generation tasks, it remains crucial to assess the correlation between LLM evaluations and human evaluations in the context of presentations.
217
+
218
+ ![](images/2385d3e9b6b316090ccc1e47cdc52318f7ebec50fa6e844c45774099ce786b80.jpg)
219
+ Figure 7: Correlation heatmap between existing automated evaluation metrics along with the content and design dimension in PPTEVAL.
220
+
221
+ This necessity arises from findings by Laskar et al. (2024), which indicate that LLMs may not be adequate evaluators for complex tasks. Table 5 shows the correlation of ratings between humans and LLMs. The average Pearson correlation of 0.71 exceeds the scores of other evaluation methods (Kwan et al., 2024), indicating that PPTEVAL aligns well with human preferences.
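+ The correlations in Table 5 are standard paired statistics; a minimal sketch with SciPy (the rating vectors below are hypothetical placeholders):
+
+ ```python
+ from scipy.stats import pearsonr, spearmanr
+
+ # Hypothetical paired scores for one dimension: human consensus vs. LLM judge.
+ human = [4, 3, 5, 2, 4, 3]
+ model = [4, 3, 4, 2, 5, 3]
+
+ r, p_r = pearsonr(human, model)
+ rho, p_rho = spearmanr(human, model)
+ print(f"Pearson r={r:.2f} (p={p_r:.3f}), Spearman rho={rho:.2f} (p={p_rho:.3f})")
+ ```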
222
+
223
+ We further examine the relationship between PPTEVAL's content and design dimensions and existing metrics through Pearson correlation analysis, as shown in Figure 7. The Pearson correlation coefficients reveal that current metrics are ineffective for presentation evaluation. Specifically, PPL primarily measures text fluency but performs poorly on slide content due to the inherently fragmented nature of slide text, frequently producing outlier measurements. Similarly, while ROUGE-L and FID quantify similarity to reference texts and presentations respectively, these metrics inadequately assess content and design quality, as high conformity to references does not guarantee presentation effectiveness. These weak correlations highlight the necessity of PPTEVAL for robust and comprehensive presentation evaluation that considers both content quality and design effectiveness.
224
+
225
+ # 6 Related Works
226
+
227
+ Automated Presentation Generation Recently proposed methods for slide generation can be categorized as rule-based or template-based, according to how they handle element placement and styling. Rule-based methods, such as those proposed by Mondal et al. (2024) and Bandyopadhyay et al. (2024), often focus on enhancing textual content but neglect the visual-centric nature of presenta-
228
+
229
+ <table><tr><td>Correlation</td><td>Content</td><td>Design</td><td>Coherence</td><td>Avg.</td></tr><tr><td>Pearson</td><td>0.70</td><td>0.90</td><td>0.55</td><td>0.71</td></tr><tr><td>Spearman</td><td>0.73</td><td>0.88</td><td>0.57</td><td>0.74</td></tr></table>
230
+
231
+ Table 5: The correlation scores between human ratings and LLM ratings under different dimensions (Content, Design, Coherence). All reported correlations have p-values below 0.05, indicating statistical significance.
232
+
233
+ tions, leading to outputs that lack engagement. Template-based methods, including Cachola et al. (2024) and industrial solutions like Tongyi, rely on predefined templates to create visually appealing presentations. However, their dependence on extensive manual effort for template annotation significantly limits scalability and flexibility.
234
+
235
+ LLM Agent Numerous studies (Deng et al., 2024; Li et al., 2024; Tang et al., 2025) have explored the potential of LLMs to act as agents assisting humans in a wide array of tasks. For example, Wang et al. (2024b) demonstrate the capability of LLMs to accomplish tasks by generating executable actions. Furthermore, Guo et al. (2023) demonstrated the potential of LLMs in automating presentation-related tasks through API integration.
236
+
237
+ LLM as a Judge LLMs have exhibited strong capabilities in instruction following and context perception, which has led to their widespread adoption as judges (Liu et al., 2023; Zheng et al., 2023). Chen et al. (2024a) demonstrated the feasibility of using MLLMs as judges, while Kwan et al. (2024) proposed a multi-dimensional evaluation framework. Additionally, Ge et al. (2025) investigated the use of LLMs for assessing single-slide quality. However, they did not evaluate presentation quality from a holistic perspective.
238
+
239
+ # 7 Conclusion
240
+
241
+ In this paper, we introduce PPTAGENT, which conceptualizes presentation generation as a two-stage presentation editing task completed through LLMs' abilities to understand and generate code. Moreover, we propose PPTEVAL to provide quantitative metrics for assessing presentation quality. Our experiments across data from multiple domains have demonstrated the superiority of our method. This research provides a new paradigm for generating slides under unsupervised conditions and offers insights for future work in presentation generation.
242
+
243
+ # Limitations
244
+
245
+ While PPTAGENT demonstrates promising capabilities in presentation generation, several limitations remain. First, despite achieving a high success rate $(>95\%)$ on our dataset, the model occasionally fails to generate presentations, which could limit its reliability. Second, although we can provide high-quality preprocessed presentations as references, the quality of generated presentations is still influenced by the input reference presentation, which may lead to suboptimal outputs. Third, although PPTAGENT shows improvements in layout optimization compared to prior approaches, it does not fully utilize visual information to refine the slide design. This manifests in occasional design flaws, such as overlapping elements, which can compromise the readability of generated slides. Future work should focus on enhancing the robustness, reducing reference dependency, and better incorporating visual information into the generation process.
246
+
247
+ # Ethical Considerations
248
+
249
+ In the construction of Zenodo10K, we utilized the publicly available API to scrape data while strictly adhering to the licensing terms associated with each artifact. Specifically, artifacts that were not permitted for modification or commercial use under their respective licenses were filtered out to ensure compliance with intellectual property rights. Additionally, all annotation personnel involved in the project were compensated at rates exceeding the minimum wage in their respective cities, reflecting our commitment to fair labor practices and ethical standards.
250
+
251
+ # References
252
+
253
+ Sambaran Bandyopadhyay, Himanshu Maheshwari, Anandhavelu Natarajan, and Apoorv Saxena. 2024. Enhancing presentation slide generation by llms with a multi-staged end-to-end approach. arXiv preprint arXiv:2406.06556.
254
+ Andrea Barrick, Dana Davis, and Dana Winkler. 2018. Image versus text in powerpoint lectures: Who does it benefit? Journal of Baccalaureate Social Work, 23(1):91-109.
255
+ Isabel Alyssa Cachola, Silviu Cucerzan, Allen Herring, Vuksan Mijovic, Erik Oveson, and Sujay Kumar Jauhar. 2024. Knowledge-centric templatic views of documents. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, pages
256
+
257
+ 15460-15476, Miami, Florida, USA. Association for Computational Linguistics.
258
+ Dongping Chen, Ruoxi Chen, Shilin Zhang, Yinuo Liu, Yaochen Wang, Huichi Zhou, Qihui Zhang, Pan Zhou, Yao Wan, and Lichao Sun. 2024a. Mllmas-a-judge: Assessing multimodal llm-as-a-judge with vision-language benchmark. arXiv preprint arXiv:2402.04788.
259
+ Jianlv Chen, Shitao Xiao, Peitian Zhang, Kun Luo, Defu Lian, and Zheng Liu. 2024b. Bge m3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. arXiv preprint arXiv:2402.03216.
260
+ Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. 2024. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36.
261
+ Nancy Duarte. 2008. Slide:ology: The art and science of creating great presentations, volume 1. O'Reilly Media, Sebastopol.
262
+ Nancy Duarte. 2010. Resonate: Present visual stories that transform audiences. John Wiley & Sons.
263
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
264
+ European Organization For Nuclear Research and OpenAIRE. 2013. Zenodo.
265
+ Weixi Feng, Wanrong Zhu, Tsu-jui Fu, Varun Jampani, Arjun Akula, Xuehai He, Sugato Basu, Xin Eric Wang, and William Yang Wang. 2024. Layoutgpt: Compositional visual planning and generation with large language models. Advances in Neural Information Processing Systems, 36.
266
+ Tsu-Jui Fu, William Yang Wang, Daniel McDuff, and Yale Song. 2022. Doc2ppt: Automatic presentation slides generation from scientific documents. Proceedings of the AAAI Conference on Artificial Intelligence, 36(1):634-642.
267
+ Jiaxin Ge, Zora Zhiruo Wang, Xuhui Zhou, Yi-Hao Peng, Sanjay Subramanian, Qinyue Tan, Maarten Sap, Alane Suhr, Daniel Fried, Graham Neubig, et al. 2025. Autopresent: Designing structured visuals from scratch. arXiv preprint arXiv:2501.00912.
268
+ Michael Robert Gryk. 2022. Human readability of data files. Balisage series on markup technologies, 27.
269
+ Xinyan Guan, Yanjiang Liu, Hongyu Lin, Yaojie Lu, Ben He, Xianpei Han, and Le Sun. 2024. Mitigating large language model hallucinations via autonomous knowledge graph-based retrofitting. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 18126-18134.
270
+
271
+ Yiduo Guo, Zekai Zhang, Yaobo Liang, Dongyan Zhao, and Nan Duan. 2023. Pptc benchmark: Evaluating large language models for powerpoint task completion. arXiv preprint arXiv:2311.01767.
272
+ Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. 2017. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30.
273
+ Ryo Kamoi, Yusen Zhang, Nan Zhang, Jiawei Han, and Rui Zhang. 2024. When can llms actually correct their own mistakes? a critical survey of self-correction of llms. Transactions of the Association for Computational Linguistics, 12:1417-1440.
274
+ Wai-Chung Kwan, Xingshan Zeng, Yuxin Jiang, Yufei Wang, Liangyou Li, Lifeng Shang, Xin Jiang, Qun Liu, and Kam-Fai Wong. 2024. Mt-eval: A multi-turn capabilities evaluation benchmark for large language models. Preprint, arXiv:2401.16745.
275
+ Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626.
276
+ Md Tahmid Rahman Laskar, Sawsan Alqahtani, M Saiful Bari, Mizanur Rahman, Mohammad Abdullah Matin Khan, Haidar Khan, Israt Jahan, Amran Bhuiyan, Chee Wei Tan, Md Rizwan Parvez, Enamul Hoque, Shafiq Joty, and Jimmy Huang. 2024. A systematic survey and critical review on evaluating large language models: Challenges, limitations, and recommendations. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 13785-13816, Miami, Florida, USA. Association for Computational Linguistics.
277
+ Yanda Li, Chi Zhang, Wanqi Yang, Bin Fu, Pei Cheng, Xin Chen, Ling Chen, and Yunchao Wei. 2024. Appagent v2: Advanced agent for flexible mobile interactions. arXiv preprint arXiv:2408.11824.
278
+ Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
279
+ Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-eval: NLG evaluation using gpt-4 with better human alignment. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2511-2522, Singapore. Association for Computational Linguistics.
280
+ Himanshu Maheshwari, Sambaran Bandyopadhyay, Aparna Garimella, and Anandhavelu Natarajan. 2024. Presentations are not always linear! gnn meets llm for document-to-presentation transformation with attribution. arXiv preprint arXiv:2405.13095.
281
+
282
+ Ishani Mondal, S Shwetha, Anandhavelu Natarajan, Aparna Garimella, Sambaran Bandyopadhyay, and Jordan Boyd-Graber. 2024. Presentations by the humans and for the humans: Harnessing llms for generating persona-aware slides from documents. In Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2664-2684.
283
+ Athar Sefid, Prasenjit Mitra, and Lee Giles. 2021. Slidegen: an abstractive section-based slide generator for scholarly documents. In Proceedings of the 21st ACM Symposium on Document Engineering, pages 1-4.
284
+ Edward Sun, Yufang Hou, Dakuo Wang, Yunfeng Zhang, and Nancy XR Wang. 2021. D2s: Document-to-slide generation via query-based text summarization. arXiv preprint arXiv:2105.03664.
285
+ Hao Tang, Darren Key, and Kevin Ellis. 2025. Worldcoder, a model-based llm agent: Building world models by writing code and interacting with the environment. Advances in Neural Information Processing Systems, 37:70148-70212.
286
+ VikParuchuri. 2023. marker.
287
+ Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. 2024a. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191.
288
+ Xingyao Wang, Yangyi Chen, Lifan Yuan, Yizhe Zhang, Yunzhu Li, Hao Peng, and Heng Ji. 2024b. Executable code actions elicit better llm agents. arXiv preprint arXiv:2402.01030.
289
+ Bichen Wu, Chenfeng Xu, Xiaoliang Dai, Alvin Wan, Peizhao Zhang, Zhicheng Yan, Masayoshi Tomizuka, Joseph Gonzalez, Kurt Keutzer, and Peter Vajda. 2020. Visual transformers: Token-based image representation and processing for computer vision. Preprint, arXiv:2006.03677.
290
+ Tong Wu, Guandao Yang, Zhibing Li, Kai Zhang, Ziwei Liu, Leonidas Guibas, Dahua Lin, and Gordon Wetzstein. 2024. Gpt-4v(ision) is a human-aligned evaluator for text-to-3d generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22227-22238.
291
+ An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.
292
+ Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. 2023. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623.
293
+
294
+ # A Data Preprocessing
295
+
296
+ To maintain a reasonable cost, we selected presentations ranging from 12 to 64 pages and documents with text lengths from 2,048 to 20,480 characters. We extracted both textual and visual content from the source documents using Marker (VikParuchuri, 2023). The extracted text was then organized into sections. For visual content, we generated image captions to assist in relevant image selection through textual descriptions. To minimize redundancy, we identified and removed duplicate images if their image embeddings had a cosine similarity score exceeding 0.85. For slide-level deduplication, we removed individual slides if their text embeddings had a cosine similarity score above 0.8 compared to the preceding slide, as suggested by Fu et al. (2022).
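+ A minimal sketch of both deduplication passes (assuming the embeddings have already been computed with the models from Section 4.2):
+
+ ```python
+ import numpy as np
+
+ def cosine(u: np.ndarray, v: np.ndarray) -> float:
+     return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))
+
+ def dedup_images(embs: list[np.ndarray], threshold: float = 0.85) -> list[int]:
+     """Keep an image only if it is not too similar to any already-kept image."""
+     kept: list[int] = []
+     for i, emb in enumerate(embs):
+         if all(cosine(emb, embs[j]) <= threshold for j in kept):
+             kept.append(i)
+     return kept
+
+ def dedup_slides(text_embs: list[np.ndarray], threshold: float = 0.8) -> list[int]:
+     """Drop a slide that is a near-duplicate of the immediately preceding slide."""
+     kept = [0]
+     for i in range(1, len(text_embs)):
+         if cosine(text_embs[i], text_embs[i - 1]) <= threshold:
+             kept.append(i)
+     return kept
+ ```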
297
+
298
+ # B Details of PPTEVAL
299
+
300
+ We recruited four graduate students through a Shanghai-based crowdsourcing platform to evaluate a total of 250 presentations: 50 randomly selected from Zenodo10K representing real-world presentations, along with two sets of 100 presentations generated by the baseline method and our approach respectively. Following the evaluation framework proposed by PPTEVAL, assessments were conducted across three dimensions using the scoring criteria detailed in Appendix F. Evaluators were provided with converted slide images, scored them individually, and then discussed the results to reach a consensus on the final scores.
301
+
302
+ Moreover, we measured inter-rater agreement using Fleiss' Kappa, with an average score of 0.59 across the three dimensions (0.61, 0.61, and 0.54 for Content, Design, and Coherence, respectively), indicating satisfactory agreement (Kwan et al., 2024) among evaluators. Representative scoring examples are shown in Figure 8.
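+ For reference, Fleiss' Kappa over a ratings matrix can be computed with statsmodels (the matrix below is a hypothetical placeholder with one row per presentation and one column per rater):
+
+ ```python
+ import numpy as np
+ from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa
+
+ ratings = np.array([   # hypothetical 1-5 scores for a single dimension
+     [4, 4, 3, 4],
+     [2, 2, 2, 3],
+     [5, 5, 4, 5],
+ ])
+ table, _ = aggregate_raters(ratings)  # subjects x categories count table
+ print(f"Fleiss' kappa: {fleiss_kappa(table):.2f}")
+ ```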
303
+
304
+ We provide detailed illustrations of the three dimensions below:
305
+
306
+ Content: The content dimension evaluates the information presented on the slides, focusing on both text and images. We assess content quality from three perspectives: the amount of information, the clarity and quality of textual content, and the support provided by visual content. High-quality textual content is characterized by clear, impactful text that conveys the proper amount of information. Additionally, images should complement and reinforce the textual content, making the information more accessible and engaging. To evaluate content quality, we employ MLLMs on slide images, as slides cannot be easily comprehended in a plain text format.
309
+
310
+ Design: Good design not only captures attention but also enhances content delivery. We evaluate the design dimension based on three aspects: color schemes, visual elements, and overall design. Specifically, the color scheme of the slides should have clear contrast to highlight the content while maintaining harmony. The use of visual elements, such as geometric shapes, can make the slide design more expressive. Finally, good design should adhere to basic design principles, such as avoiding overlapping elements and ensuring that design does not interfere with content delivery.
311
+
312
+ Coherence: Coherence is essential for maintaining audience engagement in a presentation. We evaluate coherence based on the logical structure and the contextual information provided. Effective coherence is achieved when the model constructs a captivating storyline, enriched with contextual information that enables the audience to follow the content seamlessly. We assess coherence by analyzing the logical structure and contextual information extracted from the presentation.
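+ Putting the dimensions together, the describe-then-judge flow of PPTEVAL (using the prompts in Figures 18-23) can be sketched with an OpenAI-style client; the helper below is an assumption about the wiring, not the authors' exact implementation:
+
+ ```python
+ import base64
+ import json
+ from openai import OpenAI
+
+ client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
+
+ def judge_slide(image_path: str, describe_prompt: str, judge_prompt: str) -> dict:
+     """Describe a rendered slide image, then score the description on a 1-5 scale."""
+     b64 = base64.b64encode(open(image_path, "rb").read()).decode()
+     description = client.chat.completions.create(
+         model="gpt-4o",
+         messages=[{"role": "user", "content": [
+             {"type": "text", "text": describe_prompt},
+             {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
+         ]}],
+     ).choices[0].message.content
+     verdict = client.chat.completions.create(
+         model="gpt-4o",
+         response_format={"type": "json_object"},
+         messages=[{"role": "user", "content": f"{judge_prompt}\nInput: {description}"}],
+     ).choices[0].message.content
+     return json.loads(verdict)  # e.g. {"reason": "...", "score": 4}
+ ```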
313
+
314
+ # C Detailed Performance of PPTAGENT
315
+
316
+ We present a detailed performance analysis of Qwen2.5LM+Qwen2-VLVM across various domains in Table 8. Additionally, Tables 6 and 7 show the success-rate-weighted performance, where failed generations receive a PPTEVAL score of 0, demonstrating that a lower success rate significantly impacts the overall effectiveness of a method.
317
+
318
+ As demonstrated in Table 6, GPT-4o consistently delivers outstanding performance across various evaluation metrics, highlighting its advanced capabilities. While Qwen2-VL exhibits limitations in linguistic proficiency due to trade-offs from multimodal post-training, GPT-4o maintains a clear advantage in handling language tasks. However, the introduction of Qwen2.5 successfully mitigates these linguistic deficiencies, bringing its performance on par with GPT-4o and achieving the best overall results. This underscores the significant potential of open-source LLMs as competitive and highly capable presentation agents.
319
+
320
+ # D Slide Clustering
321
+
322
+ We present our hierarchical clustering algorithm for layout analysis in Algorithm 1, where slides are grouped into clusters using a similarity threshold $\theta$ of 0.65. To focus exclusively on layout patterns and minimize interference from specific content, we preprocess the slides by replacing text content with a placeholder character ("a") and substituting image elements with solid-color backgrounds. Then, we compute the similarity matrix using cosine similarity based on the ViT embeddings of converted slide images between each slide pair. Figure 9 illustrates representative examples from the resulting slide clusters.
323
+
324
+ # E Code Interaction
325
+
326
+ For visual reference, Figure 10 illustrates a slide rendered in HTML format, while Figure 11 displays an excerpt (the first 60 of 1,006 lines) of its XML representation.
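+ A minimal sketch of such a rendering pass with python-pptx (a simplification for illustration, not the exact renderer used by PPTAGENT):
+
+ ```python
+ from pptx import Presentation
+ from pptx.enum.shapes import MSO_SHAPE_TYPE
+
+ def slide_to_html(slide) -> str:
+     """Render a slide as simplified HTML: text frames become <p>, pictures <img>."""
+     lines = ["<section>"]
+     for shape in slide.shapes:
+         if shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
+             lines.append(f'  <img alt="{shape.name}" />')
+         elif shape.has_text_frame:
+             for para in shape.text_frame.paragraphs:
+                 text = "".join(run.text for run in para.runs)
+                 if text.strip():
+                     lines.append(f"  <p>{text}</p>")
+     lines.append("</section>")
+     return "\n".join(lines)
+
+ prs = Presentation("deck.pptx")  # hypothetical input file
+ print(slide_to_html(prs.slides[0]))
+ ```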
327
+
328
+ # F Prompts
329
+
330
+ # F.1 Prompts for Presentation Analysis
331
+
332
+ The prompts used for presentation analysis are illustrated in Figures 12, 13, and 14.
333
+
334
+ # F.2 Prompts for Presentation Generation
335
+
336
+ The prompts used for generating presentations are shown in Figures 15, 16, and 17.
337
+
338
+ # F.3 Prompts for PPTEVAL
339
+
340
+ The prompts used in PPTEVAL are shown in Figure 18, 19, 20, 21, 22 and 23.
341
+
342
+ # Algorithm 1 Slides Clustering Algorithm
343
+
344
+ 1: Input: Similarity matrix of slides $S \in \mathbb{R}^{N \times N}$ , similarity threshold $\theta$
345
+ 2: Initialize: $C \gets \emptyset$
346
+ 3: while $\max(S) \geq \theta$ do
347
+ 4: $(i,j)\gets \arg\max(S)$ $\triangleright$ Find the most similar slide pair
348
+ 5: if $\exists c_{k} \in C$ such that $(i \in c_{k} \lor j \in c_{k})$ then
349
+ 6: $c_k \gets c_k \cup \{i, j\} \triangleright$ Merge into existing cluster
350
+ 7: else
351
+ 8: $c_{\mathrm{new}} \gets \{i, j\} \quad \triangleright$ Create new cluster
352
+ 9: $C \gets C \cup \{c_{\mathrm{new}}\}$
353
+ 10: end if
354
+ 11: Update $S$:
+ 12: $S[:,i]\gets 0,\ S[i,:]\gets 0$
+ 13: $S[:,j]\gets 0,\ S[j,:]\gets 0$
357
+ 14: end while
358
+ 15: Return: $C$
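+ A direct NumPy transcription of Algorithm 1 (a sketch; the ViT embedding step that produces the similarity matrix is assumed to have already run):
+
+ ```python
+ import numpy as np
+
+ def cluster_slides(S: np.ndarray, theta: float = 0.65) -> list[set[int]]:
+     """Greedily cluster slides from a pairwise cosine-similarity matrix."""
+     S = S.copy()
+     np.fill_diagonal(S, 0.0)                 # ignore self-similarity
+     clusters: list[set[int]] = []
+     while S.max() >= theta:
+         i, j = np.unravel_index(np.argmax(S), S.shape)  # most similar pair
+         for c in clusters:
+             if i in c or j in c:
+                 c.update((i, j))             # merge into an existing cluster
+                 break
+         else:
+             clusters.append({i, j})          # create a new cluster
+         S[:, i] = S[i, :] = 0.0              # zero out processed slides
+         S[:, j] = S[j, :] = 0.0
+     return clusters
+ ```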
359
+
360
+ # Content
361
+
362
+ ![](images/5cb77ca6b1daa7e06cb5b666ca87469c1760fe35250841cc7ef349ea1c8d8368.jpg)
363
+ Score: 1
+
+ Judgement: Lack of content
366
+
367
+ [Example slide (text only): "5. Opening, publishing and archiving" — identify data to be made openly available; specify where and when data will be published; publish data in formats that can be accessed; publish metadata if data can't be opened; use repositories with persistent identifiers like DOI; categorize data for long-term persistence; use repositories ensuring data curation]
+
+ Score: 3
+
+ Judgement: The content is somewhat tedious and lacks the support of images
+
+ ![](images/94d026db864ede6d5fecddb18c755dc36d55faf779624555f9c480694b5c85c5.jpg)
+
+ Score: 5
+
+ Judgement: The content is impactful, with relevant images supporting it well
383
+
384
+ # Design
385
+
386
+ ![](images/9fee09273c050e264307458db0c6f1913a8b3c793076c096e8f4d2f086e8ff0c.jpg)
387
+ Score: 2
388
+
389
+ Judgement: Monochromatic colors without visual elements
390
+
391
+ ![](images/f78a04cba260289529c28020f1daaf5e73cac15219197c92da12de2585f4458f.jpg)
392
+ Score: 4
393
+
394
+ Judgement: Harmonious colors with the use of geometric shapes; however, some minor flaws diminish the overall design
395
+
396
+ ![](images/aef09e4cc698f1ff8774ffd6bb2aba8f20346ed72d64e3d63e7d70576e509f6f.jpg)
397
+ Score: 5
398
+
399
+ Judgement: The slide presents an engaging and consistent overall design
400
+
401
+ ![](images/f08816d2db0e44cdd3daaaaf7269782f738512906f79e2dbde713bc7317d1d0c.jpg)
402
+ Figure 8: Scoring Examples of PPTEVAL.
+
+ # Structural Slides
+
+ Opening
+
+ ![](images/1b6d51b002cdfd5d85e0cfb70cce79389b71a73f5118eb5ba604d1e018790a31.jpg)
+ Table of Contents
+
+ ![](images/17c80a0431624424dd0440e02e7ef522e2b436f682107484cd6a3232bc925412.jpg)
+ Ending
+
+ # Content Slides
+
+ ![](images/6624191fc6ffae2207cdedaaebeb45c42747da677f290258238ba4500c4f0991.jpg)
+ Picture and illustrative key points
+
+ ![](images/0ecf621cd4a1245ab32dfb6207458ce7ea20b64500ca647839b92269376857ec.jpg)
+ Text Sections with highlighted Keywords
+
+ ![](images/eb915d7391e3a4bb1d482ecc5055d3ba1c23aabc0756febf65f4ffc6b85716c5.jpg)
+ Image Focus with Subtextual Description
+
+ Figure 9: Example of slide clusters.
425
+
426
+ ![](images/1c7eed4dec468fa1a8bd25cb00b215f56a9fb8d083170fa7b55e0e2c60169d38.jpg)
427
+ Figure 10: Example of rendering a slide into HTML format.
428
+
429
+ ![](images/4308038d7b13e29c57b9de2ff497fde3d9512a70d85a936b27c3c74669b95b1e.jpg)
430
+ Figure 11: The first 60 lines of the XML representation of a presentation slide (out of 1,006 lines).
431
+
432
+ <table><tr><td colspan="2">Configuration</td><td colspan="4">Existing Metrics</td><td colspan="4">PPTEval</td></tr><tr><td>Language Model</td><td>Vision Model</td><td>SR(%)↑</td><td>PPL↓</td><td>ROUGE-L ↑</td><td>FID↓</td><td>Content↑</td><td>Design↑</td><td>Coherence↑</td><td>Avg.↑</td></tr><tr><td colspan="10">DocPres (rule-based)</td></tr><tr><td>GPT-4oLM</td><td>-</td><td>-</td><td>76.42</td><td>13.28</td><td>-</td><td>2.98</td><td>2.33</td><td>3.24</td><td>2.85</td></tr><tr><td>Qwen2.5LM</td><td>-</td><td>-</td><td>100.4</td><td>13.09</td><td>-</td><td>2.96</td><td>2.37</td><td>3.28</td><td>2.87</td></tr><tr><td colspan="10">KCTV (template-based)</td></tr><tr><td>GPT-4oLM</td><td>-</td><td>80.0</td><td>68.48</td><td>10.27</td><td>-</td><td>1.99</td><td>2.35</td><td>2.85</td><td>2.40</td></tr><tr><td>Qwen2.5LM</td><td>-</td><td>88.0</td><td>41.41</td><td>16.76</td><td>-</td><td>2.24</td><td>2.59</td><td>2.95</td><td>2.59</td></tr><tr><td colspan="10">PPTAGENT (ours)</td></tr><tr><td>GPT-4oLM</td><td>GPT-4oVM</td><td>97.8</td><td>721.54</td><td>10.17</td><td>7.48</td><td>3.17</td><td>3.16</td><td>4.20</td><td>3.54</td></tr><tr><td>Qwen2-VLLM</td><td>Qwen2-VLVM</td><td>43.0</td><td>265.08</td><td>13.03</td><td>7.32</td><td>1.34</td><td>1.43</td><td>1.75</td><td>1.50</td></tr><tr><td>Qwen2.5LM</td><td>Qwen2-VLVM</td><td>95.0</td><td>496.62</td><td>14.25</td><td>6.20</td><td>3.11</td><td>3.10</td><td>4.25</td><td>3.48</td></tr></table>
433
+
434
+ # System Message:
435
+
436
+ You are an expert presentation analyst specializing in categorizing PowerPoint slides, particularly skilled at identifying structural slides (such as Opening, Transitions, and Ending slides) that guide the flow of the presentation. Please follow the specified output format strictly when categorizing the slides.
437
+
438
+ # Prompt:
439
+
440
+ Objective: Analyze a set of slides provided in plain text format. Your task is to identify structural slides (such as Opening and Ending) based on their content and categorize all other slides under "Content."
441
+
442
+ Instructions
443
+
444
+ 1. Categorize structural slides in the presentation (such as Opening, Ending); assign all other slides to "Content."
445
+ 2. Category names for structural slides should be simple, reflect their function, and contain no specific entity names.
446
+ 3. Opening and Ending slides are typically located at the beginning or end of the presentation and may consist of only one slide.
447
+ 4. Other transition categories must contain multiple slides with partially identical text.
448
+
449
+ Output format requirements:
450
+
451
+ Use the Functional key to group all categorized structural slides, with category names that reflect only the slide's function (e.g., "Opening", "Ending") and do not describe any specific content.
452
+
453
+ Use the Content key to list all slides that do not fall into structural categories.
454
+
455
+ Example output:
456
+
457
+ ```json
+ {
+     "functional": {
+         "opening": [1],
+         "table of contents": [2, 5],
+         "section header": [3, 6],
+         "ending": [10]
+     },
+     "content": [4, 7, 8, 9]
+ }
+ ```
470
+
471
+ Ensure that all slides are included in the categorization, with their corresponding slide numbers listed in the output.
472
+
473
+ Input: {{slides}}
474
+
475
+ Output:
476
+
477
+ Table 6: Weighted performance comparison of presentation generation methods, including DocPres, KCTV, and our proposed PPTAGENT. Results are evaluated using Success Rate (SR), Perplexity (PPL), Rouge-L, Fréchet Inception Distance (FID), and SR-weighted PPTEVAL.
478
+
479
+ <table><tr><td>Setting</td><td>SR(%)</td><td>Content</td><td>Design</td><td>Coherence</td><td>Avg.</td></tr><tr><td>PPTAGENT</td><td>95.0</td><td>3.11</td><td>3.10</td><td>4.25</td><td>3.48</td></tr><tr><td>w/o Outline</td><td>91.0</td><td>2.94</td><td>3.00</td><td>3.05</td><td>3.00</td></tr><tr><td>w/o Schema</td><td>78.8</td><td>2.42</td><td>2.54</td><td>3.18</td><td>2.71</td></tr><tr><td>w/o Structure</td><td>92.2</td><td>3.02</td><td>2.99</td><td>3.18</td><td>3.06</td></tr><tr><td>w/o CodeRender</td><td>74.6</td><td>2.43</td><td>2.49</td><td>3.26</td><td>2.73</td></tr></table>
480
+
481
+ Figure 12: Illustration of the prompt used for clustering structural slides.
482
+
483
+ # System Message:
484
+
485
+ You are a helpful assistant
486
+
487
+ # Prompt:
488
+
489
+ Analyze the content layout and media types in the provided slide images.
490
+ Your objective is to create a concise, descriptive title that captures purely the presentation pattern and structural arrangement of content elements.
491
+ Requirements:
492
+ Focus on HOW content is structured and presented, not WHAT the content is
493
+ Describe the visual arrangement and interaction between different content types (text, images, diagrams, etc.)
494
+
495
+ Avoid:
496
+
497
+ Any reference to specific topics or subjects
498
+ Business or industry-specific terms
499
+ Actual content descriptions
500
+
501
+ You cannot use the following layout names:
502
+ {{existed_layouts_names}}
503
+ Example Outputs:
504
+ Hierarchical Bullet Points with Central Image
505
+ Presentation of Evolution Through a Timeline
506
+ Analysis Displayed Using a Structured Table
507
+ Growth Overview Illustrated with Multiple Charts
508
+ Picture and illustrative key points Layout
510
+ Output: Provide a one-line layout pattern title.
511
+
512
+ Table 7: Ablation analysis of PPTAGENT utilizing the Qwen2.5LM+Qwen2-VLVM configuration, with PPTEval scores weighted by success rate to demonstrate each component's contribution.
513
+
514
+ <table><tr><td>Domain</td><td>SR (%)</td><td>PPL</td><td>FID</td><td>PPTEval</td></tr><tr><td>Culture</td><td>93.0</td><td>185.3</td><td>5.00</td><td>3.70</td></tr><tr><td>Education</td><td>94.0</td><td>249.0</td><td>7.90</td><td>3.69</td></tr><tr><td>Science</td><td>96.0</td><td>500.6</td><td>6.07</td><td>3.56</td></tr><tr><td>Society</td><td>95.0</td><td>396.8</td><td>5.32</td><td>3.59</td></tr><tr><td>Tech</td><td>97.0</td><td>238.7</td><td>6.72</td><td>3.74</td></tr></table>
515
+
516
+ Table 8: Evaluation results under the configuration of $\mathrm{Qwen2.5}_{\mathsf{LM}} + \mathrm{Qwen2\text{-}VL}_{\mathsf{VM}}$ in different domains, using the success rate (SR), PPL, FID, and the average PPTEVAL score across three evaluation dimensions.
517
+
518
+ Figure 13: Illustration of the prompt used to infer layout patterns.
519
+
520
+ # System Message:
521
+
522
+ # You are a helpful assistant.
523
+
524
+ # Prompt:
525
+
526
+ Please analyze the slide elements and create a structured template schema in JSON format. The schema
527
+
528
+ should:
529
+
530
+ 1. Identify key content elements (both text and images) that make up the slide
531
+
532
+ 2. For each element, specify:
533
+
534
+ - "description": A clear description of the element's purpose, do not mention any detail
535
+ - "type": "text" or "image" determined that according the tag of element: "image" is assigned for <img>
536
+
537
+ tags
538
+
539
+ * For text elements: The actual text content as string or array in paragraph level(<p> or <li>), merge inline text segments(<span>)
540
+
541
+ * For image elements: Use the 'alt' attribute of the <img> tag as the data of the image
542
+
543
+ Example format:
+
+ {
+     "element_name": {
+         "description": "purpose of this element", # do not mention any detail, just purpose
+         "type": "text" or "image",
+         "data": "actual text" or "<image description>" # detail here, cannot be empty or null; use ["text1", "text2"] for multiple text elements, or multiple image descriptions for multiple image elements
+     }
+ }
556
+
557
+ Input:
+
+ {{slide}}
+
+ Please provide a schema that could be used as a template for creating similar slides.
562
+
563
+ Figure 14: Illustration of the prompt used to extract the slide schema.
564
+
565
+ # System Message:
566
+
567
+ You are a professional presentation designer tasked with creating structured PowerPoint outlines. Each slide outline should include a slide title, a suitable layout from provided options, and concise explanatory notes. Your objective is to ensure that the outline adheres to the specified slide count and uses only the provided layouts. The final deliverable should be formatted as a JSON object. Please ensure that no layouts other than those provided are utilized in the outline.
568
+
569
+ # Prompt:
570
+
571
+ Steps:
572
+
573
+ 1. Understand the JSON Content:
574
+
575
+ Carefully analyze the provided JSON input.
576
+
577
+ Identify key sections and subsections
578
+
579
+ {{json_content}}
580
+
581
+ 2. Generate the Outline:
582
+
583
+ Ensure that the number of slides matches the specified requirement.
584
+ Keep the flow between slides logical and ensure that the sequence of slides enhances understanding.
585
+ Ensure that the number of slides is appropriate for the task, given the task's complexity.
586
+
587
+ Carefully analyze the content and media types specified in the provided layouts.
588
+
589
+ For each slide, provide:
590
+
591
+ A Slide Title that clearly represents the content.
592
+
593
+ A Layout selected from the provided layouts, tailored to the slide's function.
594
+
595
+ A Slide Description, which should contain concise and clear descriptions of the key points.
596
+
597
+ Please provide your output in JSON format.
598
+
599
+ Example Output:
600
+
601
+ "Opening of the XX": A
602
+
603
+ "layout": "layout1(media_type)".
604
+
605
+ "subsection_keys": [],
606
+
607
+ "descriiption
608
+
609
+ "Introduction to the XX": {
610
+
611
+ "layout": "layout2(media type)", # select from given layouts(functional or content)
612
+
613
+ "subsection keys": ["Title of Subsection 1.1", "Title of Subsection 1.2"],
614
+
615
+ "description": "...
616
+
617
+ 1
618
+
619
+ Input:
620
+
621
+ Number of Slides: {{num_slides}}
622
+
623
+ Image Information
624
+
625
+ {{image_information}}
626
+
627
+ You can only use the following layouts.
628
+
629
+ Content Layouts:
630
+
631
+ {{layout}}
632
+
633
+ Functional Layouts:
634
+
635
+ {{functional_keys}}
636
+
637
+ Output:
638
+
639
+ Figure 15: Illustration of the prompt used for generating the outline.
640
+
641
+ # System Message:
642
+
643
+ You are an Editor agent for presentation content. You transform reference text and available images into structured slide content following schemas. You excel at following schema rules like content length and ensuring all content is strictly derived from provided reference materials. You never generate new content or use images not explicitly provided.
644
+
645
+ # Prompt:
646
+
647
+ Generate slide content based on the provided schema.
648
+
649
+ Each schema element specifies its purpose, and its default quantity.
650
+
651
+ Requirements:
652
+
653
+ 1. Content Generation Rules:
654
+
655
+ - Follow default quantity for elements, adjust when necessary
656
+ - All generated content must be based on reference text or image information
657
+ - Ensure text content meets character limits
658
+ - Generated text should use concise and impactful presentation style
659
+ - For image elements, data should be the image path # eg: "images/logo.png"
660
+ - Type of images should be a critical factor of image selection, if no relevant image(similar type or purpose) provided, leave it blank
661
+
662
+ 2. Core Elements
663
+
664
+ - Must extract essential content from reference text (e.g., slide title, main content) and maintain semantic consistency
667
+
668
+ - Must include images that support the main content (e.g., diagrams for explanations, visuals directly discussed in text)
669
+
670
+ 3. Supporting Elements (e.g., presenters, logo images):
671
+
672
+ - Generate only when relevant content exists in reference text or image information
673
+
674
+ Generate content for each element and output in the following format:
675
+
676
+ "element!";
677
+
678
+ data": ["text1", "text2"] for text elements
679
+
680
+ or ["/path/to/image", "..."] for image elements
681
+
682
+ 1
683
+
684
+ 1
685
+
686
+ Input:
687
+
688
+ Schema:
689
+
690
+ {{schema}}
691
+
692
+ Outline of Presentation:
693
+
694
+ {{outline}}
695
+
696
+ Metadata of Presentation:
697
+
698
+ {{metadata}}
699
+
700
+ Reference Text:
701
+
702
+ {{text}}
703
+
704
+ Available Images:
705
+
706
+ {{imagesinfo}}
707
+
708
+ Output: the keys in generated content should be the same as the keys in schema
709
+
710
+ Figure 16: Illustration of the prompt used for generating slide content.
711
+
712
+ # System Message:
713
+
714
+ You are a Code Generator agent specializing in slide content manipulation. You precisely translate content edit commands into API calls by following HTML structure, distinguishing between tags, and maintaining proper parent-child relationships to ensure accurate element targeting.
715
+
716
+ # Prompt:
717
+
718
+ Generate the sequence of API calls based on the provided commands, ensuring compliance with the specified rules and precise execution.
719
+
720
+ You must determine the parent-child relationships of elements based on indentation and ensure that all $\langle \text{span} \rangle$ and $\langle \text{img} \rangle$ elements are processed, leaving no unhandled content.
721
+
722
+ Each command follows this format: (element_class, type, quantity_change: int, old_data, new_data).
723
+
724
+ Steps
725
+
726
+ 1. Quantity Adjustment:
727
+
728
+ - quantity_change Rules:
729
+
730
+ - If quantity_change = 0, do not perform clone_paragraph or del_span operations. Only replace the content.
731
+
732
+ - If quantity_change $> 0$, use clone_paragraph to add the corresponding number of paragraphs.
733
+
734
+ - When cloning, prioritize paragraphs from the same element_class that already have special styles (e.g., bold, color) if available.
735
+
736
+ - The paragraph_id for newly cloned paragraphs should be the current maximum paragraph_id of the parent element plus 1, while retaining the span_id within the cloned paragraph unchanged.
737
+
738
+ - If quantity_change $< 0$, use del_span or del_image to reduce the corresponding number of elements. Always ensure to remove span elements from the end of the paragraph first.
739
+
740
+ Restriction:
741
+
742
+ - Each command's API call can only use either clone_paragraph or del_span/del_image according to the quantity_change, but not both.
743
+
744
+ 2. Content Replacement:
745
+
746
+ - Text Content: Use replace_span to sequentially distribute new content into one or more <span> elements within a paragraph. Select appropriate tags for emphasized content (e.g., bold, special color, larger font).
747
+
748
+ - Image Content: Use replace_image to replace image resources.
749
+ 3. Output Format:
750
+ - Add comments to each API call group, explaining the intent of the original command and the associated element_class.
+
+ - For cloning operations ...
752
+
753
+ Available APIs
754
+
755
+ {{api_docs}}
756
+
757
+ Example Input
758
+
759
+ Please output only the API call sequence, one call per line, wrapped in ```python and ```, with comments for corresponding commands.
760
+
761
+ Figure 17: Illustration of the prompt used for generating editing actions.
762
+
763
+ # System Message:
764
+
765
+ You are a helpful assistant
766
+
767
+ # Prompt:
768
+
769
+ Please describe the input slide based on the following three dimensions:
770
+
771
+ 1. The amount of information conveyed
772
+
773
+ Whether the slide conveys too much or too little information, e.g., resulting in a large white space without colors or images.
774
+
775
+ 2. Content Clarity and Language Quality
776
+
777
+ Check if there are any grammatical errors or unclear expressions of textual content.
778
+
779
+ 3. Images and Relevance
780
+
781
+ Assess the use of visual aids such as images or icons, their presence, and how well they relate to the theme and content of the slides.
782
+
783
+ Provide an objective and concise description without comments, focusing exclusively on the dimensions outlined above.
784
+
785
+ # Figure 18: Illustration of the prompt used to describe content in PPTEval.
786
+
787
+ # System Message:
788
+
789
+ You are a helpful assistant
790
+
791
+ # Prompt:
792
+
793
+ Please describe the input slide based on the following three dimensions:
794
+
795
+ 1. Visual Consistency
796
+
797
+ Describe whether any style diminishes readability, such as border overflow or blur, low contrast, or visual noise.
798
+
799
+ 2. Color Scheme
800
+
801
+ Analyze the use of colors in the slide, identifying the colors used and determining whether the design is monochromatic (black and white) or colorful (gray counts in).
802
+
803
+ 3. Use of Visual Elements
804
+
805
+ Describe whether the slide includes supporting visual elements, such as icons, backgrounds, images, or geometric shapes (rectangles, circles, etc.).
806
+
807
+ Provide an objective and concise description without comments, focusing exclusively on the dimensions outlined above.
808
+
809
+ # Figure 19: Illustration of the prompt used to describe style in PPTEval.
810
+
811
+ # System Message:
812
+
813
+ You are an expert presentation content extractor responsible for analyzing and summarizing key elements and metadata of presentations. Your task is to extract and provide the following information:
814
+
815
+ # Prompt:
816
+
817
818
+
819
+ 1. Slide Descriptions: Provide a concise summary of the content and key points covered on each slide.
820
+
821
+ 2. Presentation Metadata: Identify explicit background information(which means it should be a single item) and the details of the item(s), such as the author, speaker, date, and other directly stated details, from the opening and closing slides.
822
+
823
+ Example Output:
824
+
825
+ ```json
+ {
+     "slide_1": "This slide introduces the xx, xx.",
+     "slide_2": "",
+     "background": {
+         "speaker": "speaker x",
+         "date": "date x"
+     }
+ }
+ ```
830
+
831
+ Input:
832
+
833
+ {{presentation}}
834
+
835
+ Output:
836
+
837
+ # Figure 20: Illustration of the prompt used to extract content in PPTEval.
838
+
839
+ # System Message:
840
+
841
+ You are an unbiased presentation analysis judge responsible for evaluating the quality of slide content. Please carefully review the provided slide image, assessing its content, and provide your judgement in a JSON object containing the reason and score. Each score level requires that all evaluation criteria meet the standards of that level.
842
+
843
+ # Prompt:
844
+
845
+ Scoring Criteria (Five-Point Scale):
846
+
847
+ 1 Point (Poor):
848
+
849
+ The text on the slides contains significant grammatical errors or is poorly structured, making it difficult to understand.
850
+
851
+ 2 Points (Below Average):
852
+
853
+ The slides lack a clear focus, the text is awkwardly phrased, and the overall organization is weak, making it hard to engage the audience.
854
+
855
+ 3 Points (Average):
856
+
857
+ The slide content is clear and complete but lacks visual aids, resulting in insufficient overall appeal.
858
+
859
+ 4 Points (Good):
860
+
861
+ The slide content is clear and well-developed, but the images have weak relevance to the theme, limiting the effectiveness of the presentation.
862
+
863
+ 5 Points (Excellent):
864
+
865
+ The slides are well-developed with a clear focus, and the images and text effectively complement each other to convey the information successfully.
866
+
867
+ Example Output:
868
+
869
+ ```json
+ {
+     "reason": "xx",
+     "score": int
+ }
+ ```
872
+
873
+ Input: {descr}
874
+
875
+ Let's think step by step and provide your judgment.
+
+ # Figure 21: Illustration of the prompt used to evaluate content in PPTEval.
876
+
877
+ # System Message:
878
+
879
+ You are an unbiased presentation analysis judge responsible for evaluating the visual appeal of slides. Please carefully review the provided description of the slide, assessing their aesthetics only, and provide your judgment in a JSON object containing the reason and score. Each score level requires that all evaluation criteria meet the standards of that level.
880
+
881
+ # Prompt:
882
+
883
+ Scoring Criteria (Five-point scale):
884
+
885
+ 1 Point (Poor):
886
+
887
+ There is a conflict between slide styles, making the content difficult to read.
888
+
889
+ 2 Points (Fair):
890
+
891
+ The slide uses monotonous colors (black and white), ensuring readability while lacking visual appeal.
892
+
893
+ 3 Points (Average):
894
+
895
+ The slide employs a basic color scheme; however, it lacks supplementary visual elements such as icons, backgrounds, images, or geometric shapes (like rectangles), making it look plain.
896
+
897
+ 4 Points (Good):
898
+
899
+ The slide uses a harmonious color scheme and contains some visual elements (like icons, backgrounds, images, or geometric shapes); however, minor flaws may exist in the overall design.
900
+
901
+ 5 Points (Excellent):
902
+
903
+ The style of the slide is harmonious and engaging, and the use of supplementary visual elements like images and geometric shapes enhances the slide's overall visual appeal.
904
+
905
+ Example Output:
906
+
907
+ "reason": "xx",
908
+
909
+ "score": int
910
+
911
+ Input: {descr}
912
+
913
+ Let's think step by step and provide your judgment.
914
+
915
+ # Figure 22: Illustration of the prompt used to evaluate style in PPTEval.
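+
+ As a minimal sketch of how the judge prompts above might be driven programmatically: the template is filled via its `{descr}` placeholder, and the reply is validated against the expected `{"reason": ..., "score": ...}` shape. The abbreviated template and the `parse_judgment` helper are illustrative assumptions; PPTEval's actual harness may differ.
+
+ ```python
+ import json
+
+ # Abbreviated stand-in for the Figure 22 prompt; the full text appears above.
+ STYLE_PROMPT = (
+     "Scoring Criteria (Five-point scale): ...\n"
+     "Input: {descr}\n"
+     "Let's think step by step and provide your judgment."
+ )
+
+ def parse_judgment(raw: str) -> tuple[str, int]:
+     """Validate a judge reply of the form {"reason": "...", "score": int}."""
+     data = json.loads(raw)
+     reason, score = data["reason"], int(data["score"])
+     if not 1 <= score <= 5:
+         raise ValueError(f"score {score} is outside the five-point scale")
+     return reason, score
+
+ prompt = STYLE_PROMPT.format(descr="Title slide, blue background, white text.")
+ reason, score = parse_judgment('{"reason": "harmonious scheme, minor flaws", "score": 4}')
+ print(score)  # 4
+ ```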
916
+
917
+ # System Message:
918
+
919
+ You are an unbiased presentation analysis judge responsible for evaluating the coherence of the presentation. Please carefully review the provided summary of the presentation, assessing its logical flow and contextual information. Each score level requires that all evaluation criteria meet the standards of that level.
920
+
921
+ # Prompt:
922
+
923
+ Scoring Criteria (Five-Point Scale):
924
+
925
+ 1 Point (Poor):
926
+
927
+ Terminology is inconsistent, or the logical structure is unclear, making it difficult for the audience to understand.
928
+
929
+ 2 Points (Fair):
930
+
931
+ Terminology is consistent and the logical structure is generally reasonable, with minor issues in transitions.
932
+
933
+ 3 Points (Average):
934
+
935
+ The logical structure is sound with fluent transitions; however, it lacks basic background information.
936
+
937
+ 4 Points (Good):
938
+
939
+ The logical flow is reasonable and includes basic background information (e.g., speaker or acknowledgments/conclusion).
940
+
941
+ 5 Points (Excellent):
942
+
943
+ The narrative structure is engaging and meticulously organized with detailed and comprehensive background information included.
944
+
945
+ Example Output:
946
+
947
+ {"reason": "xx", "score": int}
952
+
953
+ Input:
954
+
955
+ {presentation}
956
+
957
+ Let's think step by step and provide your judgment, focusing exclusively on the dimensions outlined above and strictly following the criteria.
958
+
959
+ # Figure 23: Illustration of the prompt used to evaluate coherence in PPTEval.
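+
+ The three judges above score content, style, and coherence independently. If a single figure of merit were desired, one option is an unweighted mean of the three dimension scores; this aggregation rule is an assumption for illustration and is not prescribed by the prompts themselves.
+
+ ```python
+ from statistics import mean
+
+ # Hypothetical per-dimension judgments returned by the three prompts above.
+ scores = {"content": 4, "style": 3, "coherence": 5}
+
+ overall = mean(scores.values())  # assumed, unweighted aggregation
+ print(f"dimension scores: {scores}, overall: {overall:.2f}")
+ ```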
960
+
2501.03xxx/2501.03936/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1ad7b2fecb40768fa31a985c4dc6d76afc45dbb1ad442a17b4def7a4bfe4dae
3
+ size 750095
2501.03xxx/2501.03936/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03939/10878171-fb3a-4a9c-9c00-9e4356a62c21_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a27032539a39a5cbfbd120cd345953dca9927e624764e273cc86c45ae25b30de
3
+ size 1051797
2501.03xxx/2501.03939/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2501.03xxx/2501.03939/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ba329222e160dbe2b23035a12ed0826cec777aa1f1820a161cf4c1ebf82bc0a
3
+ size 1238964
2501.03xxx/2501.03939/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04001/561b2e22-1967-4178-81b9-be05fc459b25_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:498b8880ba4cdf530722041fb641100a1b918573b5fee76513fa2bfe45bbcc41
3
+ size 14867991
2501.04xxx/2501.04001/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04001/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8010588861fc939febc2b4c68f147efc770a8c2005cfb565359fa4779657fef
3
+ size 1499402
2501.04xxx/2501.04001/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04003/fe7989b2-6d25-4d52-94ba-58857e0d7417_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca2a8c66ed3defb8dd20cf781a553f6d30198fdaf8194c22c7045056529b9930
3
+ size 6315785
2501.04xxx/2501.04003/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04003/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce8365afb72eed1a9a520ee60d2dec2145eec1ad2af9731390bde3219603d569
3
+ size 4145493
2501.04xxx/2501.04003/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2501.04xxx/2501.04164/62206504-e3ec-477f-bcdd-019e9d8a4ba1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88de24733e60fd66b0a585e20f07786678cfe9e2c0e048fc026f3e4562bd7b02
3
+ size 1589980
2501.04xxx/2501.04164/full.md ADDED
@@ -0,0 +1,679 @@
 
1
+ # Holographic Metasurface-Based Beamforming for Multi-Altitude LEO Satellite Networks
2
+
3
+ Qingchao Li, Member, IEEE, Mohammed El-Hajjar, Senior Member, IEEE, Kaijun Cao, Chao Xu, Senior Member, IEEE, Harald Haas, Fellow, IEEE, and Lajos Hanzo, Life Fellow, IEEE
4
+
5
+ Abstract—Low Earth Orbit (LEO) satellite networks are capable of improving the global Internet service coverage. In this context, we propose a hybrid beamforming design for holographic metasurface based terrestrial users in multi-altitude LEO satellite networks. Firstly, the holographic beamformer is optimized by maximizing the downlink channel gain from the serving satellite to the terrestrial user. Then, the digital beamformer is designed by conceiving a minimum mean square error (MMSE) based detection algorithm for mitigating the interference arriving from other satellites. To dispense with excessive overhead of full channel state information (CSI) acquisition of all satellites, we propose a low-complexity MMSE beamforming algorithm that only relies on the distribution of the LEO satellite constellation harnessing stochastic geometry, which can achieve comparable throughput to that of the algorithm based on the full CSI in the case of a dense LEO satellite deployment. Furthermore, it outperforms the maximum ratio combining (MRC) algorithm, thanks to its inter-satellite interference mitigation capacity. The simulation results show that our proposed holographic metasurface based hybrid beamforming architecture is capable of outperforming the state-of-the-art antenna array architecture in terms of its throughput, given the same physical size of the transceivers. Moreover, we demonstrate that the beamforming performance attained can be substantially improved by taking into account the mutual coupling effect, imposed by the dense placement of the holographic metasurface elements.
6
+
7
+ Index Terms—Low Earth Orbit (LEO) satellite communication, holographic metasurface, hybrid beamforming, inter-satellite interference, stochastic geometry.
8
+
9
+ # I. INTRODUCTION
10
+
11
+ ALTHOUGH traditional terrestrial communication networks have been widely rolled out across the globe for providing significantly increased throughput by leveraging a
12
+
13
+ This work was supported by the Future Telecoms Research Hub, Platform for Driving Ultimate Connectivity (TITAN), sponsored by the Department of Science Innovation and Technology (DSIT) and the Engineering and Physical Sciences Research Council (EPSRC) under Grant EP/X04047X/1 and Grant EP/Y037243/1. M. El-Hajjar would like to acknowledge the financial support of the Engineering and Physical Sciences Research Council (EPSRC) projects under grant EP/X04047X/2, EP/X04047X/1 and EP/Y037243/1. L. Hanzo would also like to acknowledge the financial support of the Engineering and Physical Sciences Research Council (EPSRC) projects under grant EP/W016605/1, EP/X01228X/1, EP/Y026721/1, EP/W032635/1, as well as of the European Research Council's Advanced Fellow Grant QuantCom (Grant No. 789028). (Corresponding author: Lajos Hanzo.)
14
+
15
+ Qingchao Li, Mohammed El-Hajjar, Chao Xu and Lajos Hanzo are with the School of Electronics and Computer Science, University of Southampton, Southampton SO17 1BJ, U.K. (e-mail: qingchao.li@soton.ac.uk; meh@ecs.soton.ac.uk; cx1g08@ecs.soton.ac.uk; lh@ecs.soton.ac.uk).
16
+
17
+ Kaijun Cao is with the School of Mathematical Sciences, University of Southampton, Southampton SO17 1BJ, U.K. (e-mail: kc8g22@soton.ac.uk).
18
+
19
+ Harald Haas is with the Department of Engineering, Electrical Engineering Division, Cambridge University, CB3 0FA Cambridge, U.K. (e-mail: huh21@cam.ac.uk).
20
+
21
+ whole suite of sophisticated techniques, it is still challenging to support global Internet connectivity [1], [2]. The growing interest in satellite constellations harnessed for ubiquitous communications and global Internet coverage highlights the importance of low Earth Orbit (LEO) satellites in bridging the digital divide and enhancing connectivity in remote regions [3], [4]. Compared to its Geostationary Earth Orbit (GEO) and Medium Earth Orbit (MEO) counterparts, LEO satellites offer notable advantages such as reduced latency and higher data rates due to their proximity to the Earth [5], [6].
22
+
23
+ # A. Related Work
24
+
25
+ Substantial research efforts have also been dedicated to the exploration of various aspects in LEO satellite communications, including multiple-input and multiple-output (MIMO) technologies, hybrid beamforming methods, and robust secure transmission strategies. Specifically, Li et al. [7] focused their attention on the design of downlink transmission strategies for massive MIMO LEO satellite communications. It was demonstrated that using massive MIMO is beneficial for enhancing the spectral efficiency and mitigating the limitations imposed by the dynamic nature of LEO satellites. In this context, the advanced beamforming designs have been conceived for robust communications between satellites and the base stations (BSs) on the ground. Simulation results indicated significant throughput improvements, demonstrating the benefits of harnessing massive MIMO in LEO satellite systems. To avoid the excessive complexity and energy consumption of the fully digital beamforming architecture, You et al. [8] conceived hybrid analog-digital precoding techniques for LEO satellite systems. Specifically, a novel hybrid precoding architecture was proposed relying on statistical channel state information (CSI) for maximizing the energy efficiency and/or reducing the complexity. Their simulation results demonstrated that the proposed hybrid precoding schemes achieve significant energy efficiency gains over existing baselines, especially when discrete phase shift networks are employed for analog precoding. Considering the imperfect hardware factors, including low resolution phase shifters and nonlinear power amplifiers (NPAs), an efficient algorithmic approach was formulated in [9] for LEO satellite networks in the context of a twin-resolution phase shifting (TRPS) based hybrid precoding problem. This design struck an attractive energy efficiency versus computational complexity trade-off. In [10], Huang et al. presented a quality of service (QoS)-aware precoding design, which aimed for optimizing both the energy efficiency and user
26
+
27
+ satisfaction in the downlink of massive MIMO aided LEO satellite communications. A multi-objective optimization problem was formulated by striking a trade-off between the energy efficiency and the proportion of users meeting their QoS requirements. Furthermore, an efficient algorithm was developed for solving a multi-objective problem, resulting in improved QoS and energy performance. In [11], Liu et al. focused on robust downlink precoding strategies for LEO satellite systems operating under per-antenna power constraints. Moreover, a robust precoding scheme was proposed for optimizing the performance under individual power limits for each antenna. The proposed technique enhanced the communications integrity, particularly in environments associated with fluctuating channel conditions and stringent power constraints.
28
+
29
+ Although the massive MIMO technique is of substantial benefit for high-speed data services and global connectivity in LEO satellite networks, traditional antenna arrays face limitations in terms of scalability, cost, and power consumption. It is infeasible to realize extremely large-scale MIMO (XL-MIMO) schemes relying on a large number of conventional radio frequency (RF) chains and active antennas due to the excessive power consumption requirements [12], [13]. As a remedy, holographic MIMO (HMIMO) technology has emerged as a promising design alternative, exhibiting improved hardware efficiency and energy efficiency. This ambitious objective is achieved by utilizing a spatially near-continuous aperture and holographic radios having reduced power consumption and fabrication cost [14], [15], [16], [17]. Recent advances in non-terrestrial networks highlighted the effectiveness of metasurface-based receiver architectures, which are capable of enhancing the communication quality and mitigating interference through configurable multi-layer or multi-functional metasurface designs [18], [19], [20]. In [21], Deng et al. introduced a reconfigurable holographic surface (RHS) aided uplink communication system, where a user terminal equipped with an RHS transmits data to multiple LEO satellites. A novel holographic beamforming algorithm was proposed for maximizing the sum rate, which was proven to be robust against tracking errors in satellite positions. Their simulation results indicated that the RHS outperforms traditional phased arrays in terms of its sum-rate and cost-efficiency, owing to its compact element spacing and low hardware costs. Furthermore, in [22], a closed-form expression was derived for maximizing the sum rate of LEO satellite communications relying on an RHS. The authors theoretically analyzed the minimum number of RHS elements required for the sum-rate of the RHS-aided system to exceed that of the phased array system. The simulation results showed that RHS-based LEO satellite communication is capable of outperforming traditional phased array based systems in terms of both the sum rate and hardware efficiency. Stacked intelligent metasurfaces (SIM) are also capable of improving the performance of LEO satellite communications. Lin et al. [23] proposed a SIM-based multi-beam LEO system, which performs downlink precoding in the wave domain for reducing both the processing latency and computational burden. Based on the statistical CSI, an optimization problem was formulated for maximizing the ergodic sum rate, which was solved by a customized alternating optimization algorithm.
30
+
31
+ The results demonstrated significant improvements in sum rate and computational efficiency compared to traditional digital precoding methods.
32
+
33
+ The above contributions were focused on single LEO satellite architectures. Considering the ultra-dense deployment of LEO satellites to achieve global connectivity, several research efforts were dedicated to LEO constellations, resulting in inter-satellite interference. In [24], Okati et al. derived the analytically tractable expression of the downlink coverage probability and the data rate of LEO satellite constellations relying on stochastic geometry. Jung et al. [25] focused on the performance analysis of LEO satellite communication systems under the shadowed-Rician fading model. The binomial point process (BPP) was employed to model the distribution of LEO satellites. Based on this, both the outage probability and the system throughput were evaluated. In [26], Park et al. presented a tractable method for the downlink coverage analysis, highlighting the pivotal role of satellite density and altitude in optimizing the network performance attained. This approach facilitates the efficient characterization of diverse deployment scenarios. Moreover, Sun et al. in [27] utilized a homogeneous Poisson Point Process (PPP) to analyze LEO networks, focusing on user fairness and transmission reliability. It showed that deploying satellites at lower altitudes benefits dense networks by enhancing both the coverage probability and user fairness, while higher altitudes are preferable for sparser networks. In [28], Choi et al. provided a comprehensive analysis of downlink communications in heterogeneous LEO satellite networks using Cox point processes to model the distribution of satellites. They characterized both closed and open access scenarios, demonstrating that open access significantly improves the coverage probability. Complementing the homogeneous models, Okati et al. in [29] applied non-homogeneous stochastic geometry for analyzing massive LEO satellite constellations. Their approach accounts for the variations in satellite density and spatial distribution across different regions. Then various new performance metrics, such as the conditional coverage probability and user throughput, were derived under non-homogeneous conditions. The results demonstrated that considering non-homogeneous distributions provides a more accurate representation of real-world satellite networks. Furthermore, Hu et al. [30] investigated the end-to-end performance of LEO satellite-aided shore-to-ship communications using stochastic geometry in maritime communication contexts. They evaluated the impact of influential factors such as satellite altitude, transmission power and environmental conditions on the communication links between ports and ships.
34
+
35
+ To meet the growing demand of seamless global connectivity and efficient communication infrastructure, multi-altitude LEO satellite networks have emerged as a pivotal solution. They can offer numerous advantages in terms of coverage, latency, and bandwidth efficiency over the single-altitude LEO satellite architecture in [24]–[30]. In particular, Okati et al. [31] provided a comprehensive analysis of the coverage probabilities in multi-altitude LEO satellite networks, where the satellites are modelled as a BPP assuming their altitude is an arbitrarily distributed random variable. Their simulation re
36
+
37
+ TABLE I CONTRASTING THE NOVELTY OF OUR PAPER TO THE EXISTING LEO SATELLITE COMMUNICATIONS LITERATURE [7], [8], [9], [10], [11], [21], [22], [23], [24], [25], [26], [27], [28], [29], [30], [31], [32].
38
+
39
+ <table><tr><td></td><td>Our paper</td><td>[7], [8], [9], [10]</td><td>[21], [22], [23]</td><td>[24], [25], [26], [27], [28], [29], [30]</td><td>[31], [32]</td></tr><tr><td>Beamforming design</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td></tr><tr><td>Holographic metasurface</td><td>✓</td><td></td><td>✓</td><td></td><td></td></tr><tr><td>LEO satellite constellation</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td></tr><tr><td>Multi-altitude satellites</td><td>✓</td><td></td><td></td><td></td><td>✓</td></tr><tr><td>Inter-satellite interference mitigation</td><td>✓</td><td></td><td></td><td></td><td></td></tr></table>
40
+
41
+ sults showed that the coverage performance becomes saturated when the constellation size reaches a certain threshold. In [32], Choi et al. introduced an innovative technique of modeling satellite networks using Cox point processes. Specifically, the orbits vary in altitude and the distribution of satellites on each orbit are modelled as a linear PPP. Some useful statistics, including the distribution of the distance from the typical terrestrial user to its nearest visible satellite and the outage probability, were theoretically derived.
42
+
43
+ # B. Motivation
44
+
45
+ The above LEO satellite communication architectures have the following limitations. Firstly, the existing beamforming designs conceived for multi-satellite networks ignore the inter-satellite interference, which significantly limits the data rate in dense satellite constellations. Although inter-satellite interference can be mitigated through cooperative satellite networks, as suggested in [33], this comes at an increased cost of establishing and maintaining inter-satellite links. Secondly, the above beamforming designs rely on the acquisition of full CSI, which significantly increases the communication overhead. The process of obtaining full CSI across all satellite links not only requires substantial bandwidth but also leads to high computational complexity, especially in dense satellite constellations. To deal with these challenges, we propose a holographic metasurface-based beamforming architecture for multi-altitude LEO satellite networks, purely relying on the statistical distribution of the LEO satellite constellation. Against this background, Table I explicitly contrasts our contributions to the literature at a glance, which are further detailed as follows.
46
+
47
+ - The holographic metasurfaces achieve high directional gain despite using a compact antenna for mitigating the severe path loss of satellite communications. In this paper, we conceive a metasurface based hybrid holographic and digital beamforming architecture for the multi-altitude LEO satellite downlink, where a holographic metasurface is employed by the terrestrial user for spectral-efficient information transfer.
48
+ - Since designing the metasurface coefficients of the holographic beamformer and of the digital receiver combining (RC) vector is a non-convex problem, we decompose it into two sub-problems. Specifically, the RF holographic beamformer is optimized for maximizing the channel gain from the serving satellite to the terrestrial user. Once the holographic beamformer weights are given, the baseband equivalent channel from satellites to the RF
49
+
50
+ chains can be obtained. Afterwards, the digital RC vector is optimized based on the minimum mean square error (MMSE) criterion for mitigating the interference imposed by the interfering satellites on the terrestrial user.
51
+
52
+ - To avoid the high overhead of acquiring full CSI, we propose a low-complexity MMSE detection algorithm. In this approach, the digital beamformer is designed based on the statistical characteristics of the LEO satellite constellation, specifically leveraging the average number and spatial distribution of interfering satellites within the visible region through stochastic geometry. By focusing on the distributional properties of visible interfering satellites, this method significantly reduces channel estimation complexity.
53
+ - Our numerical results show that the proposed holographic metasurface based hybrid beamforming architecture is capable of achieving higher throughput than the state-of-the-art (SoA) antenna array architecture. More explicitly, they show that the MMSE RC algorithm outperforms the maximum ratio combining (MRC) algorithm, thanks to the inter-satellite interference mitigation. The throughput can be improved by explicitly considering the mutual coupling effect in the beamforming design, which arises due to the dense placement of the holographic metasurface elements. Furthermore, the proposed low-complexity MMSE algorithm based on the distribution of the satellites can achieve similar throughput to that of the idealized algorithm based on the perfect knowledge of the full CSI associated with all satellites in the case of a dense deployment of LEO satellites.
54
+
55
+ # C. Organization
56
+
57
+ The rest of this paper is organized as follows. In Section II, we present the system model, while both the hybrid holographic and the digital beamforming design is described in Section III. Our simulation results are presented in Section IV, while we conclude in Section V.
58
+
59
+ # D. Notations
60
+
61
+ Vectors and matrices are denoted by boldface lower and upper case variables, respectively; sets are denoted by calligraphic letters; $(\cdot)^{\mathrm{T}}$ and $(\cdot)^{\mathrm{H}}$ represent the operation of transpose and Hermitian transpose, respectively; $|a|$ and $\angle a$ denote the amplitude and angle of the complex scalar $a$ , respectively; $|\mathcal{A}|$ represents the cardinality of the set $\mathcal{A}$ ; $\|\mathbf{a}\|$ denotes the norm of the vector $\mathbf{a}$ ; $\mathbb{C}^{m\times n}$ is the space of $m\times n$
62
+
63
+ ![](images/f8976f3038f56ab73f9bbefaa5f4b2c204cd45475e08510433323b64cc7877e6.jpg)
64
+ Fig. 1. System model of holographic metasurface-based multi-altitude LEO satellite networks.
65
+
66
+ complex-valued matrices; $\mathbf{0}_N$ is the $N\times 1$ zero vector; $\mathbf{I}_N$ represents the $N\times N$ identity matrix; $\mathbf{Diag}\{a_1,a_2,\dots ,a_N\}$ denotes a diagonal matrix having elements of $a_1,a_2,\dots ,a_N$ in order; $a_{n}$ is the nth element in the vector $\mathbf{a}$ ; $\mathcal{CN}(\pmb {\mu},\pmb {\Sigma})$ is a circularly symmetric complex Gaussian random vector with the mean $\pmb{\mu}$ and covariance matrix $\pmb{\Sigma}$ ; $f_{X}(x)$ and $F_{X}(x)$ represent the probability density function (PDF) and the cumulative distribution function (CDF) of the random variable $X$ , respectively.
67
+
68
+ # II. SYSTEM MODEL
69
+
70
+ In this section, we describe our proposed holographic metasurface-based multi-altitude LEO satellite network. The system model of the downlink multi-altitude LEO satellite network is shown in Fig. $1^{1}$ . In contrast to the conventional single-altitude LEO satellite networks, which consider a constellation consisting of LEO satellites at the same altitude, the multi-altitude LEO satellite network considers a constellation consisting of LEO satellites at different altitudes, positioned between $H_{1}$ and $H_{2}$ . We assume that the Earth is a perfect sphere with a radius of $R_{\mathrm{e}}$ centred at the origin of $(0,0,0)\in \mathbb{R}^3$ in the three-dimensional (3D) Cartesian coordinate system. We also assume that the single-antenna LEO satellites are uniformly distributed within the shell determined by $\Omega = \{R_{\mathrm{e}} + H_1\leq \sqrt{x^2 + y^2 + z^2}\leq R_{\mathrm{e}} + H_2|(x,y,z)\in \mathbb{R}^3\}$ forming a 3D BPP [25], denoted as $\mathcal{A}$ , ensuring an even spatial distribution across the different altitudes in the specified range. Furthermore, we denote the number of satellites within the region of $\Omega$ as $|\mathcal{A}|$ .
71
+
72
+ Each terrestrial user is served by the nearest satellite, which is referred to as the serving satellite, resulting in a spherical Voronoi tessellation for the satellites' coverage
73
+
74
+ <sup>1</sup>In this paper, we investigate narrowband satellite communication networks. In wideband networks, both spatial-wideband effects and frequency-selective effects should be considered [34]. The satellite communications of wideband networks considering the spatial-wideband effect and the frequency-selective effect is part of our future work.
75
+
76
+ ![](images/445b5f38a655e3a4a77b846a0730996050fc25371b14a854b75a6df39fa46530.jpg)
77
+ Fig. 2. Holographic metasurface-based hybrid beamforming architecture.
78
+
79
+ areas. We assume that all satellites in the region of $\Omega$ share the same time/frequency resource. Hence, a typical terrestrial terminal served by the nearest satellite is interfered by all other satellites. We consider a typical terrestrial user located at the Cartesian coordinate of $(0,0,R_{\mathrm{e}})$ . We denote the serving satellite as the 0th satellite, while the interfering satellites are indexed as the 1st, 2nd, $\dots$ , $(|\mathcal{A}| - 1)$ st satellites, based on their distances from the terrestrial user considered. Furthermore, we denote the coordinates of these interfering satellites as $\mathbf{p}_1,\mathbf{p}_2,\dots ,\mathbf{p}_{|\mathcal{A}| - 1}$ , respectively. As illustrated in Fig. 1, all the satellites that are visible above the horizon can communicate with the terrestrial user. The visible region can be represented as $\Omega^{\prime} = \{z\geq R_{\mathrm{e}}|(x,y,z)\in \Omega \}$ .
80
+
81
+ # A. Holographic Metasurface-Based Beamforming
82
+
83
+ Due to the large distance from the satellites to the terrestrial users, a holographic metasurface is employed by each terrestrial user to compensate for the signal attenuation due to the path loss. The complete architecture includes a holographic beamformer and a digital beamformer.
84
+
85
+ As shown in Fig. 2, the holographic metasurface of the beamformer is composed of $M$ microstrips, each of which is connected to an RF-chain. Each microstrip consists of three components, including a feed, a waveguide and $N$ subwavelength metamaterial elements. Specifically, each element in the microstrip is made of artificial composite material, which is capable of adjusting the coefficients of the electromagnetic (EM) waves with the aid of a software controller, such as a field programmable gate array (FPGA) [35], [36], [37], [38], [39]. The waveguide acts as the propagation medium of the EM wave spanning from the reconfigurable metasurface elements to the feed. The feed then transforms the EM wave into high frequency current for the RF-chain. Afterwards, the RF-chain converts the RF signals to the baseband signals for the digital beamformer.
86
+
87
+ We denote the weighting coefficient of the $n$ th reconfigurable metasurface element at the $m$ th microstrip as $\beta_{n}^{(m)}$ . Under the Lorentzian-constrained phase model of [34], [40], [41], we can get
88
+
89
+ $$
90
+ \beta_{n}^{(m)} = \frac{\jmath + \mathrm{e}^{\jmath \phi_{n}^{(m)}}}{2}, \quad m = 1, 2, \dots, M, \; n = 1, 2, \dots, N, \tag{1}
91
+ $$
92
+
93
+ where $\phi_n^{(m)}\in [0,2\pi)$ . Furthermore, the response from the $n$ th reconfigurable metasurface element at the $m$ th microstrip to its connected feed is given by
94
+
95
+ $$
96
+ q _ {n} ^ {(m)} = \mathrm {e} ^ {- j \frac {2 \pi}{\lambda} \xi_ {n} ^ {(m)}}, \tag {2}
97
+ $$
98
+
99
+ where $\xi_{n}^{(m)}$ is the distance between the corresponding reconfigurable metasurface element and its connected feed.
100
+
101
+ # B. Channel Model
102
+
103
+ In this section, we describe the channel model between the satellites and the terrestrial user. We denote the link spanning from the $l$ th satellite to the $m$ th microstrip of the terrestrial user as $\mathbf{f}^{(l,m)}\in \mathbb{C}^{N\times 1}$ , given by
104
+
105
+ $$
106
+ \mathbf {f} ^ {(l, m)} = \sqrt {\varrho_ {l}} \mathbf {g} ^ {(l, m)}. \tag {3}
107
+ $$
108
+
109
+ In (3), $\varrho_{l}$ represents the link attenuation between the $l$ th satellite and the terrestrial user, given by
110
+
111
+ $$
112
+ \varrho_ {l} = \varsigma \zeta \left(\frac {\lambda}{4 \pi}\right) ^ {2} D _ {l} ^ {- \alpha}, \tag {4}
113
+ $$
114
+
115
+ where $\lambda$ denotes the carrier wavelength, $\varsigma$ is the rain attenuation coefficient, $\zeta$ represents the antenna gain, $\alpha$ denotes the path loss exponent and $D_{l}$ is the distance between the $l$ th satellite and the terrestrial user. Furthermore, $\mathbf{g}^{(l,m)}$ represents the small-scale fading. Referring to [42], the shadowed-Rician fading model is widely employed for describing the distribution of the channel links in satellite communications. Specifically, the $n$ th entry in $\mathbf{g}^{(l,m)}$ , denoted as $g_{n}^{(l,m)}$ , represents the small scale fading between the $l$ th satellite and the $n$ th reconfigurable metasurface element of the $m$ th microstrip at the terrestrial user. We denote the shadowed-Rician fading as $\mathcal{SR}(\omega, b_{0}, \upsilon)$ , where $\omega$ is the average power of the line-of-sight (LoS) component, $2b_{0}$ is the average power of the scattered component and $\upsilon$ is the Nakagami parameter. The CDF of the channel's power gain $|g_{n}^{(l,m)}|^{2}$ is given by
116
+
117
+ $$
118
+ F_{|g_{n}^{(l,m)}|^{2}}(x) = \left(\frac{2 b_{0}}{2 b_{0} \upsilon + \omega}\right)^{\upsilon} \sum_{i=0}^{\infty} \frac{(\upsilon)_{i}}{i!\, \Gamma(i+1)} \left(\frac{\omega}{2 b_{0} \upsilon + \omega}\right)^{i} \Gamma_{\mathrm{i}}\left(i+1, \frac{1}{2 b_{0}} x\right), \tag{5}
119
+ $$
124
+
125
+ where $(\upsilon)_i$ is the Pochhammer symbol, $\Gamma(\cdot)$ is the Gamma function, and $\Gamma_{\mathrm{i}}(\cdot, \cdot)$ is the lower incomplete Gamma function.
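+
+ A small numerical sketch of evaluating (5) by truncating the series is shown below. Since $\gamma(i+1, y) = \Gamma(i+1)\,P(i+1, y)$, with $P$ the regularized lower incomplete Gamma function, the $\Gamma(i+1)$ in (5) cancels, and the coefficient $(\upsilon)_i r^i / i!$ can be accumulated recursively to avoid overflow. The fading parameters are example values for illustration.
+
+ ```python
+ from scipy.special import gammainc  # regularized lower incomplete Gamma P(a, y)
+
+ def shadowed_rician_cdf(x, omega=0.835, b0=0.126, ups=10.1, terms=200):
+     """Truncated-series evaluation of the CDF in (5).
+
+     coeff holds (ups)_i * r**i / i!, built up recursively, and
+     gamma(i+1, y) / Gamma(i+1) is obtained directly as gammainc(i+1, y).
+     """
+     r = omega / (2 * b0 * ups + omega)
+     y = x / (2 * b0)
+     total, coeff = 0.0, 1.0
+     for i in range(terms):
+         total += coeff * gammainc(i + 1, y)
+         coeff *= (ups + i) * r / (i + 1)
+     return (2 * b0 / (2 * b0 * ups + omega)) ** ups * total
+
+ print(shadowed_rician_cdf(1.0))  # CDF of the channel power gain at x = 1
+ ```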
126
+
127
+ For the mutual coupling, we adopt the $Z$ -parameter based representation to model this effect [43] $^2$ . Specifically, the mutual coupling matrix, denoted as $\mathbf{C} \in \mathbb{C}^{MN \times MN}$ , is
128
+
129
+ $$
130
+ \mathbf {C} = \left(Z _ {A} + Z _ {L}\right) \left(\mathbf {Z} + Z _ {L} \mathbf {I}\right) ^ {- 1}, \tag {6}
131
+ $$
132
+
133
+ where $Z_{A}$ is the antenna impedance and $Z_{L}$ is the load impedance, both of which are fixed as 50 Ohms. Furthermore, $\mathbf{Z} \in \mathbb{C}^{MN \times MN}$ is the mutual impedance matrix, with the $(i_1, i_2)$ th entry represented as
134
+
135
+ $$
136
+ \mathbf {Z} _ {i _ {1}, i _ {2}} = Z _ {A}, \tag {7}
137
+ $$
138
+
139
+ <sup>2</sup>The mutual coupling model used here is based on the analysis in [43] and has been widely applied in holographic metasurfaces, demonstrating its general applicability in dense antenna arrays [34].
140
+
141
+ for $i_1 = i_2$ , and
142
+
143
+ $$
144
+ \begin{array}{l} \mathbf{Z}_{i_{1}, i_{2}} = 60\, \mathcal{C}_{\mathrm{I}}\left(\frac{2\pi d_{i_{1}, i_{2}}}{\lambda}\right) - \jmath\, 60\, \mathcal{S}_{\mathrm{I}}\left(\frac{2\pi d_{i_{1}, i_{2}}}{\lambda}\right) \\ - 30\, \mathcal{C}_{\mathrm{I}}\left(\frac{2\pi\left(\tilde{d}_{i_{1}, i_{2}} + \delta_{0}\right)}{\lambda}\right) + \jmath\, 30\, \mathcal{S}_{\mathrm{I}}\left(\frac{2\pi\left(\tilde{d}_{i_{1}, i_{2}} + \delta_{0}\right)}{\lambda}\right) \\ - 30\, \mathcal{C}_{\mathrm{I}}\left(\frac{2\pi\left(\tilde{d}_{i_{1}, i_{2}} - \delta_{0}\right)}{\lambda}\right) + \jmath\, 30\, \mathcal{S}_{\mathrm{I}}\left(\frac{2\pi\left(\tilde{d}_{i_{1}, i_{2}} - \delta_{0}\right)}{\lambda}\right), \tag{8} \\ \end{array}
145
+ $$
146
+
147
+ for $i_1 \neq i_2$, where $\tilde{d}_{i_1,i_2} = \sqrt{d_{i_1,i_2}^2 + \delta_0^2}$. In (8), $d_{i_1,i_2}$ represents the distance between the $i_1$th and the $i_2$th metasurface element, $\delta_0$ is the dipole length, $\mathcal{C}_{\mathrm{I}}$ denotes the cosine integral and $\mathcal{S}_{\mathrm{I}}$ represents the sine integral.
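+
+ A sketch of evaluating the off-diagonal entries of $\mathbf{Z}$ in (8) with SciPy follows; `scipy.special.sici` returns the sine and cosine integrals, in that order. The spacing, wavelength, and dipole length are example values, and the sketch assumes the parallel-dipole form of (8).
+
+ ```python
+ import numpy as np
+ from scipy.special import sici  # sici(x) returns (Si(x), Ci(x))
+
+ def mutual_impedance(d, wavelength, delta0):
+     """Off-diagonal entry Z_{i1,i2} of (8) for element spacing d."""
+     d_t = np.sqrt(d**2 + delta0**2)
+     u0 = 2 * np.pi * d / wavelength
+     u1 = 2 * np.pi * (d_t + delta0) / wavelength
+     u2 = 2 * np.pi * (d_t - delta0) / wavelength
+     si0, ci0 = sici(u0)
+     si1, ci1 = sici(u1)
+     si2, ci2 = sici(u2)
+     return (60 * ci0 - 30 * ci1 - 30 * ci2) - 1j * (60 * si0 - 30 * si1 - 30 * si2)
+
+ wavelength = 0.01
+ print(mutual_impedance(wavelength / 4, wavelength, wavelength / 2))
+ ```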
148
+
149
+ # C. Distribution of Satellite Constellation
150
+
151
+ In this section, we theoretically derive the distribution of the distance between the satellites and the terrestrial user.
152
+
153
+ Firstly, we derive the CDF of the distance from any specific satellite in the visible region $\Omega'$ to the terrestrial user. For ease of exposition, we define a spherical cap $\Omega_1' = \{x^2 + y^2 + z^2 \leq (R_{\mathrm{e}} + H_1)^2, z \geq R_{\mathrm{e}} | (x, y, z) \in \mathbb{R}^3\}$. Then its volume $V_1'$ can be calculated as
154
+
155
+ $$
156
+ V_{1}^{\prime} = \frac{\pi}{3} H_{1}^{2} \left(3 R_{\mathrm{e}} + 2 H_{1}\right). \tag{9}
157
+ $$
158
+
159
+ Similarly, we define a second spherical cap $\Omega_2' = \{x^2 + y^2 + z^2 \leq (R_{\mathrm{e}} + H_2)^2, z \geq R_{\mathrm{e}} | (x, y, z) \in \mathbb{R}^3\}$ and its volume $V_2'$ , which can be calculated as
160
+
161
+ $$
162
+ V_{2}^{\prime} = \frac{\pi}{3} H_{2}^{2} \left(3 R_{\mathrm{e}} + 2 H_{2}\right). \tag{10}
163
+ $$
164
+
165
+ Thus, the volume of the visible shell can be calculated as
166
+
167
+ $$
168
+ \begin{array}{l} V ^ {\prime} = V _ {1} ^ {\prime} - V _ {2} ^ {\prime} \\ = \frac {\pi}{3} \left(H _ {2} \left(2 R _ {\mathrm {e}} + H _ {2}\right) \left(3 R _ {\mathrm {e}} + 2 H _ {2}\right) \right. \\ - H _ {1} \left(2 R _ {\mathrm {e}} + H _ {1}\right) \left(3 R _ {\mathrm {e}} + 2 H _ {1}\right). \tag {11} \\ \end{array}
169
+ $$
170
+
171
+ Theorem 1. The CDF of the distance $F_{D'}(d)$ from any specific satellite in the visible shell $\Omega'$ to the terrestrial user can be formulated as shown in (12), when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ , and as shown in (13) otherwise.
172
+
173
+ Proof: See Appendix A.
+
+ $$
+ F_{D^{\prime}}(d) = \left\{ \begin{array}{ll} 0, & d \in [0, H_{1}) \\ \frac{\pi}{4 R_{\mathrm{e}} V^{\prime}} d^{4} + \frac{2\pi}{3 V^{\prime}} d^{3} - \frac{\pi H_{1}(2 R_{\mathrm{e}} + H_{1})}{2 R_{\mathrm{e}} V^{\prime}} d^{2} + \frac{\pi H_{1}^{3}(4 R_{\mathrm{e}} + 3 H_{1})}{12 R_{\mathrm{e}} V^{\prime}}, & d \in [H_{1}, \sqrt{H_{1}(2 R_{\mathrm{e}} + H_{1})}) \\ \frac{2\pi}{3 V^{\prime}} d^{3} - \frac{\pi H_{1}^{2}(3 R_{\mathrm{e}} + 2 H_{1})}{3 V^{\prime}}, & d \in [\sqrt{H_{1}(2 R_{\mathrm{e}} + H_{1})}, H_{2}) \\ -\frac{\pi}{4 R_{\mathrm{e}} V^{\prime}} d^{4} + \frac{\pi H_{2}(2 R_{\mathrm{e}} + H_{2})}{2 R_{\mathrm{e}} V^{\prime}} d^{2} - \frac{\pi\left(H_{2}^{3}(4 R_{\mathrm{e}} + 3 H_{2}) + 4 R_{\mathrm{e}} H_{1}^{2}(3 R_{\mathrm{e}} + 2 H_{1})\right)}{12 R_{\mathrm{e}} V^{\prime}}, & d \in [H_{2}, \sqrt{H_{2}(2 R_{\mathrm{e}} + H_{2})}) \\ 1, & d \in [\sqrt{H_{2}(2 R_{\mathrm{e}} + H_{2})}, \infty) \end{array} \right. \tag{12}
+ $$
+
+ $$
+ F_{D^{\prime}}(d) = \left\{ \begin{array}{ll} 0, & d \in [0, H_{1}) \\ \frac{\pi}{4 R_{\mathrm{e}} V^{\prime}} d^{4} + \frac{2\pi}{3 V^{\prime}} d^{3} - \frac{\pi H_{1}(2 R_{\mathrm{e}} + H_{1})}{2 R_{\mathrm{e}} V^{\prime}} d^{2} + \frac{\pi H_{1}^{3}(4 R_{\mathrm{e}} + 3 H_{1})}{12 R_{\mathrm{e}} V^{\prime}}, & d \in [H_{1}, H_{2}) \\ \frac{\pi H_{2}(2 R_{\mathrm{e}} + H_{2})}{2 R_{\mathrm{e}} V^{\prime}} d^{2} - \frac{\pi\left(H_{2}^{3}(4 R_{\mathrm{e}} + 3 H_{2}) - H_{1}^{3}(4 R_{\mathrm{e}} + 3 H_{1})\right)}{12 R_{\mathrm{e}} V^{\prime}}, & d \in [H_{2}, \sqrt{H_{1}(2 R_{\mathrm{e}} + H_{1})}) \\ -\frac{\pi}{4 R_{\mathrm{e}} V^{\prime}} d^{4} + \frac{\pi H_{2}(2 R_{\mathrm{e}} + H_{2})}{2 R_{\mathrm{e}} V^{\prime}} d^{2} - \frac{\pi\left(H_{2}^{3}(4 R_{\mathrm{e}} + 3 H_{2}) + 4 R_{\mathrm{e}} H_{1}^{2}(3 R_{\mathrm{e}} + 2 H_{1})\right)}{12 R_{\mathrm{e}} V^{\prime}}, & d \in [\sqrt{H_{1}(2 R_{\mathrm{e}} + H_{1})}, \sqrt{H_{2}(2 R_{\mathrm{e}} + H_{2})}) \\ 1, & d \in [\sqrt{H_{2}(2 R_{\mathrm{e}} + H_{2})}, \infty) \end{array} \right. \tag{13}
+ $$
174
+
175
+ Corollary 1. By taking the derivative of the CDF of the distance from any specific satellite in the visible shell $\Omega'$ to the terrestrial user as described in Theorem 1, we arrive at its PDF formulated as
176
+
177
+ $$
178
+ f_{D^{\prime}}(d) = \left\{ \begin{array}{ll} \frac{\pi}{R_{\mathrm{e}} V^{\prime}} d^{3} + \frac{2\pi}{V^{\prime}} d^{2} - \frac{\pi \tilde{H}_{1}^{2}}{R_{\mathrm{e}} V^{\prime}} d, & d \in [H_{1}, \tilde{H}_{1}) \\ \frac{2\pi}{V^{\prime}} d^{2}, & d \in [\tilde{H}_{1}, H_{2}) \\ -\frac{\pi}{R_{\mathrm{e}} V^{\prime}} d^{3} + \frac{\pi \tilde{H}_{2}^{2}}{R_{\mathrm{e}} V^{\prime}} d, & d \in [H_{2}, \tilde{H}_{2}) \\ 0, & d \notin [H_{1}, \tilde{H}_{2}) \end{array} \right. \tag{14}
179
+ $$
180
+
181
184
+
185
+ when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ and as
186
+
187
+ $$
188
+ f _ {D ^ {\prime}} (d) = \left\{ \begin{array}{l l} \frac {\pi}{R _ {\mathrm {e}} V ^ {\prime}} d ^ {3} + \frac {2 \pi}{V ^ {\prime}} d ^ {2} - \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d, & d \in [ H _ {1}, H _ {2}) \\ \frac {\pi \tilde {H} _ {2} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d, & d \in [ H _ {2}, \tilde {H} _ {1}) \\ - \frac {\pi}{R _ {\mathrm {e}} V ^ {\prime}} d ^ {3} + \frac {\pi \tilde {H} _ {2} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d, & d \in [ \tilde {H} _ {1}, \tilde {H} _ {2}) \\ 0, & d \notin [ H _ {1}, \tilde {H} _ {2}) \end{array} , \right. \tag {15}
189
+ $$
190
+
191
+ otherwise. In (14) and (15), we have $\tilde{H}_1 = \sqrt{2R_{\mathrm{e}}H_1 + H_1^2}$ and $\tilde{H}_2 = \sqrt{2R_{\mathrm{e}}H_2 + H_2^2}$ .
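+
+ The distance distribution above can be checked by Monte-Carlo sampling: draw satellites uniformly in the shell $\Omega$, keep those above the horizon plane $z \geq R_{\mathrm{e}}$, and inspect the empirical distances to the user at $(0, 0, R_{\mathrm{e}})$. A minimal sketch with an assumed 500-600 km shell follows.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(3)
+ R_E, H1, H2 = 6371.0, 500.0, 600.0          # km; example multi-altitude shell
+ n = 200_000
+
+ # Uniform points in the shell: radius cubed is uniform, direction is isotropic.
+ r = rng.uniform((R_E + H1) ** 3, (R_E + H2) ** 3, n) ** (1 / 3)
+ z = r * rng.uniform(-1.0, 1.0, n)           # z-coordinate via uniform cos(theta)
+
+ visible = z >= R_E                          # satellites in the visible shell Omega'
+ d = np.sqrt(r[visible] ** 2 - 2 * R_E * z[visible] + R_E**2)
+ print(f"visible fraction ~ {visible.mean():.4f}, "
+       f"distance range [{d.min():.0f}, {d.max():.0f}] km")
+ ```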
192
+
193
+ Then, we derive the probability of the serving satellite being in the visible shell $\Omega^{\prime}$ as follows.
194
+
195
+ Corollary 2. The probability $P_{\mathrm{S}}$ of the serving satellite being in the visible shell $\Omega^{\prime}$ can be expressed as
196
+
197
+ $$
198
+ P _ {\mathrm {S}} = 1 - \left(1 - P ^ {\prime}\right) ^ {| \mathcal {A} |}, \tag {16}
199
+ $$
200
+
201
+ where $P^{\prime}$ denotes the probability of any specific satellite being in the visible shell $\Omega^{\prime}$ , given by
202
+
203
+ $$
204
+ P ^ {\prime} = \frac {H _ {2} ^ {2} \left(3 R _ {\mathrm {e}} + 2 H _ {2}\right) - H _ {1} ^ {2} \left(3 R _ {\mathrm {e}} + 2 H _ {1}\right)}{4 \left(\left(R _ {\mathrm {e}} + H _ {2}\right) ^ {3} - \left(R _ {\mathrm {e}} + H _ {1}\right) ^ {3}\right)}. \tag {17}
205
+ $$
206
+
207
+ Proof: According to (11), the probability of any specific satellite being in the visible shell $\Omega^{\prime}$ can be expressed as
208
+
209
+ $$
210
+ P ^ {\prime} = \frac {V ^ {\prime}}{V}. \tag {18}
211
+ $$
212
+
213
+ In (18), $V$ represents the volume of the shell $\Omega$ , given by
214
+
215
+ $$
216
+ V = \frac{4\pi}{3} \left(\left(R_{\mathrm{e}} + H_{2}\right)^{3} - \left(R_{\mathrm{e}} + H_{1}\right)^{3}\right), \tag{19}
217
+ $$
218
+
219
+ and $V^{\prime}$ is the volume of the visible shell $\Omega^{\prime}$ , as shown in (11). According to (11), (18) and (19), $P^{\prime}$ can be expressed as shown in (17).
220
+
221
+ Among all $|\mathcal{A}|$ satellites, the serving satellite is the one having the minimal distance from the terrestrial user. Therefore, the probability of the serving satellite being located in the visible region can be formulated as shown in (16).
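+
+ As a quick numerical illustration of (16) and (17), where the shell altitudes and the constellation size are assumed example values:
+
+ ```python
+ R_E = 6371.0            # Earth radius in km
+ H1, H2 = 500.0, 600.0   # shell altitudes in km (example values)
+ N_SAT = 1000            # constellation size |A| (example value)
+
+ num = H2**2 * (3 * R_E + 2 * H2) - H1**2 * (3 * R_E + 2 * H1)
+ den = 4 * ((R_E + H2) ** 3 - (R_E + H1) ** 3)
+ p_prime = num / den                 # eq. (17): one given satellite is visible
+ p_s = 1 - (1 - p_prime) ** N_SAT    # eq. (16): the serving satellite is visible
+ print(f"P' = {p_prime:.4f}, P_S = {p_s:.6f}")  # roughly 0.04 and near 1
+ ```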
222
+
223
+ Corollary 3. Given that the serving satellite is located in the visible shell $\Omega'$ and the distance between the serving satellite and the terrestrial user is $D_0 = d_0$, the probability $P_{\mathrm{I}}$ that one of the other satellites is within the visible shell can be formulated as
224
+
225
+ $$
226
+ P _ {\mathrm {I}} = \frac {\frac {\pi}{3} \left(H _ {2} ^ {2} \left(3 R _ {\mathrm {e}} + 2 H _ {2}\right) - H _ {1} ^ {2} \left(3 R _ {\mathrm {e}} + 2 H _ {1}\right)\right) - V ^ {\prime} F _ {D ^ {\prime}} \left(d _ {0}\right)}{\frac {4 \pi}{3} \left(\left(R _ {\mathrm {e}} + H _ {2}\right) ^ {3} - \left(R _ {\mathrm {e}} + H _ {1}\right) ^ {3}\right) - V ^ {\prime} F _ {D ^ {\prime}} \left(d _ {0}\right)}. \tag {20}
227
+ $$
228
+
229
+ Proof: Given that the distance between the serving satellite and the terrestrial user is $D_0 = d_0$ , the probability that one of the interfering satellites is within the visible shell $\Omega'$ is
230
+
231
+ $$
232
+ P_{\mathrm{I}} = \frac{V^{\prime} - V_{B \cap \Omega^{\prime}}(d_{0})}{V - V_{B \cap \Omega^{\prime}}(d_{0})} = \frac{V^{\prime} - F_{D^{\prime}}(d_{0}) V^{\prime}}{V - F_{D^{\prime}}(d_{0}) V^{\prime}}. \tag{21}
233
+ $$
234
+
235
+ Upon substituting (11), (12), (13) and (19) into (21), $P_{\mathrm{I}}$ can be expressed as seen in (20).
236
+
237
+ Theorem 2. Given that the distance between the serving satellite and the terrestrial user is $D_0 = d_0$ , the conditional PDF $f_{D_1'|D_0}(d|d_0)$ of the distance between any of the interfering satellites located in the visible shell $\Omega'$ and the terrestrial user can be formulated as
238
+
239
+ $$
240
+ f _ {D _ {1} ^ {\prime} | D _ {0}} (d | d _ {0}) = \left\{ \begin{array}{l l} \frac {\frac {\pi}{R _ {\mathrm {e}} V ^ {\prime}} d ^ {3} + \frac {2 \pi}{V ^ {\prime}} d ^ {2} - \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d}{1 - F _ {D ^ {\prime}} (d _ {0})}, & d \in [ H _ {1}, \tilde {H} _ {1}) \\ \frac {\frac {2 \pi}{V ^ {\prime}} d ^ {2}}{1 - F _ {D ^ {\prime}} (d _ {0})}, & d \in [ \tilde {H} _ {1}, H _ {2}) \\ \frac {- \frac {\pi}{R _ {\mathrm {e}} V ^ {\prime}} d ^ {3} + \frac {\pi \tilde {H} _ {2} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d}{1 - F _ {D ^ {\prime}} (d _ {0})}, & d \in [ H _ {2}, \tilde {H} _ {2}) \\ 0, & d \notin [ H _ {1}, \tilde {H} _ {2}) \end{array} , \right. \tag {22}
241
+ $$
242
+
243
+ when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ , and
244
+
245
+ $$
246
+ f _ {D _ {1} ^ {\prime} \mid D _ {0}} (d \mid d _ {0}) = \left\{ \begin{array}{l l} \frac {\frac {\pi}{R _ {\mathrm {e}} V ^ {\prime}} d ^ {3} + \frac {2 \pi}{V ^ {\prime}} d ^ {2} - \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d}{1 - F _ {D ^ {\prime}} (d _ {0})}, & d \in [ H _ {1}, H _ {2}) \\ \frac {\frac {\pi \tilde {H} _ {2} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d}{1 - F _ {D ^ {\prime}} (d _ {0})}, & d \in [ H _ {2}, \tilde {H} _ {1}) \\ \frac {- \frac {\pi}{R _ {\mathrm {e}} V ^ {\prime}} d ^ {3} + \frac {\pi \tilde {H} _ {2} ^ {2}}{R _ {\mathrm {e}} V ^ {\prime}} d}{1 - F _ {D ^ {\prime}} (d _ {0})}, & d \in [ \tilde {H} _ {1}, \tilde {H} _ {2}) \\ 0, & d \notin [ H _ {1}, \tilde {H} _ {2}) \end{array} , \right. \tag {23}
247
+ $$
248
+
249
+ otherwise.
250
+
251
+ Proof: See Appendix B.
252
+
253
+ # III. HYBRID HOLOGRAPHIC AND DIGITAL BEAMFORMING DESIGN
254
+
255
+ In this section, we design the metasurface-based holographic beamformer and the digital beamformer at the terrestrial user for maximizing the throughput of the LEO system considered.
256
+
257
+ According to (1), (2), (3) and (6), the baseband channel spanning from the $l$ th satellite to the RF chains of the terrestrial user is given by
258
+
259
+ $$
260
+ \mathbf {h} ^ {(l)} = \mathbf {A} \mathbf {F} ^ {\prime (l)}, \tag {24}
261
+ $$
262
+
263
+ where $\mathbf{A}\in \mathbb{C}^{M\times MN}$ is formulated as
264
+
265
+ $$
266
+ \mathbf {A} = \left[ \begin{array}{c c c c} \mathbf {q} ^ {(1) \mathrm {T}} \mathbf {B} ^ {(1)} & \mathbf {0} _ {N} ^ {\mathrm {T}} & \dots & \mathbf {0} _ {N} ^ {\mathrm {T}} \\ \mathbf {0} _ {N} ^ {\mathrm {T}} & \mathbf {q} ^ {(2) \mathrm {T}} \mathbf {B} ^ {(2)} & \dots & \mathbf {0} _ {N} ^ {\mathrm {T}} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf {0} _ {N} ^ {\mathrm {T}} & \mathbf {0} _ {N} ^ {\mathrm {T}} & \dots & \mathbf {q} ^ {(M) \mathrm {T}} \mathbf {B} ^ {(M)} \end{array} \right]. \tag {25}
267
+ $$
268
+
269
+ In (25), $\mathbf{B}^{(m)} = \mathbf{Diag}\{\beta_1^{(m)},\beta_2^{(m)},\dots ,\beta_N^{(m)}\}$ and $\mathbf{F}^{\prime (l)}\in$ $\mathbb{C}^{MN\times 1}$ is given by
270
+
271
+ $$
272
+ \mathbf {F} ^ {\prime (l)} = \mathbf {C} \left[ \begin{array}{c} \mathbf {f} ^ {(l, 1)} \\ \mathbf {f} ^ {(l, 2)} \\ \vdots \\ \mathbf {f} ^ {(l, M)} \end{array} \right] = \left[ \begin{array}{c} \mathbf {f} ^ {\prime (l, 1)} \\ \mathbf {f} ^ {\prime (l, 2)} \\ \vdots \\ \mathbf {f} ^ {\prime (l, M)} \end{array} \right], \tag {26}
273
+ $$
274
+
275
+ where $\mathbf{f}^{\prime (l,m)}\in \mathbb{C}^{N\times 1}$ represents the link spanning from the $l$ th satellite to the $m$ th microstrip embodying the mutual coupling. According to (24), (25) and (26), the baseband channel $\mathbf{h}^{(l)}$ can be further expressed as
276
+
277
+ $$
278
+ \mathbf {h} ^ {(l)} = \left[ \begin{array}{c} \mathbf {q} ^ {(1) \mathrm {T}} \mathbf {B} ^ {(1)} \mathbf {f} ^ {\prime (l, 1)} \\ \mathbf {q} ^ {(2) \mathrm {T}} \mathbf {B} ^ {(2)} \mathbf {f} ^ {\prime (l, 2)} \\ \vdots \\ \mathbf {q} ^ {(M) \mathrm {T}} \mathbf {B} ^ {(M)} \mathbf {f} ^ {\prime (l, M)} \end{array} \right]. \tag {27}
279
+ $$
280
+
281
+ Therefore, the baseband received signal of the terrestrial user, denoted as $\mathbf{y} \in \mathbb{C}^{M \times 1}$ , can be formulated as
282
+
283
+ $$
284
+ \begin{array}{l} \mathbf{y} = \sqrt{\rho} \mathbf{h}^{(0)} s_{0} + \sum_{\mathbf{p}_{l} \in \Omega^{\prime}} \sqrt{\rho} \mathbf{h}^{(l)} s_{l} + \mathbf{A}\mathbf{C}\mathbf{w} \\ = \underbrace{\sqrt{\rho} \mathbf{h}^{(0)} s_{0}}_{\substack{\text{Signal at} \\ \text{RF-chains}}} + \underbrace{\sum_{\mathbf{p}_{l} \in \Omega^{\prime}} \sqrt{\rho} \mathbf{h}^{(l)} s_{l}}_{\substack{\text{Inter-satellite interference} \\ \text{at RF-chains}}} + \underbrace{\mathbf{w}^{\prime}}_{\substack{\text{Additive} \\ \text{noise}}}, \tag{28} \\ \end{array}
285
+ $$
286
+
287
+ where $\rho$ is the transmitted power of the satellites, $s_l\in \mathbb{C}^{1\times 1}$ is the signal transmitted from the $l$ th satellite satisfying $\mathbb{E}[|s_l|^2 ] = 1$ and $\mathbf{w}\in \mathbb{C}^{MN\times 1}$ is the additive white Gaussian noise (AWGN) at the reconfigurable metasurface elements satisfying $\mathbf{w}\sim \mathcal{CN}(\mathbf{0}_{MN},\sigma_w^2\mathbf{I}_{MN})$ . Furthermore, $\mathbf{w}'$ represents the equivalent additive noise at the RF-chains, given by $\mathbf{w}' = \mathbf{AC}\mathbf{w}$ . Therefore, we have $\mathbf{w}'\sim \mathcal{CN}(\mathbf{0}_M,\sigma_w^2\mathbf{AC}\mathbf{C}^{\mathrm{H}}\mathbf{A}^{\mathrm{H}})$ .
288
+
289
+ According to (28), the signal-to-interference-plus-noise ratio (SINR) is given by
290
+
291
+ $$
292
+ \gamma_ {0} = \frac {\rho \left| \mathbf {v} ^ {\mathrm {H}} \mathbf {h} ^ {(0)} \right| ^ {2}}{\sum_ {\mathbf {p} _ {l} \in \Omega^ {\prime}} \rho \left| \mathbf {v} ^ {\mathrm {H}} \mathbf {h} ^ {(l)} \right| ^ {2} + \left| \mathbf {v} ^ {\mathrm {H}} \mathbf {w} ^ {\prime} \right| ^ {2}}, \tag {29}
293
+ $$
294
+
295
+ where $\mathbf{v} \in \mathbb{C}^{1 \times M}$ is the digital combining vector. Thus, the throughput can be represented as
296
+
297
+ $$
298
+ \begin{array}{l} R = P _ {\mathrm {S}} \cdot \log_ {2} (1 + \gamma_ {0}) \\ = P _ {\mathrm {S}} \cdot \log_ {2} \left(1 + \frac {\rho | \mathbf {v} ^ {\mathrm {H}} \mathbf {h} ^ {(0)} | ^ {2}}{\sum_ {\mathbf {p} _ {l} \in \Omega^ {\prime}} \rho | \mathbf {v} ^ {\mathrm {H}} \mathbf {h} ^ {(l)} | ^ {2} + | \mathbf {v} ^ {\mathrm {H}} \mathbf {w} ^ {\prime} | ^ {2}}\right). \tag {30} \\ \end{array}
299
+ $$
300
+
301
+ Here, our aim is to optimize the digital beamformer $\mathbf{v}$ and the reconfigurable metasurface element coefficient matrix $\mathbf{B}$ in order to maximize the throughput $R$ . The corresponding optimization problem can be formulated as
302
+
303
+ $$
304
+ \mathcal {P} 1: \max _ {\mathbf {v}, \mathbf {B}} \log_ {2} \left(1 + \frac {\rho | \mathbf {v} ^ {\mathrm {H}} \mathbf {h} ^ {(0)} | ^ {2}}{\sum_ {\mathbf {p} _ {l} \in \Omega^ {\prime}} \rho | \mathbf {v} ^ {\mathrm {H}} \mathbf {h} ^ {(l)} | ^ {2} + | \mathbf {v} ^ {\mathrm {H}} \mathbf {w} ^ {\prime} | ^ {2}}\right) \tag {31}
305
+ $$
306
+
307
+ $$
308
+ \mathrm{s.t.} \quad \beta_{n}^{(m)} = \frac{\jmath + \mathrm{e}^{\jmath \phi_{n}^{(m)}}}{2}, \tag{32}
309
+ $$
310
+
311
+ $$
312
+ \phi_ {n} ^ {(m)} \in [ 0, 2 \pi), m = 1, 2, \dots , M, n = 1, 2, \dots , N. \tag {33}
313
+ $$
314
+
315
+ Since $\mathcal{P}1$ is a non-convex problem, we can decouple it into a pair of sub-problems and optimize them separately. Specifically, the metasurface-based holographic beamformer is designed to maximize the baseband channel gain between the serving satellite and the terrestrial user, while the digital beamformer is optimized based on the MMSE detection method to reduce the inter-satellite interference.
316
+
317
+ # A. Holographic Beamformer
318
+
319
+ Maximizing the baseband channel gain in metasurface-based holographic beamformers is crucial for precise beam shaping and for achieving high directional gain. This enhances the signal strength and compensates for the path-loss of satellite-to-ground links, making it ideal for robust communication in dense LEO satellite networks [17]. To maximize the baseband channel gain between the serving satellite and the terrestrial user, the corresponding problem of optimizing the holographic beamformer can be formulated as
320
+
321
+ $$
322
+ \mathcal {P} 2: \max _ {\mathbf {B}} \left\| \mathbf {h} ^ {(0)} \right\| ^ {2} \tag {34}
323
+ $$
324
+
325
+ $$
326
+ \mathrm{s.t.} \quad \beta_{n}^{(m)} = \frac{\jmath + \mathrm{e}^{\jmath \phi_{n}^{(m)}}}{2}, \tag{35}
327
+ $$
328
+
329
+ $$
330
+ \phi_ {n} ^ {(m)} \in [ 0, 2 \pi), m = 1, 2, \dots , M, n = 1, 2, \dots , N. \tag {36}
331
+ $$
332
+
333
+ According to (27), the baseband channel gain $\| \mathbf{h}^{(0)}\| ^2$ can be further reformulated as
334
+
335
+ $$
336
+ \begin{array}{l} \left\| \mathbf{h}^{(0)} \right\|^{2} = \sum_{m=1}^{M} \left| \mathbf{q}^{(m)\mathrm{T}} \mathbf{B}^{(m)} \mathbf{f}^{\prime(0,m)} \right|^{2} \\ = \sum_{m=1}^{M} \left| \sum_{n=1}^{N} q_{n}^{(m)} \beta_{n}^{(m)} f_{n}^{\prime(0,m)} \right|^{2}. \tag{37} \\ \end{array}
337
+ $$
338
+
339
+ Therefore, the problem $\mathcal{P}2$ can be recast as
340
+
341
+ $$
342
+ \mathcal{P}3: \max_{\mathbf{B}} \sum_{m=1}^{M} \left| \sum_{n=1}^{N} q_{n}^{(m)} \left(\frac{\jmath + \mathrm{e}^{\jmath \phi_{n}^{(m)}}}{2}\right) f_{n}^{\prime(0,m)} \right|^{2} \tag{38}
343
+ $$
344
+
345
+ $$
346
+ \text {s . t .} \phi_ {n} ^ {(m)} \in [ 0, 2 \pi), n = 1, 2, \dots , N, \tag {39}
347
+ $$
348
+
349
+ for $m = 1,2,\dots ,M$ . Finally, the closed-form solution for $\mathcal{P}3$ can be expressed as
350
+
351
+ $$
352
+ \begin{array}{l} \phi_ {n} ^ {(m)} = \angle \left(\sum_ {n = 1} ^ {N} q _ {n} ^ {(m)} f _ {n} ^ {\prime (0, m)}\right) + \frac {\pi}{2} \\ - \left(\angle q _ {n} ^ {(m)} + \angle f _ {n} ^ {\prime (0, m)}\right), n = 1, 2, \dots , N. \tag {40} \\ \end{array}
353
+ $$
354
+
355
+ # B. Digital Beamformer
356
+
357
+ Based on the holographic beamformer in (40), the baseband channel between the serving satellite and the terrestrial user can be formulated as
358
+
359
+ $$
360
+ \mathbf {h} ^ {(0)} = \left[ \begin{array}{c} \frac {1}{2} \left(\left| \sum_ {n = 1} ^ {N} q _ {n} ^ {(1)} f _ {n} ^ {\prime (0, 1)} \right| + \sum_ {n = 1} ^ {N} \left| q _ {n} ^ {(1)} \right| \left| f _ {n} ^ {\prime (0, 1)} \right|\right) \\ \frac {1}{2} \left(\left| \sum_ {n = 1} ^ {N} q _ {n} ^ {(2)} f _ {n} ^ {\prime (0, 2)} \right| + \sum_ {n = 1} ^ {N} \left| q _ {n} ^ {(2)} \right| \left| f _ {n} ^ {\prime (0, 2)} \right|\right) \\ \vdots \\ \frac {1}{2} \left(\left| \sum_ {n = 1} ^ {N} q _ {n} ^ {(M)} f _ {n} ^ {\prime (0, M)} \right| + \sum_ {n = 1} ^ {N} \left| q _ {n} ^ {(M)} \right| \left| f _ {n} ^ {\prime (0, M)} \right|\right) \end{array} \right]. \tag {41}
361
+ $$
362
+
363
+ By contrast, the baseband channel between the interfering satellites and the terrestrial user can be characterized by:
364
+
365
+ $$
366
+ \mathbf {h} ^ {(l)} = \left[ \begin{array}{c} \frac {1}{2} \left(\jmath \sum_ {n = 1} ^ {N} q _ {n} ^ {(1)} f _ {n} ^ {\prime (l, 1)} + \sum_ {n = 1} ^ {N} q _ {n} ^ {(1)} f _ {n} ^ {\prime (l, 1)} \mathrm {e} ^ {\jmath \phi_ {n} ^ {(1)}}\right) \\ \frac {1}{2} \left(\jmath \sum_ {n = 1} ^ {N} q _ {n} ^ {(2)} f _ {n} ^ {\prime (l, 2)} + \sum_ {n = 1} ^ {N} q _ {n} ^ {(2)} f _ {n} ^ {\prime (l, 2)} \mathrm {e} ^ {\jmath \phi_ {n} ^ {(2)}}\right) \\ \vdots \\ \frac {1}{2} \left(\jmath \sum_ {n = 1} ^ {N} q _ {n} ^ {(M)} f _ {n} ^ {\prime (l, M)} + \sum_ {n = 1} ^ {N} q _ {n} ^ {(M)} f _ {n} ^ {\prime (l, M)} \mathrm {e} ^ {\jmath \phi_ {n} ^ {(M)}}\right) \end{array} \right], \tag {42}
367
+ $$
368
+
369
+ for $l = 1,2,\dots ,|\mathcal{A}| - 1$ with $\phi_n^{(m)}$ optimized in (40).
370
+
371
+ To design the digital beamformer, we employ MMSE RC methods relying on different amounts of CSI, as detailed below.
372
+
373
+ 1) MMSE RC method based on full CSI: First, we consider the MMSE RC based on the full CSI, assuming that the terrestrial user can acquire the CSI of both the serving satellite and of all the interfering satellites.
374
+
375
+ Therefore, the MMSE RC vector based on the full CSI can be designed as follows:
376
+
377
+ $$
378
+ \mathbf {v} _ {\mathrm {f}} = \left(\mathbf {h} ^ {(0)} \mathbf {h} ^ {(0) \mathrm {H}} + \mathbf {U}\right) ^ {- 1} \mathbf {h} ^ {(0)}, \tag {43}
379
+ $$
380
+
381
+ which leads to the attainable throughput of
382
+
383
+ $$
384
+ \begin{array}{l} R = P _ {\mathrm {S}} \cdot \log_ {2} \left(1 + \frac {\left| \mathbf {v} _ {\mathrm {f}} ^ {\mathrm {H}} \mathbf {h} ^ {(0)} \right| ^ {2}}{\mathbf {v} _ {\mathrm {f}} ^ {\mathrm {H}} \mathbf {U} \mathbf {v} _ {\mathrm {f}}}\right) \\ = P _ {\mathrm {S}} \cdot \log_ {2} \left(1 + \mathbf {h} ^ {(0) \mathrm {H}} \mathbf {U} ^ {- 1} \mathbf {h} ^ {(0)}\right), \tag {44} \\ \end{array}
385
+ $$
386
+
387
+ where $\mathbf{U} = \sum_{\mathbf{p}_l\in \Omega '}\mathbf{h}^{(l)}\mathbf{h}^{(l)\mathrm{H}} + \frac{\sigma_w^2}{\rho}\mathbf{A}\mathbf{C}\mathbf{C}^{\mathrm{H}}\mathbf{A}^{\mathrm{H}}.$
388
+
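+ As a sanity check of (43) and (44), the following sketch builds $\mathbf{v}_{\mathrm{f}}$ from randomly drawn placeholder channels (not the paper's simulation values) and verifies that the resulting SINR equals $\mathbf{h}^{(0)\mathrm{H}}\mathbf{U}^{-1}\mathbf{h}^{(0)}$:
+
+ ```python
+ import numpy as np
+
+ M, L = 4, 6                           # RF chains, visible interferers (illustrative)
+ rng = np.random.default_rng(1)
+ h0 = rng.standard_normal(M) + 1j * rng.standard_normal(M)
+ Hi = rng.standard_normal((M, L)) + 1j * rng.standard_normal((M, L))
+ noise = 0.1 * np.eye(M)               # stand-in for (sigma_w^2 / rho) A C C^H A^H
+
+ U = Hi @ Hi.conj().T + noise          # interference-plus-noise covariance
+ v_f = np.linalg.solve(np.outer(h0, h0.conj()) + U, h0)   # MMSE RC vector (43)
+
+ sinr = np.abs(v_f.conj() @ h0) ** 2 / np.real(v_f.conj() @ U @ v_f)
+ assert np.isclose(sinr, np.real(h0.conj() @ np.linalg.solve(U, h0)))
+
+ P_S = 0.9                             # placeholder serving-visibility probability
+ R = P_S * np.log2(1 + sinr)           # throughput, cf. (44)
+ ```
+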
389
+ 2) MMSE RC method based on the distribution of satellites: In practical systems, acquiring the full CSI of all interfering satellites is infeasible. Hence, we propose an MMSE RC method based on the distribution of the satellite constellation by harnessing stochastic geometry. More explicitly, our MMSE RC method utilizes only the average number and the spatial distribution of the interfering satellites within the visible region $\Omega'$ as statistical information. Notably, it relies neither on the full CSI $\mathbf{h}^{(1)},\mathbf{h}^{(2)},\dots ,\mathbf{h}^{(|\mathcal{A}| - 1)}$, nor on precise positional details such as the azimuth and elevation angles of the individual interfering satellites. By focusing on the distributional characteristics of the visible interfering satellites, this approach reduces the complexity of channel estimation. Specifically, the MMSE RC vector based on the statistical information can be formulated as
390
+
391
+ $$
392
+ \mathbf {v} _ {\mathrm {s}} = \left(\mathbf {h} ^ {(0)} \mathbf {h} ^ {(0) \mathrm {H}} + \mathbf {R} _ {\mathrm {I}} ^ {\prime} + \frac {\sigma_ {w} ^ {2}}{\rho} \mathbf {A} \mathbf {C} \mathbf {C} ^ {\mathrm {H}} \mathbf {A} ^ {\mathrm {H}}\right) ^ {- 1} \mathbf {h} ^ {(0)}, \tag {45}
393
+ $$
394
+
395
+ where $\mathbf{R}_{\mathrm{I}}^{\prime}$ represents the covariance matrix of the baseband channels between all interfering satellites in the visible shell $\Omega^{\prime}$ and the terrestrial user. Explicitly, $\mathbf{R}_{\mathrm{I}}^{\prime} = \mathbb{E}[\sum_{\mathbf{p}_{l}\in \Omega^{\prime}}\mathbf{h}^{(l)}\mathbf{h}^{(l)\mathrm{H}}]$ .
396
+
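+ A minimal sketch of (45), with a random positive semi-definite placeholder standing in for $\mathbf{R}_{\mathrm{I}}^{\prime}$ (whose closed form is given by Theorem 3 below):
+
+ ```python
+ import numpy as np
+
+ M = 4
+ rng = np.random.default_rng(2)
+ h0 = rng.standard_normal(M) + 1j * rng.standard_normal(M)
+
+ G = rng.standard_normal((M, M)) + 1j * rng.standard_normal((M, M))
+ R_I = G @ G.conj().T / M              # placeholder for R_I' of (46)
+ noise = 0.1 * np.eye(M)               # placeholder for the noise-covariance term
+
+ # Statistical-CSI MMSE RC vector (45): R_I and noise are fixed over a
+ # statistical block, so only h0 has to be re-estimated.
+ v_s = np.linalg.solve(np.outer(h0, h0.conj()) + R_I + noise, h0)
+ ```
+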
397
+ Theorem 3. The covariance matrix of the baseband channels spanning from all interfering satellites in the visible domain $\Omega^{\prime}$ to the terrestrial user can be expressed as
398
+
399
+ $$
400
+ \begin{array}{l} \mathbf {R} _ {\mathrm {I}} ^ {\prime} = \varsigma \zeta \left(\frac {\lambda}{4 \pi}\right) ^ {2} (| \mathcal {A} | - 1) P _ {\mathrm {I}} \mathcal {L} (d _ {0}) \cdot \\ \frac {\mathbf {Q} \left(\mathbf {C C} ^ {\mathrm {H}} + \left(\mathbf {C C} ^ {\mathrm {H}}\right) \odot \mathbf {I} _ {M N}\right) \mathbf {Q} ^ {\mathrm {H}}}{4}, \tag {46} \\ \end{array}
401
+ $$
402
+
403
+ where $\mathbf{Q} \in \mathbb{C}^{M \times MN}$ is formulated as
404
+
405
+ $$
406
+ \mathbf {Q} = \left[ \begin{array}{c c c c} \mathbf {q} ^ {(1) \mathrm {T}} & \mathbf {0} _ {N} ^ {\mathrm {T}} & \dots & \mathbf {0} _ {N} ^ {\mathrm {T}} \\ \mathbf {0} _ {N} ^ {\mathrm {T}} & \mathbf {q} ^ {(2) \mathrm {T}} & \dots & \mathbf {0} _ {N} ^ {\mathrm {T}} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf {0} _ {N} ^ {\mathrm {T}} & \mathbf {0} _ {N} ^ {\mathrm {T}} & \dots & \mathbf {q} ^ {(M) \mathrm {T}} \end{array} \right], \tag {47}
407
+ $$
408
+
409
+ and $\mathcal{L}(d_0)$ denotes the average path loss of the link spanning from each interfering satellite located in the visible shell $\Omega^{\prime}$ to the terrestrial user, given the distance $D_0 = d_0$ from the serving satellite to the terrestrial user. If $\alpha = 2$,
410
+
411
+ the value of $\mathcal{L}(d_0)$ is given by
412
+
413
+ $$
414
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{l l} \frac {\pi}{2 R _ {\mathrm {e}}} d _ {0} ^ {2} + 2 \pi d _ {0} - \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}}} \ln d _ {0}, & d _ {0} \in \left[ H _ {1}, \tilde {H} _ {1}\right) \\ 2 \pi d _ {0}, & d _ {0} \in \left[ \tilde {H} _ {1}, H _ {2}\right) \\ - \frac {\pi}{2 R _ {\mathrm {e}}} d _ {0} ^ {2} + \frac {\pi}{R _ {\mathrm {e}}} \tilde {H} _ {2} ^ {2} \ln d _ {0}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {2}\right) \end{array} \right. \tag {48}
415
+ $$
416
+
417
+ when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ , and
418
+
419
+ $$
420
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{l l} \frac {\pi}{2 R _ {\mathrm {e}}} d _ {0} ^ {2} + 2 \pi d _ {0} - \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}}} \ln d _ {0}, & d _ {0} \in \left[ H _ {1}, H _ {2}\right) \\ \frac {\pi}{R _ {\mathrm {e}}} \left(\tilde {H} _ {2} ^ {2} - \tilde {H} _ {1} ^ {2}\right) \ln d _ {0}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {1}\right) \\ - \frac {\pi}{2 R _ {\mathrm {e}}} d _ {0} ^ {2} + \frac {\pi}{R _ {\mathrm {e}}} \tilde {H} _ {2} ^ {2} \ln d _ {0}, & d _ {0} \in \left[ \tilde {H} _ {1}, \tilde {H} _ {2}\right) \end{array} \right. \tag {49}
421
+ $$
422
+
423
+ otherwise. Furthermore, if $\alpha = 3$ , the value of $\mathcal{L}(d_0)$ is given by
424
+
425
+ $$
426
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{l l} \frac {\pi}{R _ {\mathrm {e}}} d _ {0} + 2 \pi \ln d _ {0} + \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}}} \frac {1}{d _ {0}}, & d _ {0} \in \left[ H _ {1}, \tilde {H} _ {1}\right) \\ 2 \pi \ln d _ {0}, & d _ {0} \in \left[ \tilde {H} _ {1}, H _ {2}\right) \\ - \frac {\pi}{R _ {\mathrm {e}}} d _ {0} - \frac {\pi}{R _ {\mathrm {e}}} \tilde {H} _ {2} ^ {2} \frac {1}{d _ {0}}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {2}\right) \end{array} \right. \tag {50}
427
+ $$
428
+
429
+ when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ , and
430
+
431
+ $$
432
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{l l} \frac {\pi}{R _ {\mathrm {e}}} d _ {0} + 2 \pi \ln d _ {0} + \frac {\pi \tilde {H} _ {1} ^ {2}}{R _ {\mathrm {e}}} \frac {1}{d _ {0}}, & d _ {0} \in \left[ H _ {1}, H _ {2}\right) \\ - \frac {\pi}{R _ {\mathrm {e}}} \left(\tilde {H} _ {2} ^ {2} - \tilde {H} _ {1} ^ {2}\right) \frac {1}{d _ {0}}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {1}\right) \\ - \frac {\pi}{R _ {\mathrm {e}}} d _ {0} - \frac {\pi}{R _ {\mathrm {e}}} \tilde {H} _ {2} ^ {2} \frac {1}{d _ {0}}, & d _ {0} \in \left[ \tilde {H} _ {1}, \tilde {H} _ {2}\right) \end{array} \right. \tag {51}
433
+ $$
434
+
435
+ otherwise. Finally, if $\alpha = 4$ , the value of $\mathcal{L}(d_0)$ is given by
436
+
437
+ $$
438
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{l l} \frac {\pi}{R _ {\mathrm {e}}} \ln d _ {0} - 2 \pi \frac {1}{d _ {0}} + \frac {\pi \tilde {H} _ {1} ^ {2}}{2 R _ {\mathrm {e}}} \frac {1}{d _ {0} ^ {2}}, & d _ {0} \in \left[ H _ {1}, \tilde {H} _ {1}\right) \\ - 2 \pi \frac {1}{d _ {0}}, & d _ {0} \in \left[ \tilde {H} _ {1}, H _ {2}\right) \\ - \frac {\pi}{R _ {\mathrm {e}}} \ln d _ {0} - \frac {\pi}{2 R _ {\mathrm {e}}} \tilde {H} _ {2} ^ {2} \frac {1}{d _ {0} ^ {2}}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {2}\right) \end{array} \right. \tag {52}
439
+ $$
440
+
441
+ when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ , and
442
+
443
+ $$
444
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{l l} \frac {\pi}{R _ {\mathrm {e}}} \ln d _ {0} - 2 \pi \frac {1}{d _ {0}} + \frac {\pi \tilde {H} _ {1} ^ {2}}{2 R _ {\mathrm {e}}} \frac {1}{d _ {0} ^ {2}}, & d _ {0} \in \left[ H _ {1}, H _ {2}\right) \\ - \frac {\pi}{2 R _ {\mathrm {e}}} \left(\tilde {H} _ {2} ^ {2} - \tilde {H} _ {1} ^ {2}\right) \frac {1}{d _ {0} ^ {2}}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {1}\right) \\ - \frac {\pi}{R _ {\mathrm {e}}} \ln d _ {0} - \frac {\pi}{2 R _ {\mathrm {e}}} \tilde {H} _ {2} ^ {2} \frac {1}{d _ {0} ^ {2}}, & d _ {0} \in \left[ \tilde {H} _ {1}, \tilde {H} _ {2}\right) \end{array} \right. \tag {53}
445
+ $$
446
+
447
+ otherwise. If $\alpha \neq 2,3,4$ , the value of $\mathcal{L}(d_0)$ is given by
448
+
449
+ $$
450
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{c c} \frac {\pi}{(4 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {4 - \alpha} + \frac {2 \pi}{3 - \alpha} d _ {0} ^ {3 - \alpha} & \\ - \frac {\pi \tilde {H} _ {1} ^ {2}}{(2 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {2 - \alpha}, & d _ {0} \in \left[ H _ {1}, \tilde {H} _ {1}\right) \\ \frac {2 \pi}{3 - \alpha} d _ {0} ^ {3 - \alpha}, & d _ {0} \in \left[ \tilde {H} _ {1}, H _ {2}\right) \\ - \frac {\pi}{(4 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {4 - \alpha} & \\ + \frac {\pi \tilde {H} _ {2} ^ {2}}{(2 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {2 - \alpha}, & d _ {0} \in \left[ H _ {2}, \tilde {H} _ {2}\right) \end{array} \right. \tag {54}
451
+ $$
452
+
453
+ when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$ , and
454
+
455
+ $$
456
+ \mathcal {L} \left(d _ {0}\right) = \left\{ \begin{array}{c c} \frac {\pi}{(4 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {4 - \alpha} + \frac {2 \pi}{3 - \alpha} d _ {0} ^ {3 - \alpha} & \\ - \frac {\pi \tilde {H} _ {1} ^ {2}}{(2 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {2 - \alpha}, & d _ {0} \in [ H _ {1}, H _ {2}) \\ \frac {\pi}{(2 - \alpha) R _ {\mathrm {e}}} \left(\tilde {H} _ {2} ^ {2} - \tilde {H} _ {1} ^ {2}\right) d _ {0} ^ {2 - \alpha}, & d _ {0} \in [ H _ {2}, \tilde {H} _ {1}) \\ - \frac {\pi}{(4 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {4 - \alpha} & \\ + \frac {\pi \tilde {H} _ {2} ^ {2}}{(2 - \alpha) R _ {\mathrm {e}}} d _ {0} ^ {2 - \alpha}, & d _ {0} \in [ \tilde {H} _ {1}, \tilde {H} _ {2}) \end{array} \right. \tag {55}
457
+ $$
458
+
459
+ otherwise.
460
+
461
+ Proof: See Appendix C.
462
+
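+ For reference, the general-$\alpha$ branches (54)-(55) transcribe directly into code; the sketch below assumes $\tilde{H}_i = \sqrt{H_i(2R_{\mathrm{e}} + H_i)}$, consistent with the thresholds of Appendix A, and the special cases $\alpha \in \{2, 3, 4\}$ of (48)-(53) would be handled analogously:
+
+ ```python
+ import numpy as np
+
+ def L_general(d0, alpha, H1, H2, Re=6371e3):
+     """Average path-loss factor of (54)-(55) for alpha not in {2, 3, 4}."""
+     Ht1 = np.sqrt(H1 * (2 * Re + H1))     # assumed tilde-H_1
+     Ht2 = np.sqrt(H2 * (2 * Re + H2))     # assumed tilde-H_2
+     a, b, c = 4 - alpha, 3 - alpha, 2 - alpha
+
+     first = (np.pi / (a * Re)) * d0**a + (2 * np.pi / b) * d0**b \
+         - (np.pi * Ht1**2 / (c * Re)) * d0**c
+     last = -(np.pi / (a * Re)) * d0**a + (np.pi * Ht2**2 / (c * Re)) * d0**c
+
+     if Ht1 <= H2:                         # branch structure of (54)
+         if H1 <= d0 < Ht1:
+             return first
+         if Ht1 <= d0 < H2:
+             return (2 * np.pi / b) * d0**b
+         if H2 <= d0 < Ht2:
+             return last
+     else:                                 # branch structure of (55)
+         if H1 <= d0 < H2:
+             return first
+         if H2 <= d0 < Ht1:
+             return (np.pi / (c * Re)) * (Ht2**2 - Ht1**2) * d0**c
+         if Ht1 <= d0 < Ht2:
+             return last
+     raise ValueError("d0 outside [H1, tilde-H2)")
+ ```
+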
463
+ Given the satellite distribution, the throughput of the hybrid beamforming method can be formulated as in (56).
464
+
465
+ # C. Computational Complexity of the RC Methods
466
+
467
+ The computational complexity of the MMSE RC methods can be quantified in terms of the calculation of $\mathbf{v}_{\mathrm{f}}^{\mathrm{H}}\mathbf{y}$ or $\mathbf{v}_{\mathrm{s}}^{\mathrm{H}}\mathbf{y}$ for every received signal $\mathbf{y}$ at the terrestrial user. The complexity of additions and subtractions is neglected, since it is considerably lower. Hence, we quantify the complexity by counting the number of floating-point multiplication and division operations required for the calculation of $\mathbf{v}_{\mathrm{f}}^{\mathrm{H}}\mathbf{y}$ or $\mathbf{v}_{\mathrm{s}}^{\mathrm{H}}\mathbf{y}$ .
468
+
469
+ The complexity of the MMSE RC is presented in Table II, where $\tau$ denotes the number of information symbols within each coherence interval. In the MMSE RC method based on the full CSI, the computational complexity comprises the calculation of the combining vector $\mathbf{v}_{\mathrm{f}}$ in (43) and that of the information recovery $\mathbf{v}_{\mathrm{f}}^{\mathrm{H}}\mathbf{y}$. Specifically, calculating $\mathbf{h}^{(0)}\mathbf{h}^{(0)\mathrm{H}} + \sum_{\mathbf{p}_l\in \Omega '}\mathbf{h}^{(l)}\mathbf{h}^{(l)\mathrm{H}}$ requires an average of $[(|\mathcal{A}| - 1)P_{\mathrm{I}} + 1]M^{2}$ floating-point multiplications, while the complexity of calculating $\frac{\sigma_w^2}{\rho}\mathbf{A}\mathbf{C}\mathbf{C}^{\mathrm{H}}\mathbf{A}^{\mathrm{H}}$ can be ignored, since this term remains unchanged within each statistical block. Furthermore, inverting the matrix $\mathbf{h}^{(0)}\mathbf{h}^{(0)\mathrm{H}} + \sum_{\mathbf{p}_l\in \Omega '}\mathbf{h}^{(l)}\mathbf{h}^{(l)\mathrm{H}} + \frac{\sigma_w^2}{\rho}\mathbf{A}\mathbf{C}\mathbf{C}^{\mathrm{H}}\mathbf{A}^{\mathrm{H}}$ and multiplying the inverse by the vector $\mathbf{h}^{(0)}$ require $M^2$ floating-point multiplications and $M$ floating-point divisions upon utilizing the LDL decomposition [44], while the information recovery $\mathbf{v}_{\mathrm{f}}^{\mathrm{H}}\mathbf{y}$ requires $M$ floating-point multiplications per symbol.
+
+ By contrast, in the MMSE RC method based on the statistical CSI, the computational complexity comprises the calculation of the combining vector $\mathbf{v}_{\mathrm{s}}$ in (45) and that of the information recovery $\mathbf{v}_{\mathrm{s}}^{\mathrm{H}}\mathbf{y}$. Here, only $\mathbf{h}^{(0)}\mathbf{h}^{(0)\mathrm{H}}$ has to be computed, requiring $M^2$ floating-point multiplications, while the complexity of calculating $\mathbf{R}_{\mathrm{I}}'$ and $\frac{\sigma_w^2}{\rho}\mathbf{A}\mathbf{C}\mathbf{C}^{\mathrm{H}}\mathbf{A}^{\mathrm{H}}$ can be readily ignored, since both remain unchanged within each statistical block. Again, the matrix inversion and its multiplication by $\mathbf{h}^{(0)}$ require $M^2$ floating-point multiplications and $M$ floating-point divisions by utilizing the LDL decomposition, while the information recovery $\mathbf{v}_{\mathrm{s}}^{\mathrm{H}}\mathbf{y}$ requires $M$ floating-point multiplications per symbol.
470
+
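+ These counts are easily tabulated; a trivial helper (the numeric arguments are arbitrary examples, with $P_{\mathrm{I}}$ denoting the visibility probability of an interfering satellite):
+
+ ```python
+ # Flop counts of Table II; tau is the number of symbols per coherence interval.
+ def flops_full_csi(M, n_sat, P_I, tau):
+     return ((n_sat - 1) * P_I + 2) * M**2 + tau * M, M   # (multiplications, divisions)
+
+ def flops_satellite_distribution(M, tau):
+     return 2 * M**2 + tau * M, M
+
+ print(flops_full_csi(M=4, n_sat=720, P_I=0.05, tau=100))   # -> (1007.2, 4)
+ print(flops_satellite_distribution(M=4, tau=100))           # -> (432, 4)
+ ```
+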
471
+ In summary, the MMSE RC method based on full CSI requires a higher computational complexity due to the need for complete CSI acquired from both the serving and interfering
472
+
473
+ $$
474
+ R = P _ {\mathrm {S}} \cdot \log_ {2} \left(1 + \frac {\left| \mathbf {v} _ {\mathrm {s}} ^ {\mathrm {H}} \mathbf {h} ^ {(0)} \right| ^ {2}}{\mathbf {v} _ {\mathrm {s}} ^ {\mathrm {H}} \mathbf {U} \mathbf {v} _ {\mathrm {s}}}\right) = P _ {\mathrm {S}} \cdot \log_ {2} \left(1 + \frac {\left(\mathbf {h} ^ {(0) \mathrm {H}} \left(\mathbf {R} _ {\mathrm {I}} ^ {\prime} + \frac {\sigma_ {w} ^ {2}}{\rho} \mathbf {A} \mathbf {C} \mathbf {C} ^ {\mathrm {H}} \mathbf {A} ^ {\mathrm {H}}\right) ^ {- 1} \mathbf {h} ^ {(0)}\right) ^ {2}}{\mathbf {h} ^ {(0) \mathrm {H}} \left(\mathbf {R} _ {\mathrm {I}} ^ {\prime} + \frac {\sigma_ {w} ^ {2}}{\rho} \mathbf {A} \mathbf {C} \mathbf {C} ^ {\mathrm {H}} \mathbf {A} ^ {\mathrm {H}}\right) ^ {- 1} \mathbf {U} \left(\mathbf {R} _ {\mathrm {I}} ^ {\prime} + \frac {\sigma_ {w} ^ {2}}{\rho} \mathbf {A} \mathbf {C} \mathbf {C} ^ {\mathrm {H}} \mathbf {A} ^ {\mathrm {H}}\right) ^ {- 1} \mathbf {h} ^ {(0)}}\right). \tag {56}
475
+ $$
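+
+ A quick numerical check of the second equality in (56), with randomly drawn placeholder covariances and $\mathbf{W}$ standing for $\mathbf{R}_{\mathrm{I}}^{\prime} + \frac{\sigma_w^2}{\rho}\mathbf{A}\mathbf{C}\mathbf{C}^{\mathrm{H}}\mathbf{A}^{\mathrm{H}}$:
+
+ ```python
+ import numpy as np
+
+ M = 4
+ rng = np.random.default_rng(4)
+ h0 = rng.standard_normal(M) + 1j * rng.standard_normal(M)
+ A = rng.standard_normal((M, 2 * M)) + 1j * rng.standard_normal((M, 2 * M))
+ U = A @ A.conj().T / (2 * M) + 0.1 * np.eye(M)   # realized interference plus noise
+ B = rng.standard_normal((M, 2 * M)) + 1j * rng.standard_normal((M, 2 * M))
+ W = B @ B.conj().T / (2 * M) + 0.1 * np.eye(M)   # statistical surrogate used by v_s
+
+ v_s = np.linalg.solve(np.outer(h0, h0.conj()) + W, h0)
+ lhs = np.abs(v_s.conj() @ h0) ** 2 / np.real(v_s.conj() @ U @ v_s)
+
+ Winv_h = np.linalg.solve(W, h0)                  # W^{-1} h0
+ rhs = np.real(h0.conj() @ Winv_h) ** 2 / np.real(Winv_h.conj() @ U @ Winv_h)
+ assert np.isclose(lhs, rhs)
+ ```
+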
476
+
477
+ TABLE II COMPLEXITY OF THE MMSE RC METHODS BASED ON VARIOUS LEVELS OF CSI.
478
+
479
+ <table><tr><td rowspan="2">Schemes</td><td colspan="2">Calculation of combining vectors</td><td colspan="2">Information recovery</td><td colspan="2">Total calculation complexity</td></tr><tr><td>Multiplication</td><td>Division</td><td>Multiplication</td><td>Division</td><td>Multiplication</td><td>Division</td></tr><tr><td>MMSE, full CSI</td><td>((|A|-1)P<sub>I</sub>+2)M<sup>2</sup></td><td>M</td><td>M</td><td>0</td><td>((|A|-1)P<sub>I</sub>+2)M<sup>2</sup>+τM</td><td>M</td></tr><tr><td>MMSE, satellite distribution</td><td>2M<sup>2</sup></td><td>M</td><td>M</td><td>0</td><td>2M<sup>2</sup>+τM</td><td>M</td></tr></table>
480
+
481
+ ![](images/faac4ddfde9258492d7d1e4a61fe67d1402e8a7094d70a76e5057efe5ff14e21.jpg)
482
+ (a) Number of microstrips $M = 2$ .
483
+
484
+ ![](images/1db8d3bd4321f8e2907b7b4fc7ecd2bcfa96d4405f021c41f80bfed00d69d8b6.jpg)
485
+ (b) Number of microstrips $M = 4$ .
486
+ Fig. 3. Throughput $R$ versus the number of holographic metasurface elements in each microstrip $N$ , with the fixed holographic metasurface element spacing $\delta_N = \frac{\lambda}{4}$ in each microstrip, i.e. the physical dimension of each microstrip being $\frac{\lambda}{4} N$ .
487
+
488
+ ![](images/264e90b9d0b64c758e57d66c7dc2e0a98f7caba063eb75d378e12468121bf101.jpg)
489
+ (c) Number of microstrips $M = 6$ .
490
+
491
+ satellites, which includes calculating the full covariance matrix. By contrast, the MMSE RC method based on the satellite distribution significantly reduces complexity by relying on the statistical characteristics of the satellite constellation, thus avoiding the overhead associated with acquiring full CSI. This approach has lower computational demand, making it more feasible for dense LEO satellite deployments.
492
+
493
+ # IV. SIMULATION RESULTS
494
+
495
+ In this section, we numerically evaluate the performance of the proposed beamforming methods for holographic metasurface-based multi-altitude LEO satellite communications. We assume that the holographic metasurface elements are compactly packed, i.e. the physical size of each microstrip is $N\delta_{N}$. In all simulations, the mutual coupling is modeled based on (6), providing a realistic assessment of the element interactions under dense element configurations. Each dipole is arranged parallel to the holographic surface, ensuring uniform orientation across the planar array. This configuration maximizes the surface efficiency and simplifies the mutual coupling modeling, since the coupling model can be applied consistently across the elements. Unless specified otherwise, the simulation parameters are similar to those in [45], [46] and are given in Table III.
496
+
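+ For concreteness, the Table III settings translate into a configuration of the following shape (a hypothetical snippet; the variable names are ours):
+
+ ```python
+ # Hypothetical configuration dictionary mirroring Table III.
+ cfg = dict(
+     fc_hz=28e9, bandwidth_hz=1e6, antenna_gain_dbi=50,
+     n_microstrips=4, n_elements=8, earth_radius_m=6371e3,
+     h1_m=160e3, h2_m=2000e3, n_satellites=720,
+     tx_power_dbw=60, path_loss_exponent=2,
+     rain_attenuation_db=-4.324, noise_power_dbm=-104,
+ )
+ wavelength = 3e8 / cfg["fc_hz"]
+ delta_m = wavelength / 2    # microstrip spacing
+ delta_n = wavelength / 4    # element spacing (also the dipole length)
+ ```
+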
497
+ Fig. 3 compares the throughput $R$ versus the number $N$ of holographic metasurface elements in each microstrip. The
498
+
499
+ TABLE III SIMULATION PARAMETERS.
500
+
501
+ <table><tr><td>Parameters</td><td>Values</td></tr><tr><td>Carrier frequency</td><td>f<sub>c</sub> = 28 GHz</td></tr><tr><td>Bandwidth</td><td>B<sub>w</sub> = 1 MHz</td></tr><tr><td>Antenna gain</td><td>ζ = 50 dBi</td></tr><tr><td>Number of microstrips</td><td>M = 4</td></tr><tr><td>Number of elements in each microstrip</td><td>N = 8</td></tr><tr><td>Earth radius</td><td>R<sub>e</sub> = 6371 km</td></tr><tr><td>Microstrip spacing</td><td>δ<sub>M</sub> = λ/2</td></tr><tr><td>Holographic metasurface element spacing</td><td>δ<sub>N</sub> = λ/4</td></tr><tr><td>Dipole length</td><td>δ<sub>0</sub> = δ<sub>N</sub></td></tr><tr><td>Minimum altitude of LEO satellites</td><td>H<sub>1</sub> = 160 km</td></tr><tr><td>Maximum altitude of LEO satellites</td><td>H<sub>2</sub> = 2000 km</td></tr><tr><td>Number of satellites</td><td>|A| = 720</td></tr><tr><td>Satellite transmit power</td><td>ρ = 60 dBW</td></tr><tr><td>Path loss exponent</td><td>α = 2</td></tr><tr><td>Average rain attenuation</td><td>ς = -4.324 dB</td></tr><tr><td>Noise power</td><td>σ<sub>w</sub><sup>2</sup> = -104 dBm</td></tr><tr><td>Shadowed-Rician fading coefficients</td><td>b = 0.3, v = 3, ω = 0.4</td></tr></table>
502
+
503
+ holographic metasurface element spacing is fixed to $\delta_N = \frac{\lambda}{4}$ in each microstrip, which means that the physical dimension of each microstrip is $\frac{\lambda}{4} N$ . The legend 'consider MC' indicates considering the effect of mutual coupling in the beamforming design, while 'ignore MC' means that the mutual coupling is present among the holographic metasurface elements but
504
+
505
+ ![](images/dc6d2bd2979458b390e7dc97462844fe6ce69dce4b072693e14260fec8403312.jpg)
506
+ (a) Number of microstrips $M = 2$ .
507
+
508
+ ![](images/b0ca5cd89e8e249499b2aea4e1cfe7ee5a57e30d6a1b48576b10307062e23817.jpg)
509
+ (b) Number of microstrips $M = 4$ .
510
+ Fig. 4. Throughput $R$ versus the number of holographic metasurface elements in each microstrip $N$ , with the fixed physical dimension of $4\lambda$ for each microstrip, i.e. for the holographic metasurface element spacing of $\delta_N = \frac{4\lambda}{N}$ .
511
+
512
+ ![](images/a4fed0faba091c5800b29e3570b5e907328bbabcf4ca7ba04181eb77954d4499.jpg)
513
+ (c) Number of microstrips $M = 6$ .
514
+
515
+ not accounted for in the beamforming designs. Observe in Fig. 3 that as expected, the throughput can be significantly improved upon considering the effect of mutual coupling in the beamforming design. Furthermore, the MMSE RC method based on the satellite distribution can achieve almost the same throughput as that based on the full CSI, even though it has a lower overhead and a lower computational complexity. Compared to the MRC method, where the RC vector is designed as $\mathbf{v} = \mathbf{h}^{(0)}$ , the MMSE RC method exhibits higher throughput since it can effectively mitigate the inter-satellite interference.
516
+
517
+ Fig. 4 portrays the throughput $R$ versus the number of holographic metasurface elements $N$ in each microstrip, with a fixed physical dimension of $4\lambda$ per microstrip. This setup implies that the holographic metasurface element spacing is $\delta_N = \frac{4\lambda}{N}$ . Fig. 4 demonstrates that increasing the number of holographic metasurface elements in each microstrip enhances the throughput. Moreover, employing more RF chains can further boost the throughput at the expense of increased hardware and energy costs. Notably, the throughput may degrade as the number of elements in each microstrip increases when the effect of mutual coupling is ignored in the beamforming design. This degradation occurs due to the intensified mutual coupling caused by the reduced spacing between metasurface elements.
518
+
519
+ To provide further insights, Fig. 5 illustrates the throughput $R$ as a function of the physical size of the microstrip, considering different holographic metasurface element spacings. Observe that the throughput can be improved by decreasing the holographic metasurface element spacing, which allows for more elements to be employed within the fixed physical size of the microstrip. Additionally, the throughput of the holographic metasurface is compared to that of the SoA antenna array architecture. In the latter, the antenna spacing is $\frac{\lambda}{2}$ and full-digital beamforming is employed. The results indicate that the holographic metasurface outperforms the SoA antenna array upon decreasing the holographic metasurface element spacing.
520
+
521
+ Fig. 6 compares the throughput $R$ versus the dipole length $\delta_0$ across various detection methods. The legend 'ideal hardware' refers to the absence of mutual coupling among holographic
522
+
523
+ ![](images/f5c4696f7c8b44856edca65f98fbc4236e615f634c1a309ef01f175a772084e1.jpg)
524
+ (a) MMSE method based on the full CSI.
525
+
526
+ ![](images/960c93a532a98ffdc769e9603927ad282b85e4436be254330ed5db384b813bf7.jpg)
527
+ (b) MMSE method based on the statistical CSI.
528
+ Fig. 5. The throughput $R$ versus the physical size of the microstrip.
529
+
530
+ metasurface elements, i.e., when the mutual coupling matrix obeys $\mathbf{C} = \mathbf{I}_{MN}$. Observe that the throughput of the holographic metasurface relying on non-ideal hardware approaches that of the ideal hardware when the dipole length is small, as the side effects of mutual coupling on the system performance are reduced. By contrast, when the dipole length is long, mutual coupling significantly reduces the throughput. However, incorporating mutual coupling into the beamforming design can effectively compensate for the reduced throughput.
531
+
532
+ ![](images/72111c98ef65a5f8429bacbe49d7d8e94cfa24eb453f0ca5b3dac2f578b26de9.jpg)
533
+ Fig. 6. The throughput $R$ versus the dipole length $\delta_0$ in various beamforming methods.
534
+
535
+ ![](images/c116554c6492a78514a41b88fb711c304ddd44676d65007b1a4facf9c2ca8319.jpg)
536
+ Fig. 7. The throughput $R$ versus the number of satellites $|\mathcal{A}|$ , at different transmit power $\rho$ .
537
+
538
+ In Fig. 7, the throughput $R$ is compared against the number of satellites $|\mathcal{A}|$ for different transmit power levels $\rho$. When the satellites are sparse, i.e., $|\mathcal{A}| < 30$, the throughput improves upon increasing the number of satellites. This is because in the sparse regime the throughput is primarily determined by the probability $P_{\mathrm{S}}$ that the serving satellite is located in the visible domain, and deploying more satellites increases $P_{\mathrm{S}}$. However, as the number of satellites increases further, the throughput degrades due to the increased inter-satellite interference.
539
+
540
+ In satellite communications, the path loss exponent varies due to factors like atmospheric conditions, terrain features, and ground reflections within the propagation environment. Fig. 8 compares the signal-to-interference ratio (SIR), denoted as $\gamma_0$, versus the path loss exponent $\alpha$ for the various beamforming methods. Observe that the SIR improves as the path loss exponent $\alpha$ increases, since a higher path loss exponent attenuates the longer interfering links more severely than the serving link, thereby mitigating the inter-satellite interference.
541
+
542
+ Finally, to investigate the effect of varying the altitude of the satellite constellation on the throughput, we consider the specific case of a single altitude, i.e., when $H_{1} = H_{2} = H$ . Fig. 9 compares the throughput $R$ versus the altitude of the satellite orbit $H$ for different numbers of satellites $|\mathcal{A}|$ . When the number of satellites is small, i.e. $|\mathcal{A}| = 10$ , a higher
543
+
544
+ ![](images/dd465374bc6c7ca6d0900307172a0953d0a92f581ffe0ca3d04ee99e06a29df9.jpg)
545
+ Fig. 8. Comparison of the SIR $\gamma_0$ versus the path loss exponent $\alpha$ in various beamforming methods.
546
+
547
+ ![](images/7d29b3a2e6a5f6560480d7878be1ae3228fef49f4f39e7f3110e4909f3629970.jpg)
548
+ Fig. 9. The throughput $R$ versus the altitude of satellite orbit $H$ , for 10, 30 and 300 satellites.
549
+
550
+ throughput can be attained as the altitude of the satellite constellation increases. This can be explained by the fact that when the satellite density is low, increasing the altitude improves signal coverage probability. However, when the number of satellites is moderate, i.e. $|\mathcal{A}| = 30$ , the throughput initially increases and then decreases with the altitude of the satellite constellation. Conversely, when the number of satellites is high, i.e. $|\mathcal{A}| = 300$ , the throughput consistently decreases as the altitude of the satellite constellation increases. This can be attributed to the fact that at high satellite densities, increasing the altitude exacerbates inter-satellite interference.
551
+
552
+ # V. CONCLUSIONS
553
+
554
+ In this paper, we conceived a hybrid beamforming design for holographic metasurface-based multi-altitude LEO satellite networks. Specifically, the holographic beamformer was optimized for maximizing the channel gain of the serving satellite link, while the digital beamformer was designed for mitigating the interference. To reduce the CSI acquisition overhead, we proposed the low-complexity MMSE RC scheme based on the statistical information of the LEO satellite constellation by leveraging stochastic geometry, which achieves comparable throughput to using full CSI in the case of a dense deployment of LEO satellites. Furthermore, the holographic metasurface-based hybrid beamformer can achieve higher throughput than
555
+
556
+ the full digital beamformer relying on the SoA antenna array, given the same physical size of the transceivers. The dense placement of holographic metasurface elements underscores the importance of explicitly considering the mutual coupling in the beamforming design.
557
+
558
+ # APPENDIX A: PROOF OF THEOREM 1
559
+
560
+ Firstly, we consider the case of $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$. We define the ball $B(d) = \{(x, y, z) \in \mathbb{R}^3 \mid x^2 + y^2 + (z - R_{\mathrm{e}})^2 \leq d^2\}$, which represents the set of points whose distance from the terrestrial user is at most $d$. When $d \in [0, H_1)$, the volume of the intersection of the ball $B(d)$ and the visible area $\Omega'$, denoted as $V_{B \cap \Omega'}(d)$, is
561
+
562
+ $$
563
+ V _ {B \cap \Omega^ {\prime}} (d) = 0. \tag {57}
564
+ $$
565
+
566
+ When $d \in [H_1, \tilde{H}_1)$ , the volume of the intersection of the ball $B(d)$ and the visible domain $\Omega'$ is
567
+
568
+ $$
569
+ \begin{array}{l} V _ {B \cap \Omega^ {\prime}} (d) = \frac {\pi}{4 R _ {\mathrm {e}}} d ^ {4} + \frac {2 \pi}{3} d ^ {3} - \frac {\pi H _ {1} (2 R _ {\mathrm {e}} + H _ {1})}{2 R _ {\mathrm {e}}} d ^ {2} \\ + \frac {\pi H _ {1} ^ {3} \left(4 R _ {\mathrm {e}} + 3 H _ {1}\right)}{12 R _ {\mathrm {e}}}. \tag {58} \\ \end{array}
570
+ $$
571
+
572
+ By contrast, when $d \in [\sqrt{H_1(2R_{\mathrm{e}} + H_1)}, H_2)$ , the volume of the intersection of the ball $B(d)$ and the visible shell $\Omega'$ is
573
+
574
+ $$
575
+ V _ {B \cap \Omega^ {\prime}} (d) = \frac {2 \pi}{3} d ^ {3} - \frac {\pi H _ {1} ^ {2} \left(3 R _ {\mathrm {e}} + 2 H _ {1}\right)}{3}. \tag {59}
576
+ $$
577
+
578
+ For $d \in [H_2, \sqrt{H_2(2R_{\mathrm{e}} + H_2)})$ , the volume of the intersection of the ball $B(d)$ and the visible region $\Omega'$ is
579
+
580
+ $$
581
+ \begin{array}{l} V _ {B \cap \Omega^ {\prime}} (d) = - \frac {\pi}{4 R _ {\mathrm {e}}} d ^ {4} + \frac {\pi H _ {2} \left(2 R _ {\mathrm {e}} + H _ {2}\right)}{2 R _ {\mathrm {e}}} d ^ {2} \\ - \frac {\pi \left(H _ {2} ^ {3} \left(4 R _ {\mathrm {e}} + 3 H _ {2}\right) + 4 R _ {\mathrm {e}} H _ {1} ^ {2} \left(3 R _ {\mathrm {e}} + 2 H _ {1}\right)\right)}{12 R _ {\mathrm {e}}}. \tag {60} \\ \end{array}
582
+ $$
583
+
584
+ Finally, when $d \in [\sqrt{H_2(2R_{\mathrm{e}} + H_2)}, \infty)$ , the volume of the intersection of the ball $B(d)$ and the visible region $\Omega'$ is
585
+
586
+ $$
587
+ V _ {B \cap \Omega^ {\prime}} (d) = V ^ {\prime}. \tag {61}
588
+ $$
589
+
590
+ According to (57), (58), (59), (60) and (61), the CDF of the distance from any specific one of the satellites in the visible region $\Omega'$ to the terrestrial user is
591
+
592
+ $$
593
+ F _ {D ^ {\prime}} (d) = \frac {V _ {B \cap \Omega^ {\prime}} (d)}{V ^ {\prime}}, \tag {62}
594
+ $$
595
+
596
+ as shown in (12).
597
+
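+ The closed-form volumes (57)-(61) immediately yield the CDF (62); below is a direct transcription for the case $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} \leq H_2$, using the Table III altitudes for illustration:
+
+ ```python
+ import numpy as np
+
+ def V_cap(d, H1, H2, Re=6371e3):
+     """Volume V_{B ∩ Ω'}(d) of (57)-(61), assuming sqrt(H1(2Re+H1)) <= H2."""
+     Ht1 = np.sqrt(H1 * (2 * Re + H1))
+     Ht2 = np.sqrt(H2 * (2 * Re + H2))
+     d = min(d, Ht2)                       # (61): saturates at V' beyond Ht2
+     if d < H1:                            # (57)
+         return 0.0
+     if d < Ht1:                           # (58)
+         return (np.pi / (4 * Re)) * d**4 + (2 * np.pi / 3) * d**3 \
+             - (np.pi * H1 * (2 * Re + H1) / (2 * Re)) * d**2 \
+             + np.pi * H1**3 * (4 * Re + 3 * H1) / (12 * Re)
+     if d < H2:                            # (59)
+         return (2 * np.pi / 3) * d**3 - np.pi * H1**2 * (3 * Re + 2 * H1) / 3
+     return -(np.pi / (4 * Re)) * d**4 \
+         + (np.pi * H2 * (2 * Re + H2) / (2 * Re)) * d**2 \
+         - np.pi * (H2**3 * (4 * Re + 3 * H2)
+                    + 4 * Re * H1**2 * (3 * Re + 2 * H1)) / (12 * Re)   # (60)
+
+ H1, H2 = 160e3, 2000e3
+ V_prime = V_cap(np.inf, H1, H2)           # total visible-shell volume V'
+ F_D = lambda d: V_cap(d, H1, H2) / V_prime   # CDF of (62)
+ ```
+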
598
+ By contrast, when $\sqrt{H_1(2R_{\mathrm{e}} + H_1)} > H_2$ , the CDF of the distance from any specific satellite in the visible region to the terrestrial user can be derived as shown in (13).
599
+
600
+ # APPENDIX B: PROOF OF THEOREM 2
601
+
602
+ Given the distance between the serving satellite and the terrestrial user $D_0 = d_0$ , the value of $f_{D_1^{\prime}|D_0}(d|d_0)$ can be derived as
603
+
604
+ $$
605
+ f _ {D _ {1} ^ {\prime} \mid D _ {0}} (d \mid d _ {0}) = \frac {f _ {D ^ {\prime}} (d)}{1 - F _ {D ^ {\prime}} (d _ {0})}. \tag {63}
606
+ $$
607
+
608
+ Substituting (14) and (15) into (63), we can arrive at $f_{D_1'|D_0}(d|d_0)$ as shown in (22) and (23).
609
+
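+ In other words, (63) merely truncates the marginal distance law to $d \geq d_0$ (the serving satellite being the nearest one) and renormalizes; as a sketch:
+
+ ```python
+ # Conditional pdf of (63): restrict the marginal interferer-distance pdf
+ # f_D'(d) to distances beyond the serving distance d0 and renormalize.
+ def f_conditional(d, d0, f_marginal, F_marginal):
+     if d < d0:
+         return 0.0
+     return f_marginal(d) / (1.0 - F_marginal(d0))
+ ```
+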
610
+ # APPENDIX C: PROOF OF THEOREM 3
611
+
612
+ According to (2), (3), (4) and (42), $\mathbb{E}[\mathbf{h}^{(l)}\mathbf{h}^{(l)\mathrm{H}}]$ can be expressed as
613
+
614
+ $$
615
+ \begin{array}{l} \mathbb {E} \left[ \mathbf {h} ^ {(l)} \mathbf {h} ^ {(l) \mathrm {H}} \right] = \varsigma \zeta \left(\frac {\lambda}{4 \pi}\right) ^ {2} \mathcal {L} (d _ {0}) \cdot \\ \frac {\mathbf {Q} \left(\mathbf {C C} ^ {\mathrm {H}} + \left(\mathbf {C C} ^ {\mathrm {H}}\right) \odot \mathbf {I} _ {M N}\right) \mathbf {Q} ^ {\mathrm {H}}}{4}, \tag {64} \\ \end{array}
616
+ $$
617
+
618
+ where $\mathcal{L}(d_0) = \mathbb{E}\left[D^{-\alpha}\right]$ denotes the average path loss, with $D$ being the distance from an interfering satellite in $\Omega^{\prime}$ to the terrestrial user; it can be further represented as shown in (48), (49), (50), (51), (52), (53), (54) and (55) by averaging $d^{-\alpha}$ over the conditional distance distributions obtained from (14) and (15). Furthermore, the average number of interfering satellites located in the visible region, denoted as $|\mathcal{A}^{\prime}|$, is
619
+
620
+ $$
621
+ \left| \mathcal {A} ^ {\prime} \right| = \left(\left| \mathcal {A} \right| - 1\right) P _ {\mathrm {I}}. \tag {65}
622
+ $$
623
+
624
+ According to (64) and (65), we have:
625
+
626
+ $$
627
+ \begin{array}{l} \mathbf {R} _ {\mathrm {I}} ^ {\prime} = \mathbb {E} \left[ \sum_ {\mathbf {p} _ {l} \in \Omega^ {\prime}} \mathbf {h} ^ {(l)} \mathbf {h} ^ {(l) \mathrm {H}} \right] \\ = \left| \mathcal {A} ^ {\prime} \right| \cdot \mathbb {E} \left[ \mathbf {h} ^ {(l)} \mathbf {h} ^ {(l) \mathrm {H}} \right] \\ = \varsigma \zeta \left(\frac {\lambda}{4 \pi}\right) ^ {2} (| \mathcal {A} | - 1) P _ {\mathrm {I}} \mathcal {L} \left(d _ {0}\right) \cdot \\ \frac {\mathbf {Q} \left(\mathbf {C} \mathbf {C} ^ {\mathrm {H}} + \left(\mathbf {C} \mathbf {C} ^ {\mathrm {H}}\right) \odot \mathbf {I} _ {M N}\right) \mathbf {Q} ^ {\mathrm {H}}}{4}. \tag {66} \\ \end{array}
628
+ $$
629
+
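+ Numerically, (66) together with the block structure (47) is straightforward to assemble; a sketch with an identity placeholder for the coupling matrix $\mathbf{C}$ and a unit scalar prefactor:
+
+ ```python
+ import numpy as np
+
+ M, N = 4, 8
+ rng = np.random.default_rng(3)
+ q = rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))
+
+ # Q of (47): row m carries q^{(m)T} in its own block of N columns.
+ Q = np.zeros((M, M * N), dtype=complex)
+ for m in range(M):
+     Q[m, m * N:(m + 1) * N] = q[m]
+
+ C = np.eye(M * N, dtype=complex)      # placeholder mutual-coupling matrix
+ CCH = C @ C.conj().T
+ prefactor = 1.0                       # stands for the scalar factor in (66)
+
+ # (C C^H) ⊙ I_MN is the diagonal part, hence the elementwise product below.
+ R_I = prefactor * Q @ (CCH + CCH * np.eye(M * N)) @ Q.conj().T / 4
+ ```
+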
630
+ # REFERENCES
631
+
632
+ [1] K.-C. Tsai, L. Fan, R. Lent, L.-C. Wang, and Z. Han, "Distributionally robust optimal routing for integrated satellite-terrestrial networks under uncertainty," IEEE Trans. Commun., vol. 72, no. 10, pp. 6401-6405, 2024.
633
+ [2] Z. Xiang, X. Gao, K.-X. Li, and X.-G. Xia, "Massive MIMO downlink transmission for multiple LEO satellite communication," IEEE Trans. Commun., vol. 72, no. 6, pp. 3352-3364, 2024.
634
+ [3] X. Cao, B. Yang, Y. Shen, C. Yuen, Y. Zhang, Z. Han, H. V. Poor, and L. Hanzo, "Edge-assisted multi-layer offloading optimization of LEO satellite-terrestrial integrated networks," IEEE J. Sel. Areas Commun., vol. 41, no. 2, pp. 381-398, 2022.
635
+ [4] R. Wang, M. A. Kishk, and M.-S. Alouini, "Ultra reliable low latency routing in LEO satellite constellations: A stochastic geometry approach," IEEE J. Sel. Areas Commun., vol. 42, no. 5, pp. 1231-1245, 2024.
636
+ [5] G. Zeng, Y. Zhan, and X. Xiao, "Multi-agent deep reinforcement learning based channel allocation for MEO-LEO networked telemetry system," IEEE Internet Things J., vol. 11, no. 6, pp. 1081-10830, 2024.
637
+ [6] C. Jiang and X. Zhu, "Reinforcement learning based capacity management in multi-layer satellite networks," IEEE Trans. Wireless Commun., vol. 19, no. 7, pp. 4685-4699, 2020.
638
+ [7] K.-X. Li, L. You, J. Wang, X. Gao, C. G. Tsinos, S. Chatzinotas, and B. Ottersten, "Downlink transmit design for massive MIMO LEO satellite communications," IEEE Trans. Commun., vol. 70, no. 2, pp. 1014-1028, 2021.
639
+ [8] L. You, X. Qiang, K.-X. Li, C. G. Tsinos, W. Wang, X. Gao, and B. Ottersten, "Hybrid analog/digital precoding for downlink massive MIMO LEO satellite communications," IEEE Trans. Wireless Commun., vol. 21, no. 8, pp. 5962-5976, 2022.
640
+ [9] ——, "Massive MIMO hybrid precoding for LEO satellite communications with twin-resolution phase shifters and nonlinear power amplifiers," IEEE Trans. Commun., vol. 70, no. 8, pp. 5543-5557, 2022.
641
+ [10] Y. Huang, L. You, C. G. Tsinos, W. Wang, and X. Gao, “QoS-aware precoding in downlink massive MIMO LEO satellite communications,” IEEE Commun. Lett., vol. 27, no. 6, pp. 1560–1564, 2023.
642
+
643
+ [11] Y. Liu, Y. Wang, J. Wang, L. You, W. Wang, and X. Gao, "Robust downlink precoding for LEO satellite systems with per-antenna power constraints," IEEE Trans. Veh. Technol., vol. 71, no. 10, pp. 10694-10711, 2022.
644
+ [12] C. Huang, S. Hu, G. C. Alexandropoulos, A. Zappone, C. Yuen, R. Zhang, M. Di Renzo, and M. Debbah, “Holographic MIMO surfaces for 6G wireless networks: Opportunities, challenges, and trends,” IEEE Wireless Commun., vol. 27, no. 5, pp. 118–125, 2020.
645
+ [13] T. Gong, P. Gavrilidis, R. Ji, C. Huang, G. C. Alexandropoulos, L. Wei, Z. Zhang, M. Debbah, H. V. Poor, and C. Yuen, "Holographic MIMO communications: Theoretical foundations, enabling technologies, and future directions," IEEE Commun. Surv. Tutor., vol. 26, no. 1, pp. 196-257, 2024.
646
+ [14] Q. Li, M. El-Hajjar, I. Hemadeh, D. Jagyasi, A. Shojaeifard, E. Basar, and L. Hanzo, “The reconfigurable intelligent surface-aided multi-node IoT downlink: Beamforming design and performance analysis,” IEEE Internet Things J., vol. 10, no. 7, pp. 6400–6414, 2022.
647
+ [15] Q. Li, M. El-Hajjar, I. Hemadeh, A. Shojaeifard, A. A. Mourad, and L. Hanzo, "Reconfigurable intelligent surface aided amplitude-and phase-modulated downlink transmission," IEEE Trans. Veh. Technol., vol. 72, no. 6, pp. 8146-8151, 2023.
648
+ [16] I. Yoo and D. R. Smith, "Sub-6-GHz uplink massive MIMO system using holographic beamforming metasurfaces: A conceptual development," IEEE Wireless Commun. Lett., vol. 12, no. 4, pp. 644-648, 2023.
649
+ [17] R. Deng, Y. Zhang, H. Zhang, B. Di, H. Zhang, and L. Song, "Reconfigurable holographic surface: A new paradigm to implement holographic radio," IEEE Veh. Technol. Mag., vol. 18, no. 1, pp. 20-28, 2023.
650
+ [18] Y. Sun, Z. Lin, K. An, D. Li, C. Li, Y. Zhu, D. W. K. Ng, N. Al-Dhahir, and J. Wang, "Multi-functional RIS-assisted semantic anti-jamming communication and computing in integrated aerial-ground networks," IEEE J. Sel. Areas Commun.
651
+ [19] K. An, Y. Sun, Z. Lin, Y. Zhu, W. Ni, N. Al-Dhahir, K.-K. Wong, and D. Niyato, "Exploiting multi-layer refracting RIS-assisted receiver for HAP-SWIPT networks," IEEE Trans. Wireless Commun., vol. 23, no. 10, pp. 12638-12657, 2024.
652
+ [20] Y. Sun, Y. Zhu, K. An, Z. Lin, C. Li, D. W. K. Ng, and J. Wang, "Active-passive cascaded RIS-aided receiver design for jamming nulling and signal enhancing," IEEE Trans. Wireless Commun., vol. 23, no. 6, pp. 5345-5362, 2024.
653
+ [21] R. Deng, B. Di, H. Zhang, H. V. Poor, and L. Song, "Holographic MIMO for LEO satellite communications aided by reconfigurable holographic surfaces," IEEE J. Sel. Areas Commun., vol. 40, no. 10, pp. 3071-3085, 2022.
654
+ [22] X. Hu, R. Deng, B. Di, H. Zhang, and L. Song, “Holographic beamforming for LEO satellites,” IEEE Commun. Lett., vol. 27, no. 10, pp. 2717–2721, 2023.
655
+ [23] S. Lin, J. An, L. Gan, M. Debbah, and C. Yuen, "Stacked intelligent metasurface enabled LEO satellite communications relying on statistical CSI," IEEE Wireless Commun. Lett., vol. 13, no. 5, pp. 1295-1299, 2024.
656
+ [24] N. Okati, T. Riihonen, D. Korpi, I. Angervuori, and R. Wichman, "Downlink coverage and rate analysis of low earth orbit satellite constellations using stochastic geometry," IEEE Trans. Commun., vol. 68, no. 8, pp. 5120-5134, 2020.
657
+ [25] D.-H. Jung, J.-G. Ryu, W.-J. Byun, and J. Choi, “Performance analysis of satellite communication system under the shadowed-rician fading: A stochastic geometry approach,” IEEE Trans. Commun., vol. 70, no. 4, pp. 2707–2721, 2022.
658
+ [26] J. Park, J. Choi, and N. Lee, "A tractable approach to coverage analysis in downlink satellite networks," IEEE Trans. Wireless Commun., vol. 22, no. 2, pp. 793-807, 2022.
659
+ [27] Y. Sun and Z. Ding, "A fine grained stochastic geometry-based analysis on LEO satellite communication systems," IEEE Netw. Lett., vol. 5, no. 4, pp. 237-240, 2023.
660
+
661
+ [28] C.-S. Choi, "Modeling and analysis of downlink communications in a heterogeneous LEO satellite network," IEEE Trans. Wireless Commun., vol. 23, no. 8, pp. 8588-8602, 2024.
662
+ [29] N. Okati and T. Riihonen, “Nonhomogeneous stochastic geometry analysis of massive LEO communication constellations,” IEEE Trans. Commun., vol. 70, no. 3, pp. 1848–1860, 2022.
663
+ [30] X. Hu, B. Lin, X. Lu, P. Wang, N. Cheng, Z. Yin, and W. Zhuang, "Performance analysis of end-to-end LEO satellite-aided shore-to-ship communications: A stochastic geometry approach," IEEE Trans. Wireless Commun., vol. 23, no. 9, pp. 11753-11769, 2024.
664
+ [31] N. Okati and T. Riihonen, "Stochastic coverage analysis for multialtitude LEO satellite networks," IEEE Commun. Lett., vol. 27, no. 12, pp. 3305-3309, 2023.
665
+ [32] C.-S. Choi et al., "Cox point processes for multi altitude LEO satellite networks," IEEE Trans. Veh. Technol., vol. 73, no. 10, pp. 15916-15921, 2024.
666
+ [33] X. Zhang, S. Sun, M. Tao, Q. Huang, and X. Tang, "Multi-satellite cooperative networks: Joint hybrid beamforming and user scheduling design," IEEE Trans. Wireless Commun., vol. 23, no. 7, pp. 7938-7952, 2024.
667
+ [34] J. Xu, L. You, G. C. Alexandropoulos, X. Yi, W. Wang, and X. Gao, "Near-field wideband extremely large-scale MIMO transmissions with holographic metasurface-based antenna arrays," IEEE Trans. Wireless Commun., vol. 23, no. 9, pp. 12054-12067, 2024.
668
+ [35] J. An, C. Xu, D. W. K. Ng, G. C. Alexandropoulos, C. Huang, C. Yuen, and L. Hanzo, "Stacked intelligent metasurfaces for efficient holographic MIMO communications in 6G," IEEE J. Sel. Areas Commun., vol. 41, no. 8, pp. 2380-2396, 2023.
669
+ [36] J. An, M. Di Renzo, M. Debbah, H. V. Poor, and C. Yuen, "Stacked intelligent metasurfaces for multiuser downlink beamforming in the wave domain," arXiv preprint arXiv:2309.02687, 2023.
670
+ [37] Q. Li, M. El-Hajjar, C. Xu, J. An, C. Yuen, and L. Hanzo, "Stacked intelligent metasurfaces for holographic MIMO aided cell-free networks," IEEE Trans. Commun., vol. 72, no. 11, pp. 7139-7151, 2024.
671
+ [38] Q. Li, M. El-Hajjar, Y. Sun, I. Hemadeh, A. Shojaeifard, and L. Hanzo, "Energy-efficient reconfigurable holographic surfaces operating in the presence of realistic hardware impairments," IEEE Trans. Commun., vol. 72, no. 8, pp. 5226-5238, 2024.
672
+ [39] Q. Li, M. El-Hajjar, Y. Sun, and L. Hanzo, "Performance analysis of reconfigurable holographic surfaces in the near-field scenario of cell-free networks under hardware impairments," IEEE Trans. Wireless Commun., vol. 23, no. 9, pp. 11972-11984, 2024.
673
+ [40] H. Zhang, N. Shlezinger, F. Guidi, D. Dardari, M. F. Imani, and Y. C. Eldar, "Beam focusing for near-field multiuser MIMO communications," IEEE Trans. Wireless Commun., vol. 21, no. 9, pp. 7476-7490, 2022.
674
+ [41] Y. Li, S. Gong, H. Liu, C. Xing, N. Zhao, and X. Wang, "Nearfield beamforming optimization for holographic XL-MIMO multiuser systems," IEEE Trans. Commun., vol. 72, no. 4, pp. 2309-2323, 2024.
675
+ [42] Q. Huang, M. Lin, J.-B. Wang, T. A. Tsiftsis, and J. Wang, "Energy efficient beamforming schemes for satellite-aerial-terrestrial networks," IEEE Trans. Commun., vol. 68, no. 6, pp. 3863-3875, 2020.
676
+ [43] C. A. Balanis, Antenna theory: analysis and design. John Wiley & Sons, 2016.
677
+ [44] E. Björnson, J. Hoydis, L. Sanguinetti et al., "Massive MIMO networks: Spectral, energy, and hardware efficiency," Foundations and Trends® in Signal Processing, vol. 11, no. 3-4, pp. 154-655, 2017.
678
+ [45] J. An, C. Yuen, C. Xu, H. Li, D. W. K. Ng, M. Di Renzo, M. Debbah, and L. Hanzo, "Stacked intelligent metasurface-aided MIMO transceiver design," IEEE Wireless Commun., vol. 31, no. 4, pp. 123-131, 2024.
679
+ [46] R. Deng, B. Di, H. Zhang, Y. Tan, and L. Song, "Reconfigurable holographic surface-enabled multi-user wireless communications: Amplitude-controlled holographic beamforming," IEEE Trans. Wireless Commun., vol. 21, no. 8, pp. 6003-6017, 2022.